aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.checkpatch.conf61
-rw-r--r--.gitignore11
-rw-r--r--Makefile462
-rw-r--r--contributing.rst96
-rw-r--r--dco.txt37
-rw-r--r--defaults.mk47
-rw-r--r--docs/change-log.rst241
-rw-r--r--docs/design.rst266
-rw-r--r--docs/implementing-tests.rst128
-rw-r--r--docs/porting-guide.rst409
-rw-r--r--docs/user-guide.rst550
-rw-r--r--drivers/arm/gic/arm_gic_v2.c121
-rw-r--r--drivers/arm/gic/arm_gic_v2v3.c196
-rw-r--r--drivers/arm/gic/gic_common.c203
-rw-r--r--drivers/arm/gic/gic_v2.c353
-rw-r--r--drivers/arm/gic/gic_v3.c507
-rw-r--r--drivers/arm/pl011/aarch32/pl011_console.S275
-rw-r--r--drivers/arm/pl011/aarch64/pl011_console.S264
-rw-r--r--drivers/arm/sp805/sp805.c153
-rw-r--r--drivers/arm/timer/private_timer.c73
-rw-r--r--drivers/arm/timer/sp804.c100
-rw-r--r--drivers/arm/timer/system_timer.c92
-rw-r--r--drivers/io/io_fip.c334
-rw-r--r--drivers/io/io_memmap.c217
-rw-r--r--drivers/io/io_storage.c349
-rw-r--r--drivers/io/vexpress_nor/io_vexpress_nor_hw.c456
-rw-r--r--drivers/io/vexpress_nor/io_vexpress_nor_internal.h39
-rw-r--r--drivers/io/vexpress_nor/io_vexpress_nor_ops.c278
-rw-r--r--drivers/io/vexpress_nor/norflash.h41
-rw-r--r--el3_payload/.gitignore6
-rw-r--r--el3_payload/Makefile91
-rw-r--r--el3_payload/README141
-rw-r--r--el3_payload/arch.h20
-rw-r--r--el3_payload/asm_macros.S22
-rw-r--r--el3_payload/el3_payload.ld.S28
-rw-r--r--el3_payload/entrypoint.S65
-rw-r--r--el3_payload/plat/fvp/platform.S51
-rw-r--r--el3_payload/plat/fvp/platform.h21
-rw-r--r--el3_payload/plat/fvp/platform.mk8
-rw-r--r--el3_payload/plat/juno/platform.S34
-rw-r--r--el3_payload/plat/juno/platform.h17
-rw-r--r--el3_payload/plat/juno/platform.mk8
-rwxr-xr-xel3_payload/scripts/fvp/run_foundation.sh51
-rwxr-xr-xel3_payload/scripts/fvp/run_fvp.sh56
-rw-r--r--el3_payload/scripts/juno/load_el3_payload.ds11
-rwxr-xr-xel3_payload/scripts/juno/run_ds5_script.sh49
-rw-r--r--el3_payload/spin.S77
-rw-r--r--el3_payload/uart.S56
-rw-r--r--fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S73
-rw-r--r--fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S74
-rw-r--r--fwu/ns_bl1u/ns_bl1u.ld.S89
-rw-r--r--fwu/ns_bl1u/ns_bl1u.mk79
-rw-r--r--fwu/ns_bl1u/ns_bl1u_main.c226
-rw-r--r--fwu/ns_bl1u/ns_bl1u_tests.c261
-rw-r--r--fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S65
-rw-r--r--fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S66
-rw-r--r--fwu/ns_bl2u/ns_bl2u.ld.S77
-rw-r--r--fwu/ns_bl2u/ns_bl2u.mk75
-rw-r--r--fwu/ns_bl2u/ns_bl2u_main.c59
-rw-r--r--include/common/aarch32/asm_macros.S84
-rw-r--r--include/common/aarch32/assert_macros.S29
-rw-r--r--include/common/aarch64/asm_macros.S194
-rw-r--r--include/common/aarch64/assert_macros.S30
-rw-r--r--include/common/asm_macros_common.S66
-rw-r--r--include/common/debug.h107
-rw-r--r--include/common/firmware_image_package.h39
-rw-r--r--include/common/fwu_nvm.h58
-rw-r--r--include/common/image_loader.h45
-rw-r--r--include/common/param_header.h55
-rw-r--r--include/common/test_helpers.h166
-rw-r--r--include/drivers/arm/arm_gic.h153
-rw-r--r--include/drivers/arm/gic_common.h113
-rw-r--r--include/drivers/arm/gic_v2.h336
-rw-r--r--include/drivers/arm/gic_v3.h215
-rw-r--r--include/drivers/arm/pl011.h78
-rw-r--r--include/drivers/arm/private_timer.h15
-rw-r--r--include/drivers/arm/sp804.h56
-rw-r--r--include/drivers/arm/sp805.h55
-rw-r--r--include/drivers/arm/system_timer.h37
-rw-r--r--include/drivers/console.h28
-rw-r--r--include/drivers/io/io_driver.h59
-rw-r--r--include/drivers/io/io_fip.h50
-rw-r--r--include/drivers/io/io_memmap.h14
-rw-r--r--include/drivers/io/io_nor_flash.h32
-rw-r--r--include/lib/aarch32/arch.h595
-rw-r--r--include/lib/aarch32/arch_helpers.h358
-rw-r--r--include/lib/aarch64/arch.h621
-rw-r--r--include/lib/aarch64/arch_helpers.h332
-rw-r--r--include/lib/cassert.h18
-rw-r--r--include/lib/events.h82
-rw-r--r--include/lib/extensions/amu.h35
-rw-r--r--include/lib/extensions/amu_private.h15
-rw-r--r--include/lib/io_storage.h106
-rw-r--r--include/lib/irq.h89
-rw-r--r--include/lib/mmio.h42
-rw-r--r--include/lib/power_management.h192
-rw-r--r--include/lib/semihosting.h58
-rw-r--r--include/lib/sgi.h21
-rw-r--r--include/lib/spinlock.h18
-rw-r--r--include/lib/status.h28
-rw-r--r--include/lib/stdlib/assert.h60
-rw-r--r--include/lib/stdlib/machine/_limits.h11
-rw-r--r--include/lib/stdlib/machine/_stdint.h192
-rw-r--r--include/lib/stdlib/machine/_types.h121
-rw-r--r--include/lib/stdlib/machine/aarch32/_limits.h89
-rw-r--r--include/lib/stdlib/machine/aarch64/_limits.h85
-rw-r--r--include/lib/stdlib/stdbool.h17
-rw-r--r--include/lib/stdlib/stddef.h64
-rw-r--r--include/lib/stdlib/stdio.h76
-rw-r--r--include/lib/stdlib/stdlib.h54
-rw-r--r--include/lib/stdlib/string.h66
-rw-r--r--include/lib/stdlib/sys/_null.h47
-rw-r--r--include/lib/stdlib/sys/_stdint.h82
-rw-r--r--include/lib/stdlib/sys/_types.h106
-rw-r--r--include/lib/stdlib/sys/cdefs.h686
-rw-r--r--include/lib/stdlib/sys/ctype.h59
-rw-r--r--include/lib/stdlib/sys/errno.h193
-rw-r--r--include/lib/stdlib/sys/limits.h102
-rw-r--r--include/lib/stdlib/sys/stdarg.h75
-rw-r--r--include/lib/stdlib/sys/stdint.h74
-rw-r--r--include/lib/stdlib/sys/types.h245
-rw-r--r--include/lib/stdlib/sys/uuid.h61
-rw-r--r--include/lib/tftf_lib.h216
-rw-r--r--include/lib/timer.h133
-rw-r--r--include/lib/utils/math_utils.h23
-rw-r--r--include/lib/utils/misc_utils.h12
-rw-r--r--include/lib/utils/uuid_utils.h50
-rw-r--r--include/lib/utils_def.h156
-rw-r--r--include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h73
-rw-r--r--include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h83
-rw-r--r--include/lib/xlat_tables/xlat_mmu_helpers.h91
-rw-r--r--include/lib/xlat_tables/xlat_tables_arch.h43
-rw-r--r--include/lib/xlat_tables/xlat_tables_defs.h173
-rw-r--r--include/lib/xlat_tables/xlat_tables_v2.h360
-rw-r--r--include/lib/xlat_tables/xlat_tables_v2_helpers.h166
-rw-r--r--include/plat/arm/common/arm_def.h24
-rw-r--r--include/plat/arm/common/plat_arm.h29
-rw-r--r--include/plat/common/plat_topology.h180
-rw-r--r--include/plat/common/platform.h183
-rw-r--r--include/runtime_services/arm_arch_svc.h15
-rw-r--r--include/runtime_services/pmf.h49
-rw-r--r--include/runtime_services/psci.h224
-rw-r--r--include/runtime_services/sdei.h97
-rw-r--r--include/runtime_services/secure_el0_payloads/mm_svc.h31
-rw-r--r--include/runtime_services/secure_el0_payloads/secure_partition.h102
-rw-r--r--include/runtime_services/secure_el0_payloads/spm_svc.h63
-rw-r--r--include/runtime_services/secure_el1_payloads/tsp.h69
-rw-r--r--include/runtime_services/smccc.h77
-rw-r--r--include/runtime_services/sprt_svc.h46
-rw-r--r--include/runtime_services/std_svc.h27
-rw-r--r--include/runtime_services/trusted_os.h34
-rw-r--r--lib/aarch32/cache_helpers.S217
-rw-r--r--lib/aarch32/exception_stubs.S22
-rw-r--r--lib/aarch32/misc_helpers.S94
-rw-r--r--lib/aarch64/cache_helpers.S203
-rw-r--r--lib/aarch64/exception_stubs.S46
-rw-r--r--lib/aarch64/misc_helpers.S138
-rw-r--r--lib/compiler-rt/LICENSE.TXT91
-rw-r--r--lib/compiler-rt/builtins/arm/aeabi_uldivmod.S46
-rw-r--r--lib/compiler-rt/builtins/assembly.h204
-rw-r--r--lib/compiler-rt/builtins/ctzdi2.c35
-rw-r--r--lib/compiler-rt/builtins/int_endianness.h116
-rw-r--r--lib/compiler-rt/builtins/int_lib.h124
-rw-r--r--lib/compiler-rt/builtins/int_math.h114
-rw-r--r--lib/compiler-rt/builtins/int_types.h164
-rw-r--r--lib/compiler-rt/builtins/udivmoddi4.c231
-rw-r--r--lib/compiler-rt/compiler-rt.mk13
-rw-r--r--lib/delay/delay.c32
-rw-r--r--lib/events/events.c85
-rw-r--r--lib/extensions/amu/aarch32/amu.c37
-rw-r--r--lib/extensions/amu/aarch32/amu_helpers.S105
-rw-r--r--lib/extensions/amu/aarch64/amu.c37
-rw-r--r--lib/extensions/amu/aarch64/amu_helpers.S112
-rw-r--r--lib/irq/irq.c204
-rw-r--r--lib/locks/aarch32/spinlock.S37
-rw-r--r--lib/locks/aarch64/spinlock.S33
-rw-r--r--lib/power_management/hotplug/hotplug.c307
-rw-r--r--lib/power_management/suspend/aarch32/asm_tftf_suspend.S95
-rw-r--r--lib/power_management/suspend/aarch64/asm_tftf_suspend.S142
-rw-r--r--lib/power_management/suspend/suspend_private.h96
-rw-r--r--lib/power_management/suspend/tftf_suspend.c127
-rw-r--r--lib/psci/psci.c341
-rw-r--r--lib/sdei/sdei.c221
-rw-r--r--lib/semihosting/aarch32/semihosting_call.S14
-rw-r--r--lib/semihosting/aarch64/semihosting_call.S14
-rw-r--r--lib/semihosting/semihosting.c214
-rw-r--r--lib/smc/aarch32/asm_smc.S36
-rw-r--r--lib/smc/aarch32/smc.c19
-rw-r--r--lib/smc/aarch64/asm_smc.S50
-rw-r--r--lib/smc/aarch64/smc.c29
-rw-r--r--lib/stdlib/abort.c16
-rw-r--r--lib/stdlib/assert.c17
-rw-r--r--lib/stdlib/mem.c97
-rw-r--r--lib/stdlib/printf.c36
-rw-r--r--lib/stdlib/putchar.c24
-rw-r--r--lib/stdlib/puts.c31
-rw-r--r--lib/stdlib/rand.c65
-rw-r--r--lib/stdlib/strchr.c52
-rw-r--r--lib/stdlib/strcmp.c51
-rw-r--r--lib/stdlib/strlen.c45
-rw-r--r--lib/stdlib/strncmp.c52
-rw-r--r--lib/stdlib/strncpy.c61
-rw-r--r--lib/stdlib/subr_prf.c548
-rw-r--r--lib/trusted_os/trusted_os.c33
-rw-r--r--lib/utils/mp_printf.c72
-rw-r--r--lib/utils/uuid.c60
-rw-r--r--lib/xlat_tables_v2/aarch32/enable_mmu.S120
-rw-r--r--lib/xlat_tables_v2/aarch32/xlat_tables_arch.c231
-rw-r--r--lib/xlat_tables_v2/aarch64/enable_mmu.S95
-rw-r--r--lib/xlat_tables_v2/aarch64/xlat_tables_arch.c278
-rw-r--r--lib/xlat_tables_v2/xlat_tables.mk12
-rw-r--r--lib/xlat_tables_v2/xlat_tables_context.c199
-rw-r--r--lib/xlat_tables_v2/xlat_tables_core.c1157
-rw-r--r--lib/xlat_tables_v2/xlat_tables_private.h103
-rw-r--r--lib/xlat_tables_v2/xlat_tables_utils.c565
-rw-r--r--license.rst38
-rw-r--r--maintainers.rst22
-rw-r--r--plat/arm/board/fvp/aarch32/plat_helpers.S49
-rw-r--r--plat/arm/board/fvp/aarch64/plat_helpers.S48
-rw-r--r--plat/arm/board/fvp/fvp_def.h52
-rw-r--r--plat/arm/board/fvp/fvp_mem_prot.c26
-rw-r--r--plat/arm/board/fvp/fvp_pwr_state.c61
-rw-r--r--plat/arm/board/fvp/fvp_topology.c98
-rw-r--r--plat/arm/board/fvp/include/platform_def.h242
-rw-r--r--plat/arm/board/fvp/plat_setup.c49
-rw-r--r--plat/arm/board/fvp/platform.mk26
-rw-r--r--plat/arm/board/juno/aarch32/plat_helpers.S23
-rw-r--r--plat/arm/board/juno/aarch64/plat_helpers.S23
-rw-r--r--plat/arm/board/juno/include/platform_def.h238
-rw-r--r--plat/arm/board/juno/juno32_tests_to_skip.txt11
-rw-r--r--plat/arm/board/juno/juno_def.h60
-rw-r--r--plat/arm/board/juno/juno_mem_prot.c26
-rw-r--r--plat/arm/board/juno/juno_pwr_state.c66
-rw-r--r--plat/arm/board/juno/juno_timers.c31
-rw-r--r--plat/arm/board/juno/juno_topology.c56
-rw-r--r--plat/arm/board/juno/plat_setup.c83
-rw-r--r--plat/arm/board/juno/platform.mk42
-rw-r--r--plat/arm/board/juno/tests.xml40
-rw-r--r--plat/arm/common/arm_common.mk18
-rw-r--r--plat/arm/common/arm_fwu_io_storage.c172
-rw-r--r--plat/arm/common/arm_io_storage.c65
-rw-r--r--plat/arm/common/arm_setup.c53
-rw-r--r--plat/arm/common/arm_timers.c32
-rw-r--r--plat/common/aarch32/platform_helpers.S67
-rw-r--r--plat/common/aarch32/platform_mp_stack.S80
-rw-r--r--plat/common/aarch32/platform_up_stack.S50
-rw-r--r--plat/common/aarch64/platform_helpers.S67
-rw-r--r--plat/common/aarch64/platform_mp_stack.S81
-rw-r--r--plat/common/aarch64/platform_up_stack.S50
-rw-r--r--plat/common/fwu_nvm_accessors.c164
-rw-r--r--plat/common/image_loader.c210
-rw-r--r--plat/common/plat_common.c138
-rw-r--r--plat/common/plat_state_id.c172
-rw-r--r--plat/common/plat_topology.c362
-rw-r--r--plat/common/tftf_nvm_accessors.c101
-rw-r--r--readme.rst163
-rwxr-xr-xscripts/run_fwu_fvp.sh34
-rw-r--r--spm/cactus/aarch64/cactus_entrypoint.S111
-rw-r--r--spm/cactus/cactus.h36
-rw-r--r--spm/cactus/cactus.ld.S52
-rw-r--r--spm/cactus/cactus.mk73
-rw-r--r--spm/cactus/cactus_main.c124
-rw-r--r--spm/cactus/cactus_service_loop.c141
-rw-r--r--spm/cactus/cactus_tests.h46
-rw-r--r--spm/cactus/cactus_tests_memory_attributes.c236
-rw-r--r--spm/cactus/cactus_tests_misc.c39
-rw-r--r--spm/cactus/cactus_tests_system_setup.c71
-rw-r--r--spm/common/aarch64/sp_arch_helpers.S41
-rw-r--r--spm/common/sp_helpers.c71
-rw-r--r--spm/common/sp_helpers.h61
-rw-r--r--tftf/framework/aarch32/arch.c18
-rw-r--r--tftf/framework/aarch32/asm_debug.S126
-rw-r--r--tftf/framework/aarch32/entrypoint.S175
-rw-r--r--tftf/framework/aarch32/exceptions.S32
-rw-r--r--tftf/framework/aarch64/arch.c16
-rw-r--r--tftf/framework/aarch64/asm_debug.S119
-rw-r--r--tftf/framework/aarch64/entrypoint.S181
-rw-r--r--tftf/framework/aarch64/exceptions.S143
-rw-r--r--tftf/framework/debug.c43
-rw-r--r--tftf/framework/framework.mk115
-rw-r--r--tftf/framework/include/nvm.h117
-rw-r--r--tftf/framework/include/tftf.h169
-rw-r--r--tftf/framework/main.c599
-rw-r--r--tftf/framework/nvm_results_helpers.c277
-rw-r--r--tftf/framework/report.c250
-rw-r--r--tftf/framework/tftf.ld.S100
-rw-r--r--tftf/framework/timer/timer_framework.c567
-rw-r--r--tftf/tests/common/test_helpers.c130
-rw-r--r--tftf/tests/extensions/amu/test_amu.c191
-rw-r--r--tftf/tests/framework_validation_tests/test_timer_framework.c556
-rw-r--r--tftf/tests/framework_validation_tests/test_validation_events.c122
-rw-r--r--tftf/tests/framework_validation_tests/test_validation_irq.c131
-rw-r--r--tftf/tests/framework_validation_tests/test_validation_nvm.c221
-rw-r--r--tftf/tests/framework_validation_tests/test_validation_sgi.c82
-rw-r--r--tftf/tests/fwu_tests/test_fwu_auth.c82
-rw-r--r--tftf/tests/fwu_tests/test_fwu_toc.c71
-rw-r--r--tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c82
-rw-r--r--tftf/tests/misc_tests/inject_serror.S114
-rw-r--r--tftf/tests/misc_tests/test_single_fault.c67
-rw-r--r--tftf/tests/misc_tests/test_uncontainable.c28
-rw-r--r--tftf/tests/performance_tests/smc_latencies.c175
-rw-r--r--tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c148
-rw-r--r--tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c131
-rw-r--r--tftf/tests/runtime_services/secure_service/secure_service_helpers.c23
-rw-r--r--tftf/tests/runtime_services/secure_service/test_secure_service_handle.c254
-rw-r--r--tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c137
-rw-r--r--tftf/tests/runtime_services/sip_service/test_exec_state_switch.c313
-rw-r--r--tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S178
-rw-r--r--tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c577
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c434
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c160
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c178
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c319
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c105
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c89
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c104
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c93
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c320
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c1004
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c29
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c186
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c31
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c885
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c581
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c249
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c576
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c301
-rw-r--r--tftf/tests/runtime_services/standard_service/query_std_svc.c85
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S135
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c447
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c676
-rw-r--r--tftf/tests/runtime_services/standard_service/unknown_smc.c33
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c838
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c290
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c479
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c55
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c158
-rw-r--r--tftf/tests/template_tests/test_template_multi_core.c84
-rw-r--r--tftf/tests/template_tests/test_template_single_core.c17
-rw-r--r--tftf/tests/tests-arm-state-switch.xml25
-rw-r--r--tftf/tests/tests-boot-req.xml15
-rw-r--r--tftf/tests/tests-common.xml37
-rw-r--r--tftf/tests/tests-cpu-extensions.xml21
-rw-r--r--tftf/tests/tests-el3-power-state.xml20
-rw-r--r--tftf/tests/tests-extensive.xml39
-rw-r--r--tftf/tests/tests-fwu.xml19
-rw-r--r--tftf/tests/tests-manual.xml31
-rw-r--r--tftf/tests/tests-performance.xml17
-rw-r--r--tftf/tests/tests-psci-extensive.xml29
-rw-r--r--tftf/tests/tests-psci.xml88
-rw-r--r--tftf/tests/tests-runtime-instrumentation.xml20
-rw-r--r--tftf/tests/tests-sdei.xml19
-rw-r--r--tftf/tests/tests-single-fault.xml13
-rw-r--r--tftf/tests/tests-spm.xml21
-rw-r--r--tftf/tests/tests-template.xml20
-rw-r--r--tftf/tests/tests-tftf-validation.xml26
-rw-r--r--tftf/tests/tests-tsp.xml34
-rw-r--r--tftf/tests/tests-uncontainable.xml14
-rw-r--r--tftf/tests/tests.mk88
-rwxr-xr-xtools/generate_test_list/generate_test_list.pl192
360 files changed, 47878 insertions, 0 deletions
diff --git a/.checkpatch.conf b/.checkpatch.conf
new file mode 100644
index 000000000..63c51b0c2
--- /dev/null
+++ b/.checkpatch.conf
@@ -0,0 +1,61 @@
+#
+# Configure how the Linux checkpatch script should be invoked in the context of
+# the TFTF source tree.
+#
+
+# This is not Linux so don't expect a Linux tree!
+--no-tree
+
+# This clarifies the line indications in the report.
+#
+# E.g.:
+# Without this option, we have the following output:
+# #333: FILE: drivers/arm/gic/arm_gic_v2v3.c:160:
+# So we have 2 line indications (333 and 160), which is confusing.
+# We only care about the position in the source file.
+#
+# With this option, it becomes:
+# drivers/arm/gic/arm_gic_v2v3.c:160:
+--showfile
+
+# Don't show some messages like the list of ignored types or the suggestion to
+# use "--fix" or report changes to the maintainers.
+--quiet
+
+#
+# Ignore the following message types, as they don't necessarily make sense in
+# the context of the TFTF.
+#
+
+# The Linux kernel expects the SPDX license tag in the first line of each file.
+# We don't follow this convention here.
+--ignore SPDX_LICENSE_TAG
+
+# COMPLEX_MACRO generates false positives.
+--ignore COMPLEX_MACRO
+
+# TFTF commit messages are expected to have a Gerrit Change-Id.
+--ignore GERRIT_CHANGE_ID
+
+# FILE_PATH_CHANGES reports this kind of message:
+# "added, moved or deleted file(s), does MAINTAINERS need updating?"
+# We do not use this MAINTAINERS file process in TFTF.
+--ignore FILE_PATH_CHANGES
+
+# AVOID_EXTERNS reports this kind of messages:
+# "externs should be avoided in .c files"
+# We don't follow this convention in TFTF.
+--ignore AVOID_EXTERNS
+
+# NEW_TYPEDEFS reports this kind of messages:
+# "do not add new typedefs"
+# We allow adding new typedefs in TFTF.
+--ignore NEW_TYPEDEFS
+
+# Avoid "Does not appear to be a unified-diff format patch" message
+--ignore NOT_UNIFIED_DIFF
+
+# VOLATILE reports this kind of messages:
+# "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt"
+# We allow the usage of the volatile keyword in TFTF.
+--ignore VOLATILE
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..35afcf594
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+# Ignore miscellaneous files
+*~
+cscope.*
+*.swp
+*.patch
+.project
+.cproject
+
+# Ignore build directory and binary files
+build/
+*.bin
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..4f054bc3d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,462 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TFTF Version
+VERSION_MAJOR := 2
+VERSION_MINOR := 0
+
+################################################################################
+# Default values for build configurations, and their dependencies
+################################################################################
+
+include defaults.mk
+
+PLAT := ${DEFAULT_PLAT}
+
+# Assertions enabled for DEBUG builds by default
+ENABLE_ASSERTIONS := ${DEBUG}
+
+################################################################################
+# Checkpatch script options
+################################################################################
+
+CHECKCODE_ARGS := --no-patch
+# Do not check the coding style on imported library files or documentation files
+INC_LIB_DIRS_TO_CHECK := $(sort $(filter-out \
+ include/lib/stdlib, \
+ $(wildcard include/lib/*)))
+LIB_DIRS_TO_CHECK := $(sort $(filter-out \
+ lib/compiler-rt \
+ lib/stdlib, \
+ $(wildcard lib/*)))
+ROOT_DIRS_TO_CHECK := $(sort $(filter-out \
+ lib \
+ include \
+ docs \
+ %.md \
+ %.rst, \
+ $(wildcard *)))
+CHECK_PATHS := ${ROOT_DIRS_TO_CHECK} \
+ ${INC_LIB_DIRS_TO_CHECK} \
+ ${LIB_DIRS_TO_CHECK}
+
+ifeq (${V},0)
+ Q=@
+else
+ Q=
+endif
+export Q
+
+ifneq (${DEBUG}, 0)
+ BUILD_TYPE := debug
+ # Use LOG_LEVEL_INFO by default for debug builds
+ LOG_LEVEL := 40
+else
+ BUILD_TYPE := release
+ # Use LOG_LEVEL_ERROR by default for release builds
+ LOG_LEVEL := 20
+endif
+
+# Default build string (git branch and commit)
+ifeq (${BUILD_STRING},)
+ BUILD_STRING := $(shell git log -n 1 --pretty=format:"%h")
+endif
+
+VERSION_STRING := v${VERSION_MAJOR}.${VERSION_MINOR}(${PLAT},${BUILD_TYPE}):${BUILD_STRING}
+
+BUILD_BASE := ./build
+BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${BUILD_TYPE}
+
+PLAT_MAKEFILE := platform.mk
+# Generate the platforms list by recursively searching for all directories
+# under /plat containing a PLAT_MAKEFILE. Append each platform with a `|`
+# char and strip out the final '|'.
+PLATFORMS := $(shell find plat/ -name '${PLAT_MAKEFILE}' -print0 | \
+ sed -r 's%[^\x00]*\/([^/]*)\/${PLAT_MAKEFILE}\x00%\1|%g' | \
+ sed -r 's/\|$$//')
+
+# Convenience function for adding build definitions
+# $(eval $(call add_define,BAR_DEFINES,FOO)) will have:
+# -DFOO if $(FOO) is empty; -DFOO=$(FOO) otherwise
+# inside the BAR_DEFINES variable.
+define add_define
+$(1) += -D$(2)$(if $(value $(2)),=$(value $(2)),)
+endef
+
+# Convenience function for verifying option has a boolean value
+# $(eval $(call assert_boolean,FOO)) will assert FOO is 0 or 1
+define assert_boolean
+$(and $(patsubst 0,,$(value $(1))),$(patsubst 1,,$(value $(1))),$(error $(1) must be boolean))
+endef
+
+ifeq (${PLAT},)
+ $(error "Error: Unknown platform. Please use PLAT=<platform name> to specify the platform")
+endif
+PLAT_PATH := $(shell find plat/ -wholename '*/${PLAT}')
+PLAT_MAKEFILE_FULL := ${PLAT_PATH}/${PLAT_MAKEFILE}
+ifeq ($(wildcard ${PLAT_MAKEFILE_FULL}),)
+ $(error "Error: Invalid platform. The following platforms are available: ${PLATFORMS}")
+endif
+
+.PHONY: all
+all: msg_start
+
+.PHONY: msg_start
+msg_start:
+ @echo "Building ${PLAT}"
+ @echo "Using the following tests file: ${TESTS_FILE}"
+
+# Include test images makefiles.
+include tftf/framework/framework.mk
+include tftf/tests/tests.mk
+include fwu/ns_bl1u/ns_bl1u.mk
+include fwu/ns_bl2u/ns_bl2u.mk
+include spm/cactus/cactus.mk
+
+# Include platform specific makefile last because:
+# - the platform makefile may use all previous definitions in this file.
+# - the platform makefile may wish overwriting some of them.
+include ${PLAT_MAKEFILE_FULL}
+
+
+.SUFFIXES:
+
+################################################################################
+# Build options checks
+################################################################################
+$(eval $(call assert_boolean,DEBUG))
+$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
+$(eval $(call assert_boolean,FIRMWARE_UPDATE))
+$(eval $(call assert_boolean,FWU_BL_TEST))
+$(eval $(call assert_boolean,NEW_TEST_SESSION))
+$(eval $(call assert_boolean,SHELL_COLOR))
+$(eval $(call assert_boolean,USE_NVM))
+
+################################################################################
+# Add definitions to the cpp preprocessor based on the current build options.
+# This is done after including the platform specific makefile to allow the
+# platform to overwrite the default options
+################################################################################
+$(eval $(call add_define,TFTF_DEFINES,ARM_ARCH_MAJOR))
+$(eval $(call add_define,TFTF_DEFINES,ARM_ARCH_MINOR))
+$(eval $(call add_define,TFTF_DEFINES,DEBUG))
+$(eval $(call add_define,TFTF_DEFINES,ENABLE_ASSERTIONS))
+$(eval $(call add_define,TFTF_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,TFTF_DEFINES,NEW_TEST_SESSION))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_${PLAT}))
+$(eval $(call add_define,TFTF_DEFINES,SHELL_COLOR))
+$(eval $(call add_define,TFTF_DEFINES,USE_NVM))
+
+ifeq (${ARCH},aarch32)
+ $(eval $(call add_define,TFTF_DEFINES,AARCH32))
+else
+ $(eval $(call add_define,TFTF_DEFINES,AARCH64))
+endif
+
+################################################################################
+
+# Assembler, compiler and linker flags shared across all test images.
+COMMON_ASFLAGS :=
+COMMON_CFLAGS :=
+COMMON_LDFLAGS :=
+
+ifeq (${DEBUG},1)
+COMMON_CFLAGS += -g
+COMMON_ASFLAGS += -g -Wa,--gdwarf-2
+endif
+
+COMMON_ASFLAGS_aarch64 := -mgeneral-regs-only
+COMMON_CFLAGS_aarch64 := -mgeneral-regs-only
+
+COMMON_ASFLAGS_aarch32 := -march=armv8-a
+COMMON_CFLAGS_aarch32 := -march=armv8-a
+
+COMMON_ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
+ -Werror -Wmissing-include-dirs \
+ -D__ASSEMBLY__ $(COMMON_ASFLAGS_$(ARCH))
+COMMON_CFLAGS += -nostdinc -ffreestanding -Wall -Werror \
+ -Wmissing-include-dirs $(COMMON_CFLAGS_$(ARCH)) \
+ -std=gnu99 -Os
+COMMON_CFLAGS += -ffunction-sections -fdata-sections
+
+# Get the content of CFLAGS user defined value last so they are appended after
+# the options defined in the Makefile
+COMMON_CFLAGS += ${CFLAGS}
+
+COMMON_LDFLAGS += --fatal-warnings -O1 --gc-sections --build-id=none
+
+CC := ${CROSS_COMPILE}gcc
+CPP := ${CROSS_COMPILE}cpp
+AS := ${CROSS_COMPILE}gcc
+AR := ${CROSS_COMPILE}ar
+LD := ${CROSS_COMPILE}ld
+OC := ${CROSS_COMPILE}objcopy
+OD := ${CROSS_COMPILE}objdump
+NM := ${CROSS_COMPILE}nm
+PP := ${CROSS_COMPILE}gcc
+
+################################################################################
+
+TFTF_SOURCES := ${FRAMEWORK_SOURCES} ${TESTS_SOURCES} ${PLAT_SOURCES}
+TFTF_INCLUDES += ${PLAT_INCLUDES}
+TFTF_CFLAGS += ${COMMON_CFLAGS}
+TFTF_ASFLAGS += ${COMMON_ASFLAGS}
+TFTF_LDFLAGS += ${COMMON_LDFLAGS}
+
+NS_BL1U_SOURCES += ${PLAT_SOURCES}
+NS_BL1U_INCLUDES += ${PLAT_INCLUDES}
+NS_BL1U_CFLAGS += ${COMMON_CFLAGS}
+NS_BL1U_ASFLAGS += ${COMMON_ASFLAGS}
+NS_BL1U_LDFLAGS += ${COMMON_LDFLAGS}
+
+NS_BL2U_SOURCES += ${PLAT_SOURCES}
+NS_BL2U_INCLUDES += ${PLAT_INCLUDES}
+NS_BL2U_CFLAGS += ${COMMON_CFLAGS}
+NS_BL2U_ASFLAGS += ${COMMON_ASFLAGS}
+NS_BL2U_LDFLAGS += ${COMMON_LDFLAGS}
+
+CACTUS_INCLUDES += ${PLAT_INCLUDES}
+CACTUS_CFLAGS += ${COMMON_CFLAGS}
+CACTUS_ASFLAGS += ${COMMON_ASFLAGS}
+CACTUS_LDFLAGS += ${COMMON_LDFLAGS}
+
+.PHONY: locate-checkpatch
+locate-checkpatch:
+ifndef CHECKPATCH
+ $(error "Please set CHECKPATCH to point to the Linux checkpatch.pl file, eg: CHECKPATCH=../linux/script/checkpatch.pl")
+else
+ifeq (,$(wildcard ${CHECKPATCH}))
+ $(error "The file CHECKPATCH points to cannot be found, use eg: CHECKPATCH=../linux/script/checkpatch.pl")
+endif
+endif
+
+.PHONY: clean
+clean:
+ @echo " CLEAN"
+ ${Q}rm -rf ${BUILD_PLAT}
+ ${MAKE} -C el3_payload clean
+
+.PHONY: realclean distclean
+realclean distclean:
+ @echo " REALCLEAN"
+ ${Q}rm -rf ${BUILD_BASE}
+ ${Q}rm -f ${CURDIR}/cscope.*
+ ${MAKE} -C el3_payload distclean
+
+.PHONY: checkcodebase
+checkcodebase: locate-checkpatch
+ @echo " CHECKING STYLE"
+ @if test -d .git ; then \
+ git ls-files | grep -E -v 'stdlib|docs|\.md|\.rst' | \
+ while read GIT_FILE ; \
+ do ${CHECKPATCH} ${CHECKCODE_ARGS} -f $$GIT_FILE ; \
+ done ; \
+ else \
+ find . -type f -not -iwholename "*.git*" \
+ -not -iwholename "*build*" \
+ -not -iwholename "*stdlib*" \
+ -not -iwholename "*docs*" \
+ -not -iwholename "*.md" \
+ -not -iwholename "*.rst" \
+ -exec ${CHECKPATCH} ${CHECKCODE_ARGS} -f {} \; ; \
+ fi
+
+.PHONY: checkpatch
+checkpatch: locate-checkpatch
+ @echo " CHECKING STYLE"
+ ${Q}COMMON_COMMIT=$$(git merge-base HEAD ${BASE_COMMIT}); \
+ for commit in `git rev-list $$COMMON_COMMIT..HEAD`; do \
+ printf "\n[*] Checking style of '$$commit'\n\n"; \
+ git log --format=email "$$commit~..$$commit" \
+ -- ${CHECK_PATHS} | ${CHECKPATCH} - || true; \
+ git diff --format=email "$$commit~..$$commit" \
+ -- ${CHECK_PATHS} | ${CHECKPATCH} - || true; \
+ done
+
+ifneq (${FIRMWARE_UPDATE},1)
+.PHONY: ns_bl1u ns_bl2u
+ns_bl1u ns_bl2u:
+ @echo "ERROR: Can't build $@ because Firmware Update is not supported \
+ on this platform."
+ @exit 1
+endif
+
+ifneq (${ARCH}-${PLAT},aarch64-fvp)
+.PHONY: cactus
+cactus:
+ @echo "ERROR: $@ is supported only on AArch64 FVP."
+ @exit 1
+endif
+
+MAKE_DEP = -Wp,-MD,$(DEP) -MT $$@
+
+define MAKE_C
+
+$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
+$(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
+
+$(OBJ) : $(2)
+ @echo " CC $$<"
+ $$(Q)$$(CC) $$($(3)_CFLAGS) ${$(3)_INCLUDES} ${$(3)_DEFINES} -DIMAGE_$(3) $(MAKE_DEP) -c $$< -o $$@
+
+-include $(DEP)
+endef
+
+
+define MAKE_S
+
+$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
+$(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
+
+$(OBJ) : $(2)
+ @echo " AS $$<"
+ $$(Q)$$(AS) $$($(3)_ASFLAGS) ${$(3)_INCLUDES} ${$(3)_DEFINES} -DIMAGE_$(3) $(MAKE_DEP) -c $$< -o $$@
+
+-include $(DEP)
+endef
+
+
+define MAKE_LD
+
+$(eval DEP := $(1).d)
+
+$(1) : $(2)
+ @echo " PP $$<"
+ $$(Q)$$(AS) $$($(3)_ASFLAGS) ${$(3)_INCLUDES} ${$(3)_DEFINES} -P -E $(MAKE_DEP) -o $$@ $$<
+
+-include $(DEP)
+endef
+
+
+define MAKE_OBJS
+ $(eval C_OBJS := $(filter %.c,$(2)))
+ $(eval REMAIN := $(filter-out %.c,$(2)))
+ $(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3))))
+
+ $(eval S_OBJS := $(filter %.S,$(REMAIN)))
+ $(eval REMAIN := $(filter-out %.S,$(REMAIN)))
+ $(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3))))
+
+ $(and $(REMAIN),$(error Unexpected source files present: $(REMAIN)))
+endef
+
+
+# NOTE: The line continuation '\' is required in the next define otherwise we
+# end up with a line-feed character at the end of the last c filename.
+# Also bear this issue in mind if extending the list of supported filetypes.
+define SOURCES_TO_OBJS
+ $(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \
+ $(notdir $(patsubst %.S,%.o,$(filter %.S,$(1))))
+endef
+
+define uppercase
+$(shell echo $(1) | tr '[:lower:]' '[:upper:]')
+endef
+
+define MAKE_IMG
+ $(eval IMG_PREFIX := $(call uppercase, $(1)))
+ $(eval BUILD_DIR := ${BUILD_PLAT}/$(1))
+ $(eval SOURCES := $(${IMG_PREFIX}_SOURCES))
+ $(eval OBJS := $(addprefix $(BUILD_DIR)/,$(call SOURCES_TO_OBJS,$(SOURCES))))
+ $(eval LINKERFILE := $(BUILD_DIR)/$(1).ld)
+ $(eval MAPFILE := $(BUILD_DIR)/$(1).map)
+ $(eval ELF := $(BUILD_DIR)/$(1).elf)
+ $(eval DUMP := $(BUILD_DIR)/$(1).dump)
+ $(eval BIN := $(BUILD_PLAT)/$(1).bin)
+
+ $(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),${IMG_PREFIX}))
+ $(eval $(call MAKE_LD,$(LINKERFILE),$(${IMG_PREFIX}_LINKERFILE),${IMG_PREFIX}))
+
+$(BUILD_DIR) :
+ $$(Q)mkdir -p "$$@"
+
+$(ELF) : $(OBJS) $(LINKERFILE)
+ @echo " LD $$@"
+ @echo 'const char build_message[] = "Built : "__TIME__", "__DATE__; \
+ const char version_string[] = "${VERSION_STRING}";' | \
+ $$(CC) $$(${IMG_PREFIX}_CFLAGS) ${${IMG_PREFIX}_INCLUDES} ${${IMG_PREFIX}_DEFINES} -c -xc - -o $(BUILD_DIR)/build_message.o
+ $$(Q)$$(LD) -o $$@ $$(${IMG_PREFIX}_LDFLAGS) -Map=$(MAPFILE) \
+ -T $(LINKERFILE) $(BUILD_DIR)/build_message.o $(OBJS)
+
+$(DUMP) : $(ELF)
+ @echo " OD $$@"
+ $${Q}$${OD} -dx $$< > $$@
+
+$(BIN) : $(ELF)
+ @echo " BIN $$@"
+ $$(Q)$$(OC) -O binary $$< $$@
+ @echo
+ @echo "Built $$@ successfully"
+ @echo
+
+.PHONY : $(1)
+$(1) : $(BUILD_DIR) $(BIN) $(DUMP)
+
+all : $(1)
+
+endef
+
+$(AUTOGEN_DIR):
+ $(Q)mkdir -p "$@"
+
+$(AUTOGEN_DIR)/tests_list.c $(AUTOGEN_DIR)/tests_list.h: $(AUTOGEN_DIR) ${TESTS_FILE} ${PLAT_TESTS_SKIP_LIST}
+ @echo " AUTOGEN $@"
+ tools/generate_test_list/generate_test_list.pl $(AUTOGEN_DIR)/tests_list.c $(AUTOGEN_DIR)/tests_list.h ${TESTS_FILE} $(PLAT_TESTS_SKIP_LIST)
+
+$(eval $(call MAKE_IMG,tftf))
+
+ifeq ($(FIRMWARE_UPDATE), 1)
+ $(eval $(call MAKE_IMG,ns_bl1u))
+ $(eval $(call MAKE_IMG,ns_bl2u))
+endif
+
+ifeq (${ARCH}-${PLAT},aarch64-fvp)
+ $(eval $(call MAKE_IMG,cactus))
+endif
+
+# The EL3 test payload is only supported in AArch64. It has an independent build
+# system.
+.PHONY: el3_payload
+ifneq (${ARCH},aarch32)
+el3_payload: $(BUILD_DIR)
+ ${Q}${MAKE} -C el3_payload PLAT=${PLAT}
+ ${Q}find "el3_payload/build/${PLAT}" -name '*.bin' -exec cp {} "${BUILD_PLAT}" \;
+
+all: el3_payload
+endif
+
+.PHONY: cscope
+cscope:
+ @echo " CSCOPE"
+ ${Q}find ${CURDIR} -name "*.[chsS]" > cscope.files
+ ${Q}cscope -b -q -k
+
+.PHONY: help
+help:
+ @echo "usage: ${MAKE} PLAT=<${PLATFORMS}> <all|tftf|ns_bl1u|ns_bl2u|cactus|el3_payload|distclean|clean|checkcodebase|checkpatch>"
+ @echo ""
+ @echo "PLAT is used to specify which platform you wish to build."
+ @echo "If no platform is specified, PLAT defaults to: ${DEFAULT_PLAT}"
+ @echo ""
+ @echo "Supported Targets:"
+ @echo " all Build all supported binaries for this platform"
+ @echo " (i.e. TFTF and FWU images)"
+ @echo " tftf Build the TFTF image"
+ @echo " ns_bl1u Build the NS_BL1U image"
+ @echo " ns_bl2u Build the NS_BL2U image"
+ @echo " cactus Build the Cactus image (Test S-EL0 payload)."
+ @echo " el3_payload Build the EL3 test payload"
+ @echo " checkcodebase Check the coding style of the entire source tree"
+ @echo " checkpatch Check the coding style on changes in the current"
+ @echo " branch against BASE_COMMIT (default origin/master)"
+ @echo " clean Clean the build for the selected platform"
+ @echo " cscope Generate cscope index"
+ @echo " distclean Remove all build artifacts for all platforms"
+ @echo ""
+ @echo "note: most build targets require PLAT to be set to a specific platform."
+ @echo ""
+ @echo "example: build all targets for the FVP platform:"
+ @echo " CROSS_COMPILE=aarch64-none-elf- make PLAT=fvp all"
diff --git a/contributing.rst b/contributing.rst
new file mode 100644
index 000000000..e2c701918
--- /dev/null
+++ b/contributing.rst
@@ -0,0 +1,96 @@
+Contributing to Trusted Firmware-A Tests
+========================================
+
+Getting Started
+---------------
+
+- Make sure you have a Github account and you are logged on
+ `developer.trustedfirmware.org`_.
+
+ For any non-trivial piece of work, we advise you to create a `task`_ if one
+ does not already exist. This gives everyone visibility of whether others are
+ working on something similar.
+
+ Please select the `Trusted Firmware-A Tests` tag in the task creation form.
+
+- Clone the repository from the Gerrit server. The project details may be found
+ on the `tf-a-tests project page`_. We recommend the "`Clone with commit-msg
+ hook`" clone method, which will setup the git commit hook that automatically
+ generates and inserts appropriate `Change-Id:` lines in your commit messages.
+
+- Base your work on the latest ``master`` branch.
+
+Making Changes
+--------------
+
+- Make commits of logical units. See these general `Git guidelines`_ for
+ contributing to a project.
+
+- Follow the `Linux coding style`_; this style is enforced for the TF-A Tests
+ project (style errors only, not warnings).
+
+ Use the ``checkpatch.pl`` script provided with the Linux source tree. A
+ Makefile target is provided for convenience (see section `Checking source
+ code style`_ in the User Guide).
+
+- Keep the commits on topic. If you need to fix another bug or make another
+ enhancement, please address it on a separate patch series.
+
+- Avoid long commit series. If you do have a long series, consider whether
+ some commits should be squashed together or addressed in a separate series.
+
+- Where appropriate, please update the documentation.
+
+- Ensure that each changed file has the correct copyright and license
+ information.
+
+- Please test your changes. As a minimum, ensure that all standard tests pass
+ on the Foundation FVP. See `Running the TF-A Tests`_ for more information.
+
+Submitting Changes
+------------------
+
+- Ensure that each commit in the series has at least one ``Signed-off-by:``
+ line, using your real name and email address. If anyone else contributes to
+ the commit, they must also add their own ``Signed-off-by:`` line.
+
+ By adding this line the contributor certifies the contribution is made under
+ the terms of the `Developer Certificate of Origin (DCO)`_.
+
+ More details may be found in the `Gerrit Signed-off-by Lines guidelines`_.
+
+- Ensure that each commit also has a unique ``Change-Id:`` line. If you have
+ cloned the repository with the "`Clone with commit-msg hook`" clone method
+ (as advised in section `Getting started`_), this should already be the case.
+
+ More details may be found in the `Gerrit Change-Ids documentation`_.
+
+- Submit your patches to the Gerrit review server. Please choose a Gerrit topic
+ for your patch series - that is, a short tag associated with all of the
+ changes in the same group, such as the local topic branch name.
+
+ Refer to the `Gerrit documentation`_ for more details.
+
+- Once the patch has had sufficient peer review and has passed the checks run
+ by the continuous integration system, the `maintainers`_ will merge your
+ patch.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _maintainers: maintainers.rst
+.. _license.rst: license.rst
+.. _Developer Certificate of Origin (DCO): dco.txt
+.. _Checking source code style: ../docs/user-guide.rst#checking-source-code-style
+.. _Running the TF-A Tests: ../docs/user-guide.rst#running-the-tf-a-tests
+
+.. _Git guidelines: http://git-scm.com/book/ch5-2.html
+.. _Linux coding style: https://www.kernel.org/doc/html/latest/process/coding-style.html
+
+.. _developer.trustedfirmware.org: https://developer.trustedfirmware.org
+.. _task: https://developer.trustedfirmware.org/maniphest/query/open/
+.. _tf-a-tests project page: https://review.trustedfirmware.org/#/admin/projects/tf-a-tests
+.. _Gerrit documentation: https://review.trustedfirmware.org/Documentation/user-upload.html
+.. _Gerrit Signed-off-by Lines guidelines: https://review.trustedfirmware.org/Documentation/user-signedoffby.html
+.. _Gerrit Change-Ids documentation: https://review.trustedfirmware.org/Documentation/user-changeid.html
diff --git a/dco.txt b/dco.txt
new file mode 100644
index 000000000..8201f9921
--- /dev/null
+++ b/dco.txt
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
diff --git a/defaults.mk b/defaults.mk
new file mode 100644
index 000000000..63c1b2e1c
--- /dev/null
+++ b/defaults.mk
@@ -0,0 +1,47 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Default, static values for build variables, listed in alphabetic order.
+# Dependencies between build options, if any, are handled in the top-level
+# Makefile, after this file is included. This ensures that the former is better
+# poised to handle dependencies, as all build variables would have a default
+# value by then.
+
+# The Target build architecture. Supported values are: aarch64, aarch32.
+ARCH := aarch64
+
+# ARM Architecture major and minor versions: 8.0 by default.
+ARM_ARCH_MAJOR := 8
+ARM_ARCH_MINOR := 0
+
+# Base commit to perform code check on
+BASE_COMMIT := origin/master
+
+# Debug/Release build
+DEBUG := 0
+
+# Build platform
+DEFAULT_PLAT := fvp
+
+# Whether the Firmware Update images (i.e. NS_BL1U and NS_BL2U images) should be
+# built. The platform makefile is free to override this value.
+FIRMWARE_UPDATE := 0
+
+# Enable FWU helper functions and inline tests in NS_BL1U and NS_BL2U images.
+FWU_BL_TEST := 1
+
+# Whether a new test session should be started every time or whether the
+# framework should try to resume a previous one if it was interrupted
+NEW_TEST_SESSION := 1
+
+# Use of shell colors
+SHELL_COLOR := 0
+
+# Use non volatile memory for storing results
+USE_NVM := 0
+
+# Build verbosity
+V := 0
diff --git a/docs/change-log.rst b/docs/change-log.rst
new file mode 100644
index 000000000..912a35a4d
--- /dev/null
+++ b/docs/change-log.rst
@@ -0,0 +1,241 @@
+
+.. section-numbering::
+ :suffix: .
+
+.. contents::
+
+Please note that the Trusted Firmware-A Tests version follows the Trusted
+Firmware-A version for simplicity. At any point in time, TF-A Tests version
+`x.y` aims at testing TF-A version `x.y`. Different versions of TF-A and TF-A
+Tests are not guaranteed to be compatible. This also means that a version
+upgrade on the TF-A Tests side might not necessarily introduce any new feature.
+
+Trusted Firmware-A Tests - version 2.0
+======================================
+
+New features
+------------
+
+This is the first public release of the Trusted Firmware-A Tests source code.
+
+TFTF
+````
+
+- Provides a baremetal test framework to exercise TF-A features through its
+ ``SMC`` interface.
+
+- Integrates easily with TF-A: the TFTF binary is packaged in the FIP image
+ as a ``BL33`` component.
+
+- Standalone binary that runs on the target without human intervention (except
+ for some specific tests that require a manual target reset).
+
+- Designed for multi-core testing. The various sub-frameworks allow maximum
+ parallelism in order to stress the firmware.
+
+- Displays test results on the UART output. This may then be parsed by an
+ external tool and integrated in a continuous integration system.
+
+- Supports running in AArch64 (NS-EL2 or NS-EL1) and AArch32 states.
+
+- Supports parsing a tests manifest (XML file) listing the tests to include in
+ the binary.
+
+- Detects most platform features at run time (e.g. topology, GIC version, ...).
+
+- Provides a topology enumeration framework. Allows tests to easily go through
+ affinity levels and power domain nodes.
+
+- Provides an event framework to synchronize CPU operations in a multi-core
+ context.
+
+- Provides a timer framework. Relies on a single global timer to generate
+ interrupts for all CPUs in the system. This allows tests to easily program
+ interrupts on demand to use as a wake-up event source to come out of CPU
+ suspend state for example.
+
+- Provides a power-state enumeration framework. Abstracts the valid power
+ states supported on the platform.
+
+- Provides helper functions for power management operations (CPU hotplug,
+ CPU suspend, system suspend, ...) with proper saving of the hardware state.
+
+- Supports rebooting the platform at the end of each test for greater
+ independence between tests.
+
+- Supports interrupting and resuming a test session. This relies on storing
+ test results in non-volatile memory (e.g. flash).
+
+FWU images
+``````````
+
+- Provides example code to exercise the Firmware Update feature of TF-A.
+
+- Tests the robustness of the FWU state machine implemented in the TF-A by
+ sending valid and invalid authentication, copy and image execution requests
+ to the TF-A BL1 image.
+
+EL3 test payload
+````````````````
+
+- Tests the ability of TF-A to load an EL3 payload.
+
+Cactus test secure partition
+````````````````````````````
+
+- Tests that TF-A has correctly setup the secure partition environment: it
+ should be allowed to perform cache maintenance operations, access floating
+ point registers, etc.
+
+- Tests the ability of a secure partition to request changing data access
+ permissions and instruction permissions of memory regions it owns.
+
+- Tests the ability of a secure partition to handle StandaloneMM requests.
+
+Known issues and limitations
+----------------------------
+
+The sections below list the known issues and limitations of each test image
+provided in this repository.
+
+TFTF
+````
+
+The TFTF test image might be conceptually sub-divided further in 2 parts: the
+tests themselves, and the test framework they are based upon.
+
+Test framework
+''''''''''''''
+
+- Some stability issues.
+
+- No mechanism to abort tests when they time out (e.g. this could be
+ implemented using a watchdog).
+
+- No convenient way to include or exclude tests on a per-platform basis.
+
+- Power domains and affinity levels are considered equivalent but they may
+ not necessarily be.
+
+- Need to provide better support to alleviate duplication of test code. There
+ are some recurrent test patterns for which helper functions should be
+ provided. For example, bringing up all CPUs on the platform and executing the
+ same function on all of them, or programming an interrupt and waiting for it
+ to trigger.
+
+- Every CPU that participates in a test must return from the test function. If
+ it does not - e.g. because it powered itself off for testing purposes - then
+ the test framework will wait forever for this CPU. This limitation is too
+ restrictive for some tests.
+
+- No protection against interrupted flash operations. If the target is reset
+ while some data is written to flash, the test framework might behave
+ incorrectly on reset.
+
+- When compiling the code, if the generation of the tests_list.c and/or
+ tests_list.h files fails, the build process is not aborted immediately and
+ will only fail later on.
+
+- The directory layout is confusing. Most of the test framework code has been
+ moved under the ``tftf/`` directory to better isolate it but this effort is
+ not complete. As a result, there are still some TFTF files scattered around.
+
+Tests
+'''''
+
+- Some tests are implemented for AArch64 only and are skipped on AArch32.
+
+- Some tests are not robust enough:
+
+ - Some tests might hang in some circumstances. For example, they might wait
+ forever for a condition to become true.
+
+ - Some tests rely on arbitrary time delays instead of proper synchronization
+ when executing order-sensitive steps.
+
+ - Some tests have been implemented in a practical manner: they seem to work
+ on actual hardware but they make assumptions that are not guaranteed by
+ the Arm architecture. Therefore, they might fail on some other platforms.
+
+- PSCI stress tests are very unreliable and will often hang. The root cause is
+ not known for sure but this might be due to bad synchronization between CPUs.
+
+- The GICv2 spurious IRQ test is Juno-specific. In reality, it should only be
+ GICv2-specific. It should be reworked to remove any platform-specific
+ assumption.
+
+- The GICv2 spurious IRQ test sometimes fails with the following error message:
+
+ ``SMC @ lead CPU returned 0xFFFFFFFF 0x8 0xC``
+
+ The root cause is unknown.
+
+- The manual tests in AArch32 mode do not work properly. They save some state
+ information into non-volatile memory in order to detect the reset reason but
+ this state does not appear to be retained. As a result, these tests keep
+ resetting infinitely.
+
+- The FWU tests take a long time to complete. This is because they wait for the
+ watchdog to reset the system. On FVP, TF-A configures the watchdog period to
+ about 4 min. This is way too long in an automated testing context. Besides,
+ the user gets no feedback, which may let them think that the tests are not
+ working properly.
+
+- The test "Target timer to a power down cpu" sometimes fails with the
+ following error message:
+
+ ``Expected timer switch: 4 Actual: 3``
+
+ The root cause is unknown.
+
+FWU images
+``````````
+
+- NS-BL1U and NS-BL2U images reuse TFTF-specific code for legacy reasons. This
+ is not a clean design and may cause confusion.
+
+Cactus test secure partition
+````````````````````````````
+
+- Cactus is experimental code. It's likely to change a lot as the secure
+ partition software architecture evolves.
+
+- Fails to link with GNU toolchain 7.3.1.
+
+- Cactus is supported on AArch64 FVP platform only.
+
+All test images
+```````````````
+
+- TF-A Tests are derived from a fork of TF-A so:
+
+ - they've got some code in common but lag behind on some features.
+
+ - there might still be some irrelevant references to TF-A.
+
+- Some design issues.
+ E.g. TF-A Tests inherited from the I/O layer of TF-A, which still needs a
+ major rework.
+
+- Cannot build TF-A Tests with Clang. Only GCC is supported.
+
+- The build system does not cope well with parallel building. The user should
+ not attempt to run multiple jobs in parallel with the ``-j`` option of `GNU
+ make`.
+
+- The build system does not properly track build options. A clean build must be
+ performed every time a build option changes.
+
+- SMCCC v2 is not properly supported.
+
+- UUIDs are not compliant with RFC 4122.
+
+- No floating point support. The code is compiled with GCC flag
+ ``-mgeneral-regs-only``, which prevents the compiler from generating code
+ that accesses floating point registers. This might limit some test scenarios.
+
+- The documentation is too lightweight.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
diff --git a/docs/design.rst b/docs/design.rst
new file mode 100644
index 000000000..a59c1d7b4
--- /dev/null
+++ b/docs/design.rst
@@ -0,0 +1,266 @@
+Trusted Firmware-A Tests - Design
+=================================
+
+.. section-numbering::
+ :suffix: .
+
+.. contents::
+
+This document provides some details about the internals of the TF-A Tests
+design. It is incomplete at the moment.
+
+Global overview of the TF-A tests behaviour
+-------------------------------------------
+
+The EL3 firmware is expected to hand over to the TF-A tests with all secondary
+cores powered down, i.e. only the primary core should enter the TF-A tests.
+
+The primary CPU initialises the platform and the TF-A tests internal data
+structures.
+
+Then the test session begins. The TF-A tests are executed one after the
+other. Tests results are saved in non-volatile memory as we go along.
+
+Once all tests have completed, a report is generated. The TF-A tests currently
+support 2 report formats:
+
+- Raw output, i.e. text messages over serial console.
+
+- Junit output, i.e. XML file generated over semihosting. This is useful when a
+ post-processing tool needs to parse the test report, as it is more
+ machine-friendly than the raw output.
+
+The report format to use is chosen at build time. For more information,
+refer to the section `Summary of build options`_ in the User Guide.
+
+Global Code Structure
+---------------------
+
+The code is organised into the following categories (present as directories at
+the top level or under the ``tftf/`` directory):
+
+- **Drivers.**
+
+ Some examples follow, this list might not be exhaustive.
+
+ - Generic GIC driver.
+
+ ``arm_gic.h`` contains the public APIs that tests might use. Both GIC
+ architecture versions 2 and 3 are supported.
+
+ - PL011 UART driver.
+
+ - VExpress NOR flash driver.
+
+ Note that tests are not expected to use this driver in most
+ cases. Instead, they should use the ``tftf_nvm_read()`` and
+ ``tftf_nvm_write()`` wrapper APIs. See definitions in
+ ``tftf/framework/include/nvm.h``. See also the NVM validation test cases
+ (``tftf/tests/framework_validation_tests/test_validation_nvm.c``) for an
+ example of usage of these functions.
+
+ - SP805 watchdog.
+
+ Used solely to generate an interrupt that will reset the system on purpose
+ (used in ``tftf_plat_reset()``).
+
+ - SP804 timer.
+
+ This is used as the system timer on Juno. It is configured such that an
+ interrupt is generated when it reaches 0. It is programmed in one-shot
+ mode, i.e. it must be rearmed every time it reaches 0.
+
+- **Framework.**
+
+ Core features of the test framework.
+
+- **Library code.**
+
+ Some examples of APIs provided in ``include/lib/`` follow, this list might
+ not be exhaustive.
+
+ - ``aarch64/``
+
+ Architecture helper functions for e.g. system registers access, cache
+ maintenance operations, MMU configuration, ...
+
+ - ``events.h``
+
+ Events API. Used to create synchronisation points between CPUs in tests.
+
+ - ``irq.h``
+
+ IRQ handling support. Used to configure IRQs and register/unregister
+ handlers called upon reception of a specific IRQ.
+
+ - ``power_management.h``
+
+ Power management operations (CPU ON/OFF, CPU suspend, etc.).
+
+ - ``sgi.h``
+
+ Software Generated Interrupt support. Used as an inter-CPU communication
+ mechanism.
+
+ - ``spinlock.h``
+
+ Lightweight implementation of synchronisation locks. Used to prevent
+ concurrent accesses to shared data structures.
+
+ - ``timer.h``
+
+ Support for programming the timer. Any timer which is in the `always-on`
+ power domain can be used to exit CPUs from suspend state.
+
+ - ``tftf_lib.h``
+
+ Miscellaneous helper functions/macros: MP-safe ``printf()``, low-level
+ PSCI wrappers, insertion of delays, raw SMC interface, support for writing
+ a string in the test report, macros to skip tests on platforms that do not
+ meet topology requirements, etc.
+
+ - ``semihosting.h``
+
+ Semihosting support.
+
+ - ``io_storage.h``
+
+ Low-level IO operations. Tests are not expected to use these APIs
+ directly. They should use higher-level APIs like ``tftf_nvm_read()``
+ and ``tftf_nvm_write()``.
+
+ ``include/stdlib/`` provides standard C library functions (``memcpy()``,
+ ``printf()``, and so on).
+
+- **Platform specific.**
+
+ Note that ``include/plat/common/plat_topology.h`` provides the interfaces
+ that a platform must implement to support topology discovery (i.e. how many
+ CPUs and clusters there are).
+
+- **Tests.**
+
+The tests are divided into the following categories (present as directories in
+the ``tftf/tests/`` directory):
+
+- **Framework validation tests.**
+
+ Tests that exercise the core features of the framework. Verify that the test
+ framework itself works properly.
+
+- **Runtime services tests.**
+
+ Tests that exercise the runtime services offered by the EL3 Firmware to the
+ Normal World software. For example, this includes tests for the Standard
+ Service (to which PSCI belongs), the Trusted OS service or the SiP
+ service.
+
+- **CPU extensions tests.**
+
+ Tests some CPU extensions features. For example, the AMU tests ensure that
+ the counters provided by the Activity Monitor Unit are behaving correctly.
+
+- **Firmware Update tests.**
+
+ Tests that exercise the `Firmware Update`_ feature of TF-A.
+
+- **Template tests.**
+
+ Sample test code showing how to write tests in practice. Serves as
+ documentation.
+
+- **Performance tests.**
+
+ Simple tests measuring the latency of an SMC call.
+
+- **Miscellaneous tests.**
+
+ Tests for RAS support, correct system setup, ...
+
+All assembler files have the ``.S`` extension. The linker source file has the
+extension ``.ld.S``. This is processed by GCC to create the linker script which
+has the extension ``.ld``.
+
+Detailed Code Structure
+-----------------------
+
+The cold boot entry point is ``tftf_entrypoint`` (see
+``tftf/framework/aarch64/entrypoint.S``). As explained in section `Global
+overview of the TF-A tests behaviour`_, only the primary CPU is expected to
+execute this code.
+
+Tests can power on other CPUs using the function ``tftf_cpu_on()``. This uses
+the PSCI ``CPU_ON`` API of the EL3 Firmware. When entering the Normal World,
+execution starts at the warm boot entry point, which is ``tftf_hotplug_entry()``
+(see ``tftf/framework/aarch64/entrypoint.S``).
+
+Information about the progression of the test session and tests results are
+written into Non-Volatile Memory as we go along. This consists of the following
+data (see struct ``tftf_state_t`` typedef in ``tftf/framework/include/nvm.h``):
+
+- ``test_to_run``
+
+ Reference to the test to run.
+
+- ``test_progress``
+
+ Progress in the execution of ``test_to_run``. This is used to implement the
+ following state machine:
+
+::
+
+ +-> TEST_READY (initial state of the test) <--------------+
+ | | |
+ | | Test framework prepares the test environment. |
+ | | |
+ | v |
+ | TEST_IN_PROGRESS |
+ | | |
+ | | Hand over to the test function. |
+ | | If the test wants to reboot the platform ---> TEST_REBOOTING |
+ | | | |
+ | | Test function returns into framework. | Reboot |
+ | | | |
+ | | +---------+
+ | v
+ | TEST_COMPLETE
+ | |
+ | | Do some framework management.
+ | | Move to next test.
+ +--------+
+
+- ``testcase_buffer``
+
+ A buffer that the test can use as a scratch area for whatever it is doing.
+
+- ``testcase_results``
+
+- ``result_buffer_size``
+
+- ``result_buffer``
+
+ Buffer holding the tests output. Tests output are concatenated.
+
+Interrupts management
+---------------------
+
+The TF-A tests expect SGIs #0 to #7 to be available for their own usage. In
+particular, this means that Trusted World software must configure them as
+non-secure interrupts.
+
+SGI #7 has a special status. It is the SGI that the timer management framework
+sends to all CPUs when the system timer fires off (see the definition of the
+constant ``IRQ_WAKE_SGI`` in the header file ``include/lib/irq.h``). Although
+test cases can use this specific SGI - e.g. they can register an IRQ handler for
+it and use it as an inter-CPU communication mechanism - they have to be aware of
+the underlying consequences. Some tests, like the PSCI ``CPU_SUSPEND`` tests,
+rely on this SGI to be enabled in order to wake up CPUs from their suspend
+state. If it is disabled, these tests will leave the system in an unresponsive
+state.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _Summary of build options: user-guide.html#summary-of-build-options
+.. _Firmware Update: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/firmware-update.rst
diff --git a/docs/implementing-tests.rst b/docs/implementing-tests.rst
new file mode 100644
index 000000000..f7be28433
--- /dev/null
+++ b/docs/implementing-tests.rst
@@ -0,0 +1,128 @@
+How to implement tests
+======================
+
+.. section-numbering::
+ :suffix: .
+
+.. contents::
+
+This document aims at providing some pointers to help implementing new tests in
+the TFTF image.
+
+Structure of a test
+-------------------
+
+A test might be divided into 3 logical parts, detailed in the following
+sections.
+
+Prologue
+''''''''
+
+A test has a main entry point function, whose type is:
+
+::
+
+ typedef test_result_t (*test_function_t)(void);
+
+See `tftf/framework/include/tftf.h`_.
+
+Only the primary CPU enters this function, while other CPUs are powered down.
+
+First of all, the test function should check whether this test is applicable to
+this platform and environment. Some tests rely on specific hardware features or
+firmware capabilities to be present. If these are not available, the test should
+be skipped. For example, a multi-core test requires at least 2 CPUs to
+run. Macros and functions are provided in `include/common/test_helpers.h`_ to
+help test code verify that their requirements are met.
+
+Core
+''''
+
+This is completely dependent on the purpose of the test. The paragraphs below
+just provide some useful, general information.
+
+The primary CPU may power on other CPUs by calling the function
+``tftf_cpu_on()``. It provides an address to which secondary CPUs should jump
+once they have been initialized by the test framework. This address should be
+different from the primary CPU test function.
+
+Synchronization primitives are provided in `include/lib/events.h`_ in case CPUs'
+execution threads need to be synchronized. Most multi-processing tests will need
+some synchronisation points that all/some CPUs need to reach before test
+execution may continue.
+
+Any CPU that is involved in a test must return from its test function. Failure
+to do so will put the framework in an unrecoverable state, see the `TFTF known
+limitations`_. The return code indicates the test result from the point of view
+of this CPU. At the end of the test, individual CPU results are aggregated and
+the overall test result is derived from that. A test is considered as passed if
+all involved CPUs reported a success status code.
+
+Epilogue
+''''''''
+
+Each test is responsible for releasing any allocated resources and putting the
+system back in a clean state when it finishes. Any change to the system
+configuration (e.g. MMU setup, GIC configuration, system registers, ...) must be
+undone and the original configuration must be restored. This guarantees that the
+next test is not affected by the actions of the previous one.
+
+One exception to this rule is that CPUs powered on as part of a test must not be
+powered down. As already stated above, as soon as a CPU enters the test, the
+framework expects it to return from the test.
+
+Template test code
+------------------
+
+Some template test code is provided in `tftf/tests/template_tests`_. It can be
+used as a starting point for developing new tests. Template code for both
+single-core and multi-core tests is provided.
+
+Plugging the test into the build system
+---------------------------------------
+
+All test code is located under the `tftf/tests`_ directory. Tests are usually
+divided into categories represented as sub-directories under ``tftf/tests/``.
+
+The source file implementing the new test code should be added to the
+appropriate tests makefile, see `.*mk` files under `tftf/tests`_.
+
+The new test code should also appear in a tests manifest, see ``*.xml`` files
+under `tftf/tests`_. A unique name and test function must be provided. An
+optional description may be provided as well.
+
+For example, to create a test case named "``Foo test case``", whose test
+function is ``foo()``, add the following line in the tests manifest:
+
+::
+
+ <testcase name="Foo test case" function="foo" />
+
+A testcase must be part of a testsuite. The ``testcase`` XML node above must be
+inside a ``testsuite`` XML node. A unique name and a description must be
+provided for the testsuite.
+
+For example, to create a test suite named "``Bar test suite``", whose
+description is: "``An example test suite``", add the following 2 lines:
+
+::
+
+ <testsuite name="Bar test suite" description="An example test suite">
+ </testsuite>
+
+See the template test manifest for reference: `tftf/tests/tests-template.xml`_.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _SMC Calling Convention: SMCCC_
+.. _SMCCC: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+
+.. _TFTF known limitations: change-log.rst#test-framework
+.. _tftf/framework/include/tftf.h: ../tftf/framework/include/tftf.h
+.. _tftf/tests: ../tftf/tests
+.. _tftf/tests/template_tests: ../tftf/tests/template_tests
+.. _tftf/tests/tests-template.xml: ../tftf/tests/tests-template.xml
+.. _include/common/test_helpers.h: ../include/common/test_helpers.h
+.. _include/lib/events.h: ../include/lib/events.h
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
new file mode 100644
index 000000000..59d9dbad5
--- /dev/null
+++ b/docs/porting-guide.rst
@@ -0,0 +1,409 @@
+Trusted Firmware-A Tests - Porting Guide
+========================================
+
+.. section-numbering::
+ :suffix: .
+
+.. contents::
+
+--------------
+
+Introduction
+------------
+
+Please note that this document is incomplete.
+
+Porting the TF-A Tests to a new platform involves making some mandatory and
+optional modifications for both the cold and warm boot paths. Modifications
+consist of:
+
+* Implementing a platform-specific function or variable,
+* Setting up the execution context in a certain way, or
+* Defining certain constants (for example #defines).
+
+The platform-specific functions and variables are all declared in
+``include/plat/common/platform.h``. The framework provides a default
+implementation of variables and functions to fulfill the optional requirements.
+These implementations are all weakly defined; they are provided to ease the
+porting effort. Each platform port can override them with its own implementation
+if the default implementation is inadequate.
+
+Platform requirements
+---------------------
+
+The TF-A Tests rely on the following features to be present on the platform and
+accessible from Normal World.
+
+- Watchdog
+- Non-Volatile Memory
+- System Timer
+
+This also means that a platform port of the TF-A Tests must include software
+drivers for those features.
+
+Mandatory modifications
+-----------------------
+
+File : platform_def.h [mandatory]
+`````````````````````````````````
+
+Each platform must ensure that a header file of this name is in the system
+include path with the following constants defined. This may require updating the
+list of ``PLAT_INCLUDES`` in the ``platform.mk`` file. In the ARM FVP port, this
+file is found in ``plat/arm/board/fvp/include/platform_def.h``.
+
+- **#define : PLATFORM_LINKER_FORMAT**
+
+ Defines the linker format used by the platform, for example
+ `elf64-littleaarch64` used by the FVP.
+
+- **#define : PLATFORM_LINKER_ARCH**
+
+ Defines the processor architecture for the linker by the platform, for
+ example `aarch64` used by the FVP.
+
+- **#define : PLATFORM_STACK_SIZE**
+
+ Defines the stack memory available to each CPU. This constant is used by
+ ``plat/common/aarch64/platform_mp_stack.S``.
+
+- **#define : PLATFORM_CLUSTER_COUNT**
+
+ Defines the total number of clusters implemented by the platform in the
+ system.
+
+- **#define : PLATFORM_CORE_COUNT**
+
+ Defines the total number of CPUs implemented by the platform across all
+ clusters in the system.
+
+- **#define : PLATFORM_NUM_AFFS**
+
+ Defines the total number of nodes in the affinity hierarchy at all affinity
+ levels used by the platform.
+
+- **#define : PLATFORM_MAX_AFFLVL**
+
+ Defines the maximum number of affinity levels in the system that the platform
+ implements. ARMv8-A has support for 4 affinity levels. It is likely that
+ hardware will implement fewer affinity levels. For example, the Base AEM FVP
+ implements two clusters with a configurable number of CPUs. It reports the
+ maximum affinity level as 1.
+
+- **#define : PLAT_MAX_SPI_OFFSET_ID**
+
+ Defines the offset of the last Shared Peripheral Interrupt supported by the
+ TF-A Tests on this platform. SPI numbers are mapped onto GIC interrupt IDs,
+ starting from interrupt ID 32. In other words, this offset ID corresponds to
+ the last SPI number, to which 32 must be added to get the corresponding last
+ GIC IRQ ID.
+
+ E.g. If ``PLAT_MAX_SPI_OFFSET_ID`` is 10, this means that IRQ #42 is the last
+ SPI.
+
+- **#define : PLAT_LOCAL_PSTATE_WIDTH**
+
+ Defines the bit-field width of the local state in State-ID field of the
+ power-state parameter. This macro will be used to compose the State-ID field
+ given the local power state at different affinity levels.
+
+- **#define : PLAT_MAX_PWR_STATES_PER_LVL**
+
+ Defines the maximum number of power states at a power domain level for the
+ platform. This macro will be used by the ``PSCI_STAT_COUNT/RESIDENCY`` tests
+ to determine the size of the array to allocate for storing the statistics.
+
+- **#define : TFTF_BASE**
+
+ Defines the base address of the TFTF binary in DRAM. Used by the linker
+ script to link the image at the right address. Must be aligned on a page-size
+ boundary.
+
+- **#define : IRQ_PCPU_NS_TIMER**
+
+ Defines the IRQ ID of the per-CPU Non-Secure timer of the platform.
+
+- **#define : IRQ_CNTPSIRQ1**
+
+ Defines the IRQ ID of the System timer of the platform.
+
+- **#define : TFTF_NVM_OFFSET**
+
+ The TFTF needs some Non-Volatile Memory to store persistent data. This
+ defines the offset from the beginning of this memory that the TFTF can use.
+
+- **#define : TFTF_NVM_SIZE**
+
+ Defines the size of the Non-Volatile Memory allocated for TFTF usage.
+
+If the platform port uses the ARM Watchdog Module (`SP805`_) peripheral, the
+following constant needs to be defined:
+
+- **#define : SP805_WDOG_BASE**
+
+ Defines the base address of the `SP805`_ watchdog peripheral.
+
+If the platform port uses the IO storage framework, the following constants
+must also be defined:
+
+- **#define : MAX_IO_DEVICES**
+
+ Defines the maximum number of registered IO devices. Attempting to register
+ more devices than this value using ``io_register_device()`` will fail with
+ ``IO_RESOURCES_EXHAUSTED``.
+
+- **#define : MAX_IO_HANDLES**
+
+ Defines the maximum number of open IO handles. Attempting to open more IO
+ entities than this value using ``io_open()`` will fail with
+ ``IO_RESOURCES_EXHAUSTED``.
+
+If the platform port uses the VExpress NOR flash driver (see
+``drivers/io/vexpress_nor/``), the following constants must also be defined:
+
+- **#define : NOR_FLASH_BLOCK_SIZE**
+
+ Defines the largest block size as seen by the software while writing to NOR
+ flash.
+
+Function : tftf_plat_arch_setup() [mandatory]
+`````````````````````````````````````````````
+::
+
+ Argument : void
+ Return : void
+
+This function performs any platform-specific and architectural setup that the
+platform requires.
+
+In both the ARM FVP and Juno ports, this function configures and enables the
+MMU.
+
+Function : tftf_early_platform_setup() [mandatory]
+``````````````````````````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU. It is used to perform platform-specific actions very early
+in the boot.
+
+In both the ARM FVP and Juno ports, this function configures the console.
+
+Function : tftf_platform_setup() [mandatory]
+````````````````````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function executes with the MMU and data caches enabled. It is responsible
+for performing any remaining platform-specific setup that can occur after the
+MMU and data cache have been enabled.
+
+This function is also responsible for initializing the storage abstraction layer
+used to access non-volatile memory for permanent storage of test results. It
+also initialises the GIC and detects the platform topology using
+platform-specific means.
+
+Function : plat_get_nvm_handle() [mandatory]
+````````````````````````````````````````````
+
+::
+
+ Argument : uintptr_t *
+ Return : void
+
+It is needed if the platform port uses the IO storage framework. This function
+is responsible for getting the pointer to the initialised non-volatile memory
+entity.
+
+Function : tftf_plat_get_pwr_domain_tree_desc() [mandatory]
+```````````````````````````````````````````````````````````
+
+::
+
+ Argument : void
+ Return : const unsigned char *
+
+This function returns the platform topology description array in a suitable
+format as expected by TFTF. The size of the array is expected to be
+``PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1``. The format used to describe
+this array is :
+
+1. The first entry in the array specifies the number of power domains at the
+ highest power level implemented in the platform. This caters for platforms
+ where the power domain tree does not have a single root node e.g. the FVP
+ which has two cluster power domains at the highest level (that is, 1).
+
+2. Each subsequent entry corresponds to a power domain and contains the number
+ of power domains that are its direct children.
+
+The array format is the same as the one used by Trusted Firmware-A and more
+details of its description can be found in the Trusted Firmware-A documentation:
+`docs/psci-pd-tree.rst`_.
+
+Function : tftf_plat_get_mpidr() [mandatory]
+````````````````````````````````````````````
+
+::
+
+ Argument : unsigned int
+ Return : uint64_t
+
+This function converts a given `core_pos` into a valid MPIDR if the CPU is
+present in the platform. The `core_pos` is a unique number less than the
+``PLATFORM_CORE_COUNT`` returned by ``platform_get_core_pos()`` for a given
+CPU. This API is used by the topology framework in TFTF to query the presence of
+a CPU and, if present, returns the corresponding MPIDR for it. If the CPU
+referred to by the `core_pos` is absent, then this function returns
+``INVALID_MPID``.
+
+Function : plat_get_state_prop() [mandatory]
+````````````````````````````````````````````
+
+::
+
+ Argument : unsigned int
+ Return : const plat_state_prop_t *
+
+This function returns the ``plat_state_prop_t`` array for all the valid low
+power states from platform for a specified affinity level and returns ``NULL``
+for an invalid affinity level. The array is expected to be NULL-terminated.
+This function is expected to be used by tests that need to compose the power
+state parameter for use in ``PSCI_CPU_SUSPEND`` API or ``PSCI_STAT/RESIDENCY``
+API.
+
+Function : plat_fwu_io_setup() [mandatory]
+``````````````````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function initializes the IO system used by the firmware update.
+
+Function : plat_arm_gic_init() [mandatory]
+``````````````````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function initializes the ARM Generic Interrupt Controller (GIC).
+
+Optional modifications
+----------------------
+
+The following are helper functions implemented by the test framework that
+perform common platform-specific tasks. A platform may choose to override these
+definitions.
+
+Function : platform_get_stack()
+```````````````````````````````
+
+::
+
+ Argument : unsigned long
+ Return : unsigned long
+
+This function returns the base address of the memory stack that has been
+allocated for the CPU specified by MPIDR. The size of the stack allocated to
+each CPU is specified by the platform defined constant ``PLATFORM_STACK_SIZE``.
+
+Common implementation of this function is provided in
+``plat/common/aarch64/platform_mp_stack.S``.
+
+Function : tftf_platform_end()
+``````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function performs any operation required by the platform to properly finish
+the test session.
+
+The default implementation sends an EOT (End Of Transmission) character on the
+UART. This can be used to automatically shut down the FVP models. When running on
+real hardware, the UART output may be parsed by an external tool looking for
+this character and rebooting the platform for example.
+
+Function : tftf_plat_reset()
+````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+This function resets the platform.
+
+The default implementation uses the ARM watchdog peripheral (`SP805`_) to
+generate a watchdog timeout interrupt. This interrupt remains deliberately
+unserviced, which eventually asserts the reset signal.
+
+Function : tftf_platform_setup()
+````````````````````````````````
+
+::
+
+ Argument : void
+ Return : void
+
+Setup code for platform hardware. The default implementation initializes the IO
+and GIC.
+
+Storage abstraction layer
+-------------------------
+
+In order to improve platform independence and portability a storage abstraction
+layer is used to store test results to non-volatile platform storage.
+
+Each platform should register devices and their drivers via the Storage layer.
+These drivers then need to be initialized in the ``tftf_platform_setup()`` function.
+
+It is mandatory to implement at least one storage driver. For the FVP and Juno
+platforms the NOR Flash driver is provided as the default means to store test
+results to storage. The storage layer is described in the header file
+``include/lib/io_storage.h``. The implementation of the common library is in
+``drivers/io/io_storage.c`` and the driver files are located in ``drivers/io/``.
+
+
+Build Flags
+-----------
+
+- **PLAT_TESTS_SKIP_LIST**
+
+This build flag can be defined by the platform to control exclusion of some
+testcases from the default test plan for a platform. If used, this needs to
+point to a text file that meets the following criteria:
+
+ - Contain a list of tests to skip for this platform.
+
+ - Specify 1 test per line, using the following format:
+
+ ::
+
+ testsuite_name/testcase_name
+
+ where ``testsuite_name`` and ``testcase_name`` are the names that appear in
+ the XML tests file.
+
+ - Alternatively, it is possible to disable a test suite entirely, which will
+ disable all test cases part of this test suite. To do so, only specify the
+ test suite name, omitting the ``/testcase_name`` part.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _docs/psci-pd-tree.rst: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/psci-pd-tree.rst
+.. _SP805: https://static.docs.arm.com/ddi0270/b/DDI0270.pdf
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
new file mode 100644
index 000000000..b638c6872
--- /dev/null
+++ b/docs/user-guide.rst
@@ -0,0 +1,550 @@
+Trusted Firmware-A Tests - User Guide
+=====================================
+
+.. section-numbering::
+ :suffix: .
+
+.. contents::
+
+This document describes how to build the Trusted Firmware-A Tests (TF-A Tests)
+and run them on a set of platforms. It assumes that the reader has previous
+experience building and running the `Trusted Firmware-A (TF-A)`_.
+
+Host machine requirements
+-------------------------
+
+The minimum recommended machine specification for building the software and
+running the `FVP models`_ is a dual-core processor running at 2GHz with 12GB of
+RAM. For best performance, use a machine with a quad-core processor running at
+2.6GHz with 16GB of RAM.
+
+The software has been tested on Ubuntu 16.04 LTS (64-bit). Packages used for
+building the software were installed from that distribution unless otherwise
+specified.
+
+Tools
+-----
+
+Install the required packages to build TF-A Tests with the following command:
+
+::
+
+ sudo apt-get install build-essential make git perl libxml-libxml-perl
+
+Download and install the GNU cross-toolchain from Linaro. The TF-A Tests have
+been tested with version 6.2-2016.11 (gcc 6.2):
+
+- `AArch32 GNU cross-toolchain`_
+- `AArch64 GNU cross-toolchain`_
+
+In addition, the following optional packages and tools may be needed:
+
+- For debugging, Arm `Development Studio 5 (DS-5)`_.
+
+Getting the TF-A Tests source code
+----------------------------------
+
+Download the TF-A Tests source code using the following command:
+
+::
+
+ git clone https://git.trustedfirmware.org/tf-a-tests.git
+
+Building TF-A Tests
+-------------------
+
+- Before building TF-A Tests, the environment variable ``CROSS_COMPILE`` must
+ point to the Linaro cross compiler.
+
+ For AArch64:
+
+ ::
+
+ export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-linux-gnu-
+
+ For AArch32:
+
+ ::
+
+ export CROSS_COMPILE=<path-to-aarch32-gcc>/bin/arm-linux-gnueabihf-
+
+- Change to the root directory of the TF-A Tests source tree and build.
+
+ For AArch64:
+
+ ::
+
+ make PLAT=<platform>
+
+ For AArch32:
+
+ ::
+
+ make PLAT=<platform> ARCH=aarch32
+
+ Notes:
+
+ - If ``PLAT`` is not specified, ``fvp`` is assumed by default. See the
+ `Summary of build options`_ for more information on available build
+ options.
+
+ - By default this produces a release version of the build. To produce a
+ debug version instead, build the code with ``DEBUG=1``.
+
+ - The build process creates products in a ``build/`` directory tree,
+ building the objects and binaries for each test image in separate
+ sub-directories. The following binary files are created from the
+ corresponding ELF files:
+
+ - ``build/<platform>/<build-type>/tftf.bin``
+ - ``build/<platform>/<build-type>/ns_bl1u.bin``
+ - ``build/<platform>/<build-type>/ns_bl2u.bin``
+ - ``build/<platform>/<build-type>/el3_payload.bin``
+ - ``build/<platform>/<build-type>/cactus.bin``
+
+ where ``<platform>`` is the name of the chosen platform and ``<build-type>``
+ is either ``debug`` or ``release``. The actual number of images might differ
+ depending on the platform.
+
+ Refer to the sections below for more information about each image.
+
+- Build products for a specific build variant can be removed using:
+
+ ::
+
+ make DEBUG=<D> PLAT=<platform> clean
+
+ ... where ``<D>`` is ``0`` or ``1``, as specified when building.
+
+ The build tree can be removed completely using:
+
+ ::
+
+ make realclean
+
+- Use the following command to list all supported build commands:
+
+ ::
+
+ make help
+
+TFTF test image
+```````````````
+
+``tftf.bin`` is the main test image to exercise the TF-A features. The other
+test images provided in this repository are optional dependencies that TFTF
+needs to test some specific features.
+
+``tftf.bin`` may be built independently of the other test images using the
+following command:
+
+::
+
+ make PLAT=<platform> tftf
+
+In TF-A boot flow, ``tftf.bin`` replaces the ``BL33`` image and should be
+injected in the FIP image. This might be achieved by running the following
+command from the TF-A root directory:
+
+::
+
+ BL33=tftf.bin make PLAT=<platform> fip
+
+Please refer to the `TF-A User guide`_ for further details.
+
+NS_BL1U and NS_BL2U test images
+```````````````````````````````
+
+``ns_bl1u.bin`` and ``ns_bl2u.bin`` are test images that exercise the `Firmware
+Update`_ (FWU) feature of TF-A [#]_. Throughout this document, they will be
+referred to as the `FWU test images`.
+
+In addition to updating the firmware, the FWU test images also embed some tests
+that exercise the `FWU state machine`_ implemented in the TF-A. They send valid
+and invalid SMC requests to the TF-A BL1 image in order to test its robustness.
+
+NS_BL1U test image
+''''''''''''''''''
+
+The ``NS_BL1U`` image acts as the `Application Processor (AP) Firmware Update
+Boot ROM`. This typically is the first software agent executing on the AP in the
+Normal World during a firmware update operation. Its primary purpose is to load
+subsequent firmware update images from an external interface, such as NOR Flash,
+and communicate with ``BL1`` to authenticate those images.
+
+The ``NS_BL1U`` test image provided in this repository performs the following
+tasks:
+
+- Load FWU images from external non-volatile storage (typically flash memory)
+ to Non-Secure RAM.
+
+- Request TF-A BL1 to copy these images in Secure RAM and authenticate them.
+
+- Jump to ``NS_BL2U`` which carries out the next steps in the firmware update
+ process.
+
+This image may be built independently of the other test images using the
+following command:
+
+::
+
+ make PLAT=<platform> ns_bl1u
+
+NS_BL2U test image
+''''''''''''''''''
+
+The ``NS_BL2U`` image acts as the `AP Firmware Updater`. Its primary
+responsibility is to load a new set of firmware images from an external
+interface and write them into non-volatile storage.
+
+The ``NS_BL2U`` test image provided in this repository overrides the original
+FIP image stored in flash with the backup FIP image (see below).
+
+This image may be built independently of the other test images using the
+following command:
+
+::
+
+ make PLAT=<platform> ns_bl2u
+
+Putting it all together
+'''''''''''''''''''''''
+
+The FWU test images should be used in conjunction with the TFTF image, as the
+latter initiates the FWU process by corrupting the FIP image and resetting the
+target. Once the FWU process is complete, TFTF takes over again and checks that
+the firmware was successfully updated.
+
+To sum up, 3 images must be built out of the TF-A Tests repository in order to
+test the TF-A Firmware Update feature:
+
+ - ``ns_bl1u.bin``
+ - ``ns_bl2u.bin``
+ - ``tftf.bin``
+
+Once that's done, they must be combined in the right way.
+
+ - ``ns_bl1u.bin`` is a standalone image and does not require any further
+ processing.
+
+ - ``ns_bl2u.bin`` must be injected into the ``FWU_FIP`` image. This might be
+ achieved by setting ``NS_BL2U=ns_bl2u.bin`` when building the ``FWU_FIP``
+ image out of the TF-A repository. Please refer to the section `Building FIP
+ images with support for Trusted Board Boot`_ in the TF-A User Guide.
+
+ - ``tftf.bin`` must be injected in the standard FIP image, as explained
+ in section `TFTF test image`_.
+
+Additionally, on Juno platform, the FWU FIP must contain a ``SCP_BL2U`` image.
+This image can simply be a copy of the standard ``SCP_BL2`` image if no specific
+firmware update operations need to be carried out on the SCP side.
+
+Finally, the backup FIP image must be created. This can simply be a copy of the
+standard FIP image, which means that the Firmware Update process will restore
+the original, uncorrupted FIP image.
+
+EL3 test payload
+````````````````
+
+``el3_payload.bin`` is a test image exercising the alternative `EL3 payload boot
+flow`_ in TF-A. Refer to the `EL3 test payload README file`_ for more details
+about its behaviour and how to build and run it.
+
+Cactus test image
+`````````````````
+
+``cactus.bin`` is a test secure partition that exercises the `Secure Partition
+Manager`_ (SPM) in TF-A [#]_. It runs in Secure-EL0. It performs the following
+tasks:
+
+- Test that TF-A has correctly set up the secure partition environment: Cactus
+ should be allowed to perform cache maintenance operations, access floating
+ point registers, etc.
+
+- Test that TF-A allows changing data access permissions and instruction
+ permissions on behalf of Cactus for memory regions the latter owns.
+
+- Test communication with SPM through the `ARM Management Mode Interface`_.
+
+At the moment, Cactus is supported on AArch64 FVP only. It may be built
+independently of the other test images using the following command:
+
+::
+
+ make PLAT=fvp cactus
+
+In TF-A boot flow, Cactus replaces the ``BL32`` image and should be injected in
+the FIP image. This might be achieved by running the following command from
+the TF-A root directory:
+
+::
+
+ BL32=cactus.bin make PLAT=fvp fip
+
+Please refer to the `TF-A User guide`_ for further details.
+
+To run the full set of tests in Cactus, it should be used in conjunction with
+the TFTF image, as the latter sends the Management Mode requests that Cactus
+services.
+
+Summary of build options
+````````````````````````
+
+As much as possible, TF-A Tests dynamically detect the platform hardware
+components and available features. There are a few build options to select
+specific features where the dynamic detection falls short. This section lists
+them.
+
+Unless mentioned otherwise, these options are expected to be specified at the
+build command line and are not to be modified in any component makefiles.
+
+Note that the build system doesn't track dependencies for build options.
+Therefore, if any of the build options are changed from a previous build, a
+clean build must be performed.
+
+Build options shared across test images
+'''''''''''''''''''''''''''''''''''''''
+
+Most of the build options listed in this section apply to TFTF, the FWU test
+images and Cactus, unless otherwise specified. These do not influence the EL3
+payload, whose simplistic build system is mostly independent.
+
+- ``ARCH``: Choose the target build architecture for TF-A Tests. It can take
+ either ``aarch64`` or ``aarch32`` as values. By default, it is defined to
+ ``aarch64``. Not all test images support this build option.
+
+- ``ARM_ARCH_MAJOR``: The major version of Arm Architecture to target when
+ compiling TF-A Tests. Its value must be numeric, and defaults to 8.
+
+- ``ARM_ARCH_MINOR``: The minor version of Arm Architecture to target when
+ compiling TF-A Tests. Its value must be numeric, and defaults to 0.
+
+- ``DEBUG``: Chooses between a debug and a release build. A debug build
+ typically embeds assertions checking the validity of some assumptions and its
+ output is more verbose. The option can take either 0 (release) or 1 (debug)
+ as values. 0 is the default.
+
+- ``ENABLE_ASSERTIONS``: This option controls whether calls to ``assert()`` are
+ compiled out.
+
+ - For debug builds, this option defaults to 1, and calls to ``assert()`` are
+ compiled in.
+ - For release builds, this option defaults to 0 and calls to ``assert()``
+ are compiled out.
+
+ This option can be set independently of ``DEBUG``. It can also be used to
+ hide any auxiliary code that is only required for the assertion and does not
+ fit in the assertion itself.
+
+- ``LOG_LEVEL``: Chooses the log level, which controls the amount of console log
+ output compiled into the build. This should be one of the following:
+
+ ::
+
+ 0 (LOG_LEVEL_NONE)
+ 10 (LOG_LEVEL_ERROR)
+ 20 (LOG_LEVEL_NOTICE)
+ 30 (LOG_LEVEL_WARNING)
+ 40 (LOG_LEVEL_INFO)
+ 50 (LOG_LEVEL_VERBOSE)
+
+ All log output up to and including the selected log level is compiled into
+ the build. The default value is 40 in debug builds and 20 in release builds.
+
+- ``PLAT``: Choose a platform to build TF-A Tests for. The chosen platform name
+ must be a subdirectory of any depth under ``plat/``, and must contain a
+ platform makefile named ``platform.mk``. For example, to build TF-A Tests for
+ the Arm Juno board, select ``PLAT=juno``.
+
+- ``V``: Verbose build. If assigned anything other than 0, the build commands
+ are printed. Default is 0.
+
+TFTF build options
+''''''''''''''''''
+
+- ``NEW_TEST_SESSION``: Choose whether a new test session should be started
+ every time or whether the framework should determine whether a previous
+ session was interrupted and resume it. It can take either 1 (always
+ start new session) or 0 (resume session as appropriate). 1 is the default.
+
+- ``SHELL_COLOR``: Choose whether text messages should use shell's color escape
+ sequences to ease identifying which CPU displays them. If enabled, this makes
+ each CPU write part of the message in a different color. It can take either
+ 0 (disabled) or 1 (enabled) as values. 0 is the default.
+
+- ``TEST_REPORTS``: List of desired test reports. Test reports are described by
+ a space-separated list of colon-separated pairs of report destination and
+ report type. Valid destinations are: 'uart' and 'semihosting'. Valid report
+ types are 'raw' (text output) or 'junit' (XML Junit format). The default is
+ 'uart:raw', that is a text report printed on the UART. Here's an example of
+ multiple reports: 'uart:raw semihosting:junit'. The files stored on
+ semihosting are named 'tftf_report_junit.xml' and 'tftf_report_raw.txt'.
+
+- ``TESTS_FILE``: Path to the XML file listing the tests to run. Default is
+ ``plat/<platform>/tests.xml`` if it exists, otherwise it falls back to
+ ``tftf/tests/tests-common.xml``.
+
+- ``USE_NVM``: Used to select the location of test results. It can take either 0
+ (RAM) or 1 (non-volatile memory like flash) as test results storage. Default
+ value is 0, as writing to the flash significantly slows tests down.
+
+FWU test images build options
+'''''''''''''''''''''''''''''
+
+- ``FIRMWARE_UPDATE``: Whether the Firmware Update test images (i.e.
+ ``NS_BL1U`` and ``NS_BL2U``) should be built. The default value is 0. The
+ platform makefile is free to override this value if Firmware Update is
+ supported on this platform.
+
+Checking source code style
+--------------------------
+
+When making changes to the source for submission to the project, the source must
+be in compliance with the Linux style guide. To assist with this, the project
+Makefile provides two targets, which both utilise the ``checkpatch.pl`` script
+that ships with the Linux source tree.
+
+To check the entire source tree, you must first download copies of
+``checkpatch.pl``, ``spelling.txt`` and ``const_structs.checkpatch`` available
+in the `Linux master tree`_ scripts directory, then set the ``CHECKPATCH``
+environment variable to point to ``checkpatch.pl`` (with the other 2 files in
+the same directory).
+
+Then use the following command:
+
+::
+
+ make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkcodebase
+
+To limit the coding style checks to your local changes, use:
+
+::
+
+ make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkpatch
+
+By default, this will check all patches between ``origin/master`` and your local
+branch. If you wish to use a different reference commit, this can be specified
+using the ``BASE_COMMIT`` variable.
+
+Running the TF-A Tests
+----------------------
+
+Refer to the sections `Running the software on FVP`_ and `Running the software
+on Juno`_ in `TF-A User Guide`_. The same instructions mostly apply to run the
+TF-A Tests on those 2 platforms. The difference is that the following images are
+not needed here:
+
+- Normal World bootloader. The TFTF replaces it in the boot flow;
+
+- Linux Kernel;
+
+- Device tree;
+
+- Filesystem.
+
+In other words, only the following software images are needed:
+
+- ``BL1`` firmware image;
+
+- ``FIP`` image containing the following images:
+ - ``BL2``;
+ - ``SCP_BL2`` if required by the platform (e.g. Juno);
+ - ``BL31``;
+ - ``BL32`` (optional);
+ - ``tftf.bin`` (standing as the BL33 image).
+
+Running the FWU tests
+`````````````````````
+
+As previously mentioned in section `Putting it all together`_, there are a
+couple of extra images involved when running the FWU tests. They need to be
+loaded at the right addresses, which depend on the platform.
+
+FVP
+'''
+
+In addition to the usual BL1 and FIP images, the following extra images must be
+loaded:
+
+- ``NS_BL1U`` image at address ``0x0BEB8000`` (i.e. NS_BL1U_BASE macro in TF-A)
+- ``FWU_FIP`` image at address ``0x08400000`` (i.e. NS_BL2U_BASE macro in TF-A)
+- ``Backup FIP`` image at address ``0x09000000`` (i.e. FIP_BKP_ADDRESS macro in
+ TF-A tests).
+
+An example script is provided in `scripts/run_fwu_fvp.sh`_.
+
+Juno
+''''
+
+The same set of extra images and load addresses apply for Juno as for FVP.
+
+The new images must be programmed in flash memory by adding some entries in the
+``SITE1/HBI0262x/images.txt`` configuration file on the Juno SD card (where
+``x`` depends on the revision of the Juno board). Refer to the `Juno Getting
+Started Guide`_, section 2.3 "Flash memory programming" for more
+information. Users should ensure these do not overlap with any other entries in
+the file.
+
+Addresses in this file are expressed as an offset from the base address of the
+flash (that is, ``0x08000000``).
+
+::
+
+ NOR10UPDATE: AUTO ; Image Update:NONE/AUTO/FORCE
+ NOR10ADDRESS: 0x00400000 ; Image Flash Address
+ NOR10FILE: \SOFTWARE\fwu_fip.bin ; Image File Name
+ NOR10LOAD: 00000000 ; Image Load Address
+ NOR10ENTRY: 00000000 ; Image Entry Point
+
+ NOR11UPDATE: AUTO ; Image Update:NONE/AUTO/FORCE
+ NOR11ADDRESS: 0x03EB8000 ; Image Flash Address
+ NOR11FILE: \SOFTWARE\ns_bl1u.bin ; Image File Name
+ NOR11LOAD: 00000000 ; Image Load Address
+ NOR11ENTRY: 00000000 ; Image Entry Point
+
+ NOR12UPDATE: AUTO ; Image Update:NONE/AUTO/FORCE
+ NOR12ADDRESS: 0x01000000 ; Image Flash Address
+ NOR12FILE: \SOFTWARE\backup_fip.bin ; Image File Name
+ NOR12LOAD: 00000000 ; Image Load Address
+ NOR12ENTRY: 00000000 ; Image Entry Point
+
+--------------
+
+.. [#] Therefore, the Trusted Board Boot feature must be enabled in TF-A for
+ the FWU test images to work. Please refer the `TF-A User guide`_ for
+ further details.
+
+.. [#] Therefore, the Secure Partition Manager must be enabled in TF-A for
+ Cactus to work. Please refer to the `TF-A User guide`_ for further
+ details.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _Development Studio 5 (DS-5): https://developer.arm.com/products/software-development-tools/ds-5-development-studio
+
+.. _FVP models: https://developer.arm.com/products/system-design/fixed-virtual-platforms
+
+.. _AArch32 GNU cross-toolchain: http://releases.linaro.org/components/toolchain/binaries/6.2-2016.11/arm-linux-gnueabihf/gcc-linaro-6.2.1-2016.11-x86_64_arm-linux-gnueabihf.tar.xz
+.. _AArch64 GNU cross-toolchain: http://releases.linaro.org/components/toolchain/binaries/6.2-2016.11/aarch64-linux-gnu/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu.tar.xz
+
+.. _Linux master tree: https://github.com/torvalds/linux/tree/master/
+
+.. _TF-A: https://www.github.com/ARM-software/arm-trusted-firmware
+.. _Trusted Firmware-A (TF-A): TF-A_
+.. _EL3 payload boot flow: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst#el3-payloads-alternative-boot-flow
+.. _TF-A User Guide: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst
+.. _Firmware Update: FWU_
+.. _FWU: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/firmware-update.rst
+.. _FWU state machine: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/firmware-update.rst#fwu-state-machine
+.. _Running the software on FVP: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst#running-the-software-on-fvp
+.. _Running the software on Juno: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst#running-the-software-on-juno
+.. _Building FIP images with support for Trusted Board Boot: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst#building-fip-images-with-support-for-trusted-board-boot
+.. _Secure Partition Manager: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/secure-partition-manager-design.rst
+
+.. _EL3 test payload README file: ../el3_payload/README
+.. _scripts/run_fwu_fvp.sh: ../scripts/run_fwu_fvp.sh
+
+.. _ARM Management Mode Interface: http://infocenter.arm.com/help/topic/com.arm.doc.den0060a/DEN0060A_ARM_MM_Interface_Specification.pdf
+.. _Juno Getting Started Guide: http://infocenter.arm.com/help/topic/com.arm.doc.dui0928e/DUI0928E_juno_arm_development_platform_gsg.pdf
diff --git a/drivers/arm/gic/arm_gic_v2.c b/drivers/arm/gic/arm_gic_v2.c
new file mode 100644
index 000000000..025d48d34
--- /dev/null
+++ b/drivers/arm/gic/arm_gic_v2.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_v2.h>
+
+/* Enable interrupt delivery through this core's GICv2 CPU interface. */
+void arm_gic_enable_interrupts_local(void)
+{
+ gicv2_enable_cpuif();
+}
+
+/*
+ * Per-core GIC setup: discover this core's GIC CPU interface ID, then
+ * program the CPU interface (priority mask + enable).
+ */
+void arm_gic_setup_local(void)
+{
+ gicv2_probe_gic_cpu_id();
+ gicv2_setup_cpuif();
+}
+
+/* Disable interrupt delivery through this core's CPU interface. */
+void arm_gic_disable_interrupts_local(void)
+{
+ gicv2_disable_cpuif();
+}
+
+/* Save this core's CPU interface state (GICC_CTLR) before suspend. */
+void arm_gic_save_context_local(void)
+{
+ gicv2_save_cpuif_context();
+}
+
+/* Restore this core's CPU interface state after resume. */
+void arm_gic_restore_context_local(void)
+{
+ gicv2_restore_cpuif_context();
+}
+
+/* Save the per-CPU (SGI/PPI) context held in the distributor. */
+void arm_gic_save_context_global(void)
+{
+ gicv2_save_sgi_ppi_context();
+}
+
+/* Re-enable the distributor, then restore the saved SGI/PPI context. */
+void arm_gic_restore_context_global(void)
+{
+ gicv2_setup_distif();
+ gicv2_restore_sgi_ppi_context();
+}
+
+/* Global GIC setup: enable interrupt forwarding in the distributor. */
+void arm_gic_setup_global(void)
+{
+ gicv2_setup_distif();
+}
+
+/* Return the current priority of interrupt `num`. */
+unsigned int arm_gic_get_intr_priority(unsigned int num)
+{
+ return gicv2_gicd_get_ipriorityr(num);
+}
+
+/* Set the priority of interrupt `num`. */
+void arm_gic_set_intr_priority(unsigned int num,
+ unsigned int priority)
+{
+ gicv2_gicd_set_ipriorityr(num, priority);
+}
+
+/* Send SGI `sgi_id` to the core at linear position `core_pos`. */
+void arm_gic_send_sgi(unsigned int sgi_id, unsigned int core_pos)
+{
+ gicv2_send_sgi(sgi_id, core_pos);
+}
+
+/* Route SPI `num` to the core at linear position `core_pos`. */
+void arm_gic_set_intr_target(unsigned int num, unsigned int core_pos)
+{
+ gicv2_set_itargetsr(num, core_pos);
+}
+
+/* Return 1 if interrupt `num` is enabled at the distributor, 0 otherwise. */
+unsigned int arm_gic_intr_enabled(unsigned int num)
+{
+ return gicv2_gicd_get_isenabler(num) != 0;
+}
+
+/* Enable interrupt `num` at the distributor. */
+void arm_gic_intr_enable(unsigned int num)
+{
+ gicv2_gicd_set_isenabler(num);
+}
+
+/* Disable interrupt `num` at the distributor. */
+void arm_gic_intr_disable(unsigned int num)
+{
+ gicv2_gicd_set_icenabler(num);
+}
+
+/*
+ * Acknowledge the highest priority pending interrupt. The raw GICC_IAR
+ * value is returned through `raw_iar` (the caller must later pass it to
+ * arm_gic_end_of_intr()); the return value is the interrupt ID extracted
+ * from that raw value.
+ */
+unsigned int arm_gic_intr_ack(unsigned int *raw_iar)
+{
+ assert(raw_iar);
+
+ *raw_iar = gicv2_gicc_read_iar();
+ return get_gicc_iar_intid(*raw_iar);
+}
+
+/* Return non-zero if interrupt `num` is pending. */
+unsigned int arm_gic_is_intr_pending(unsigned int num)
+{
+ return gicv2_gicd_get_ispendr(num);
+}
+
+/* Clear the pending state of interrupt `num`. */
+void arm_gic_intr_clear(unsigned int num)
+{
+ gicv2_gicd_set_icpendr(num);
+}
+
+/* Signal end of interrupt, using the raw IAR value from arm_gic_intr_ack(). */
+void arm_gic_end_of_intr(unsigned int raw_iar)
+{
+ gicv2_gicc_write_eoir(raw_iar);
+}
+
+/*
+ * Initialize the GICv2 driver. `gicr_base` exists only for API
+ * compatibility with the GICv3-capable build and is ignored here.
+ */
+void arm_gic_init(uintptr_t gicc_base,
+ uintptr_t gicd_base,
+ uintptr_t gicr_base)
+{
+ gicv2_init(gicc_base, gicd_base);
+ INFO("ARM GIC v2 driver initialized\n");
+}
+
diff --git a/drivers/arm/gic/arm_gic_v2v3.c b/drivers/arm/gic/arm_gic_v2v3.c
new file mode 100644
index 000000000..576c6112b
--- /dev/null
+++ b/drivers/arm/gic/arm_gic_v2v3.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_common.h>
+#include <gic_v2.h>
+#include <gic_v3.h>
+
+/* Record whether a GICv3 was detected on the system */
+static unsigned int gicv3_detected;
+
+/*
+ * Enable interrupt delivery on this core, through the GICv3 system
+ * registers or the GICv2 CPU interface depending on what arm_gic_init()
+ * detected.
+ */
+void arm_gic_enable_interrupts_local(void)
+{
+ if (gicv3_detected)
+ gicv3_enable_cpuif();
+ else
+ gicv2_enable_cpuif();
+}
+
+/*
+ * Per-core setup: discover this core's redistributor frame (GICv3) or
+ * GIC CPU interface ID (GICv2), then program the CPU interface.
+ */
+void arm_gic_setup_local(void)
+{
+ if (gicv3_detected) {
+ gicv3_probe_redistif_addr();
+ gicv3_setup_cpuif();
+ } else {
+ gicv2_probe_gic_cpu_id();
+ gicv2_setup_cpuif();
+ }
+}
+
+/* Disable interrupt delivery on this core. */
+void arm_gic_disable_interrupts_local(void)
+{
+ if (gicv3_detected)
+ gicv3_disable_cpuif();
+ else
+ gicv2_disable_cpuif();
+}
+
+/* Save this core's CPU interface state before suspend. */
+void arm_gic_save_context_local(void)
+{
+ if (gicv3_detected)
+ gicv3_save_cpuif_context();
+ else
+ gicv2_save_cpuif_context();
+}
+
+/* Restore this core's CPU interface state after resume. */
+void arm_gic_restore_context_local(void)
+{
+ if (gicv3_detected)
+ gicv3_restore_cpuif_context();
+ else
+ gicv2_restore_cpuif_context();
+}
+
+/* Save the per-CPU (SGI/PPI) distributor/redistributor context. */
+void arm_gic_save_context_global(void)
+{
+ if (gicv3_detected)
+ gicv3_save_sgi_ppi_context();
+ else
+ gicv2_save_sgi_ppi_context();
+}
+
+/* Re-enable the distributor, then restore the saved SGI/PPI context. */
+void arm_gic_restore_context_global(void)
+{
+ if (gicv3_detected) {
+ gicv3_setup_distif();
+ gicv3_restore_sgi_ppi_context();
+ } else {
+ gicv2_setup_distif();
+ gicv2_restore_sgi_ppi_context();
+ }
+}
+
+/* Global setup: enable interrupt forwarding in the distributor. */
+void arm_gic_setup_global(void)
+{
+ if (gicv3_detected)
+ gicv3_setup_distif();
+ else
+ gicv2_setup_distif();
+}
+
+/* Return the current priority of interrupt `num`. */
+unsigned int arm_gic_get_intr_priority(unsigned int num)
+{
+ if (gicv3_detected)
+ return gicv3_get_ipriorityr(num);
+ else
+ return gicv2_gicd_get_ipriorityr(num);
+}
+
+/* Set the priority of interrupt `num`. */
+void arm_gic_set_intr_priority(unsigned int num,
+ unsigned int priority)
+{
+ if (gicv3_detected)
+ gicv3_set_ipriorityr(num, priority);
+ else
+ gicv2_gicd_set_ipriorityr(num, priority);
+}
+
+/* Send SGI `sgi_id` to the core at linear position `core_pos`. */
+void arm_gic_send_sgi(unsigned int sgi_id, unsigned int core_pos)
+{
+ if (gicv3_detected)
+ gicv3_send_sgi(sgi_id, core_pos);
+ else
+ gicv2_send_sgi(sgi_id, core_pos);
+}
+
+/* Route SPI `num` to the core at linear position `core_pos`. */
+void arm_gic_set_intr_target(unsigned int num, unsigned int core_pos)
+{
+ if (gicv3_detected)
+ gicv3_set_intr_route(num, core_pos);
+ else
+ gicv2_set_itargetsr(num, core_pos);
+}
+
+/* Return 1 if interrupt `num` is enabled, 0 otherwise. */
+unsigned int arm_gic_intr_enabled(unsigned int num)
+{
+ if (gicv3_detected)
+ return gicv3_get_isenabler(num) != 0;
+ else
+ return gicv2_gicd_get_isenabler(num) != 0;
+}
+
+/* Enable interrupt `num`. */
+void arm_gic_intr_enable(unsigned int num)
+{
+ if (gicv3_detected)
+ gicv3_set_isenabler(num);
+ else
+ gicv2_gicd_set_isenabler(num);
+}
+
+/* Disable interrupt `num`. */
+void arm_gic_intr_disable(unsigned int num)
+{
+ if (gicv3_detected)
+ gicv3_set_icenabler(num);
+ else
+ gicv2_gicd_set_icenabler(num);
+}
+
+/*
+ * Acknowledge the highest priority pending interrupt. On GICv3 the raw
+ * acknowledge value is returned as the interrupt ID unchanged; on GICv2
+ * the ID is extracted from the raw GICC_IAR value. In both cases
+ * `*raw_iar` holds the raw value to later pass to arm_gic_end_of_intr().
+ */
+unsigned int arm_gic_intr_ack(unsigned int *raw_iar)
+{
+ assert(raw_iar);
+
+ if (gicv3_detected) {
+ *raw_iar = gicv3_acknowledge_interrupt();
+ return *raw_iar;
+ } else {
+ *raw_iar = gicv2_gicc_read_iar();
+ return get_gicc_iar_intid(*raw_iar);
+ }
+}
+
+/* Return non-zero if interrupt `num` is pending. */
+unsigned int arm_gic_is_intr_pending(unsigned int num)
+{
+ if (gicv3_detected)
+ return gicv3_get_ispendr(num);
+ else
+ return gicv2_gicd_get_ispendr(num);
+}
+
+/* Clear the pending state of interrupt `num`. */
+void arm_gic_intr_clear(unsigned int num)
+{
+ if (gicv3_detected)
+ gicv3_set_icpendr(num);
+ else
+ gicv2_gicd_set_icpendr(num);
+}
+
+/* Signal end of interrupt, using the raw value from arm_gic_intr_ack(). */
+void arm_gic_end_of_intr(unsigned int raw_iar)
+{
+ if (gicv3_detected)
+ gicv3_end_of_interrupt(raw_iar);
+ else
+ gicv2_gicc_write_eoir(raw_iar);
+}
+
+/*
+ * Detect the GIC mode and initialize the matching driver.
+ * `gicc_base` is unused on GICv3 systems; `gicr_base` is unused on
+ * GICv2 systems.
+ */
+void arm_gic_init(uintptr_t gicc_base,
+ uintptr_t gicd_base,
+ uintptr_t gicr_base)
+{
+
+ if (is_gicv3_mode()) {
+ gicv3_detected = 1;
+ gicv3_init(gicr_base, gicd_base);
+ } else {
+ gicv2_init(gicc_base, gicd_base);
+ }
+
+ INFO("%s mode detected\n", (gicv3_detected) ?
+ "GICv3" : "GICv2");
+}
diff --git a/drivers/arm/gic/gic_common.c b/drivers/arm/gic/gic_common.c
new file mode 100644
index 000000000..207ee15ad
--- /dev/null
+++ b/drivers/arm/gic/gic_common.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <gic_common.h>
+#include <gic_v3.h>
+#include <mmio.h>
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading entire registers
+ ******************************************************************************/
+
+/*
+ * NOTE(review): these accessors take `base` as `unsigned int` while the
+ * drivers store GIC base addresses as `uintptr_t`; on AArch64 a GIC frame
+ * mapped above 4GB would be truncated — consider widening to uintptr_t
+ * (verify against all callers).
+ */
+
+/* Read the 32-bit ISENABLER word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_isenabler(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ISENABLER_SHIFT;
+ return mmio_read_32(base + GICD_ISENABLER + (n << 2));
+}
+
+/* Read the 32-bit ICENABLER word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_icenabler(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ICENABLER_SHIFT;
+ return mmio_read_32(base + GICD_ICENABLER + (n << 2));
+}
+
+/* Read the 32-bit ISPENDR word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_ispendr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ISPENDR_SHIFT;
+ return mmio_read_32(base + GICD_ISPENDR + (n << 2));
+}
+
+/* Read the 32-bit ICPENDR word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_icpendr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ICPENDR_SHIFT;
+ return mmio_read_32(base + GICD_ICPENDR + (n << 2));
+}
+
+/* Read the 32-bit ISACTIVER word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_isactiver(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ISACTIVER_SHIFT;
+ return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
+}
+
+/* Read the 32-bit ICACTIVER word containing `interrupt_id` (32 IDs/word). */
+unsigned int gicd_read_icactiver(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ICACTIVER_SHIFT;
+ return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
+}
+
+/* Read the 32-bit IPRIORITYR word containing `interrupt_id` (4 IDs/word). */
+unsigned int gicd_read_ipriorityr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> IPRIORITYR_SHIFT;
+ return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
+}
+
+/* Read the 32-bit ICFGR word containing `interrupt_id` (16 IDs/word). */
+unsigned int gicd_read_icfgr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int n = interrupt_id >> ICFGR_SHIFT;
+ return mmio_read_32(base + GICD_ICFGR + (n << 2));
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for writing entire registers
+ ******************************************************************************/
+
+/* Write the 32-bit ISENABLER word containing `interrupt_id`. */
+void gicd_write_isenabler(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ISENABLER_SHIFT;
+ mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
+}
+
+/* Write the 32-bit ICENABLER word containing `interrupt_id`. */
+void gicd_write_icenabler(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ICENABLER_SHIFT;
+ mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
+}
+
+/* Write the 32-bit ISPENDR word containing `interrupt_id`. */
+void gicd_write_ispendr(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ISPENDR_SHIFT;
+ mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
+}
+
+/* Write the 32-bit ICPENDR word containing `interrupt_id`. */
+void gicd_write_icpendr(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ICPENDR_SHIFT;
+ mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
+}
+
+/* Write the 32-bit ISACTIVER word containing `interrupt_id`. */
+void gicd_write_isactiver(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ISACTIVER_SHIFT;
+ mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
+}
+
+/* Write the 32-bit ICACTIVER word containing `interrupt_id`. */
+void gicd_write_icactiver(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ICACTIVER_SHIFT;
+ mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
+}
+
+/* Write the 32-bit IPRIORITYR word containing `interrupt_id` (4 IDs/word). */
+void gicd_write_ipriorityr(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> IPRIORITYR_SHIFT;
+ mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
+}
+
+/* Write the 32-bit ICFGR word containing `interrupt_id` (16 IDs/word). */
+void gicd_write_icfgr(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned int n = interrupt_id >> ICFGR_SHIFT;
+ mmio_write_32(base + GICD_ICFGR + (n << 2), val);
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for individual interrupt manipulation
+ ******************************************************************************/
+/* Return the ISENABLER bit for `interrupt_id`: non-zero iff enabled. */
+unsigned int gicd_get_isenabler(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ISENABLER_SHIFT) - 1);
+
+ return gicd_read_isenabler(base, interrupt_id) & (1 << bit_num);
+}
+
+/* Enable `interrupt_id`. ISENABLER is write-1-to-enable, so no RMW needed. */
+void gicd_set_isenabler(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ISENABLER_SHIFT) - 1);
+
+ gicd_write_isenabler(base, interrupt_id, (1 << bit_num));
+}
+
+/* Disable `interrupt_id`. ICENABLER is write-1-to-disable. */
+void gicd_set_icenabler(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ICENABLER_SHIFT) - 1);
+
+ gicd_write_icenabler(base, interrupt_id, (1 << bit_num));
+}
+
+/* Mark `interrupt_id` pending. ISPENDR is write-1-to-set. */
+void gicd_set_ispendr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ISPENDR_SHIFT) - 1);
+
+ gicd_write_ispendr(base, interrupt_id, (1 << bit_num));
+}
+
+/* Clear the pending state of `interrupt_id`. ICPENDR is write-1-to-clear. */
+void gicd_set_icpendr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ICPENDR_SHIFT) - 1);
+
+ gicd_write_icpendr(base, interrupt_id, (1 << bit_num));
+}
+
+/* Mark `interrupt_id` active. ISACTIVER is write-1-to-set. */
+void gicd_set_isactiver(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ISACTIVER_SHIFT) - 1);
+
+ gicd_write_isactiver(base, interrupt_id, (1 << bit_num));
+}
+
+/* Clear the active state of `interrupt_id`. ICACTIVER is write-1-to-clear. */
+void gicd_set_icactiver(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned int bit_num = interrupt_id & ((1 << ICACTIVER_SHIFT) - 1);
+
+ gicd_write_icactiver(base, interrupt_id, (1 << bit_num));
+}
+
+/*
+ * Return the priority of interrupt `interrupt_id`.
+ *
+ * GICD_IPRIORITYR is byte-accessible, one byte per interrupt ID, so read
+ * the single byte for this ID (mirroring the byte write performed by
+ * gicd_set_ipriorityr() below). The previous implementation read the whole
+ * 32-bit IPRIORITYR word and masked the low byte, which returned the
+ * priority of interrupt (interrupt_id & ~3) — i.e. the wrong interrupt —
+ * whenever interrupt_id was not a multiple of 4.
+ */
+unsigned int gicd_get_ipriorityr(unsigned int base, unsigned int interrupt_id)
+{
+ return mmio_read_8(base + GICD_IPRIORITYR + interrupt_id) &
+ GIC_PRI_MASK;
+}
+
+/* Set the priority of interrupt `interrupt_id` with a single byte write. */
+void gicd_set_ipriorityr(unsigned int base, unsigned int interrupt_id,
+ unsigned int priority)
+{
+ mmio_write_8(base + GICD_IPRIORITYR + interrupt_id,
+ priority & GIC_PRI_MASK);
+}
+
+/*
+ * Return non-zero if the GIC is operating in GICv3 mode, i.e. the CPU
+ * advertises a GIC system register interface in its ID registers and that
+ * interface is currently enabled (SRE).
+ */
+unsigned int is_gicv3_mode(void)
+{
+ /* Check if GICv3 system register available */
+#ifndef AARCH32
+ if (!(read_id_aa64pfr0_el1() & (ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT)))
+ return 0;
+#else
+ if (!(read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT)))
+ return 0;
+#endif
+
+ /* Check whether the system register interface is enabled */
+ return !!is_sre_enabled();
+}
diff --git a/drivers/arm/gic/gic_v2.c b/drivers/arm/gic/gic_v2.c
new file mode 100644
index 000000000..48ee29e2a
--- /dev/null
+++ b/drivers/arm/gic/gic_v2.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <gic_common.h>
+#include <gic_v2.h>
+#include <mmio.h>
+#include <platform.h>
+
+/*
+ * Data structure to store the GIC per CPU context before entering
+ * system suspend. Only the GIC context of first 32 interrupts (SGIs and PPIs)
+ * will be saved. The GIC SPI context needs to be restored by the respective
+ * drivers. The GICC_PMR is not saved here as it will be reinitialized during
+ * GIC restore.
+ */
+struct gicv2_pcpu_ctx {
+ unsigned int gicc_ctlr;
+ unsigned int gicd_isenabler0;
+ unsigned int gicd_ipriorityr[NUM_PCPU_INTR >> IPRIORITYR_SHIFT];
+ unsigned int gicd_icfgr;
+};
+
+static struct gicv2_pcpu_ctx pcpu_gic_ctx[PLATFORM_CORE_COUNT];
+
+/* GIC register frame base addresses, set once by gicv2_init(). */
+static uintptr_t gicc_base_addr;
+static uintptr_t gicd_base_addr;
+
+/*
+ * Per-core GIC CPU interface ID, discovered by gicv2_probe_gic_cpu_id().
+ * Every slot must start as UINT32_MAX (the "not probed" marker) so that
+ * the assert in core_pos_to_gic_id() catches use before probing. A plain
+ * {UINT32_MAX} initializer only sets slot 0 and zero-fills slots 1..N-1,
+ * which silently defeats that check for secondary cores. The range
+ * designator is a GNU C extension (the firmware is built with GCC).
+ */
+static unsigned int gic_cpu_id[PLATFORM_CORE_COUNT] = {
+ [0 ... PLATFORM_CORE_COUNT - 1] = UINT32_MAX
+};
+
+/* Helper function to convert core pos to gic id */
+static unsigned int core_pos_to_gic_id(unsigned int core_pos)
+{
+ assert(gic_cpu_id[core_pos] != UINT32_MAX);
+ return gic_cpu_id[core_pos];
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading entire registers
+ ******************************************************************************/
+/* Read the 32-bit ITARGETSR word containing `interrupt_id` (4 IDs/word). */
+unsigned int gicd_read_itargetsr(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned n = interrupt_id >> ITARGETSR_SHIFT;
+ return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
+}
+
+/* Read the 32-bit CPENDSGIR word containing SGI `interrupt_id`. */
+unsigned int gicd_read_cpendsgir(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned n = interrupt_id >> CPENDSGIR_SHIFT;
+ return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
+}
+
+/* Read the 32-bit SPENDSGIR word containing SGI `interrupt_id`. */
+unsigned int gicd_read_spendsgir(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned n = interrupt_id >> SPENDSGIR_SHIFT;
+ return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for writing entire registers
+ ******************************************************************************/
+/* Write the 32-bit ITARGETSR word containing `interrupt_id`. */
+void gicd_write_itargetsr(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned n = interrupt_id >> ITARGETSR_SHIFT;
+ mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
+}
+
+/* Write the single ITARGETSR byte for `interrupt_id` (byte-accessible reg). */
+void gicd_write_itargetsr_byte(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ mmio_write_8(base + GICD_ITARGETSR + interrupt_id, val);
+}
+
+/* Write the 32-bit CPENDSGIR word containing SGI `interrupt_id`. */
+void gicd_write_cpendsgir(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned n = interrupt_id >> CPENDSGIR_SHIFT;
+ mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
+}
+
+/* Write the 32-bit SPENDSGIR word containing SGI `interrupt_id`. */
+void gicd_write_spendsgir(unsigned int base,
+ unsigned int interrupt_id, unsigned int val)
+{
+ unsigned n = interrupt_id >> SPENDSGIR_SHIFT;
+ mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for individual interrupt manipulation
+ ******************************************************************************/
+/* Target `interrupt_id` at the single GIC CPU interface `iface` (bit mask). */
+void gicd_set_itargetsr(unsigned int base,
+ unsigned int interrupt_id, unsigned int iface)
+{
+ mmio_write_8(base + GICD_ITARGETSR + interrupt_id, (1 << iface));
+}
+
+/******************************************************************************
+ * GICv2 public driver API
+ *****************************************************************************/
+
+/* Enable the CPU interface and disable IRQ/FIQ bypass for Group 1. */
+void gicv2_enable_cpuif(void)
+{
+ unsigned int gicc_ctlr;
+
+ assert(gicc_base_addr);
+
+ /* Enable the GICC and disable bypass */
+ gicc_ctlr = GICC_CTLR_ENABLE | FIQ_BYP_DIS_GRP1
+ | IRQ_BYP_DIS_GRP1;
+ gicc_write_ctlr(gicc_base_addr, gicc_ctlr);
+}
+
+/*
+ * Discover and record this core's GIC CPU interface ID. Reads of
+ * GICD_ITARGETSR0 return a mask with only the reading core's bit set;
+ * the bit position is the GIC CPU ID.
+ */
+void gicv2_probe_gic_cpu_id(void)
+{
+ unsigned int gicd_itargets_val, core_pos;
+
+ assert(gicd_base_addr);
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ gicd_itargets_val = gicd_read_itargetsr(gicd_base_addr, 0);
+
+ assert(gicd_itargets_val);
+
+ /* Convert the bit pos returned by read of ITARGETSR0 to GIC CPU ID */
+ gic_cpu_id[core_pos] = __builtin_ctz(gicd_itargets_val);
+}
+
+/* Program the priority mask to accept all priorities, then enable GICC. */
+void gicv2_setup_cpuif(void)
+{
+ assert(gicc_base_addr);
+
+ /* Set the priority mask register to allow all interrupts to trickle in */
+ gicc_write_pmr(gicc_base_addr, GIC_PRI_MASK);
+ gicv2_enable_cpuif();
+}
+
+/* Disable the CPU interface while keeping IRQ/FIQ bypass disabled. */
+void gicv2_disable_cpuif(void)
+{
+ unsigned int gicc_ctlr;
+
+ assert(gicc_base_addr);
+
+ /* Disable non-secure interrupts and disable their bypass */
+ gicc_ctlr = gicc_read_ctlr(gicc_base_addr);
+ gicc_ctlr &= ~GICC_CTLR_ENABLE;
+ gicc_ctlr |= FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+ gicc_write_ctlr(gicc_base_addr, gicc_ctlr);
+}
+
+/* Save this core's GICC_CTLR before suspend. */
+void gicv2_save_cpuif_context(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(gicc_base_addr);
+ pcpu_gic_ctx[core_pos].gicc_ctlr =
+ gicc_read_ctlr(gicc_base_addr);
+}
+
+/* Restore this core's GICC state after resume; GICC_PMR is reinitialized. */
+void gicv2_restore_cpuif_context(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(gicc_base_addr);
+
+ /* The GICC_PMR is never modified, hence we initialize this register */
+ gicc_write_pmr(gicc_base_addr, GIC_PRI_MASK);
+
+ gicc_write_ctlr(gicc_base_addr,
+ pcpu_gic_ctx[core_pos].gicc_ctlr);
+}
+
+/* Enable forwarding of interrupts from the distributor to CPU interfaces. */
+void gicv2_setup_distif(void)
+{
+ unsigned int gicd_ctlr;
+
+ assert(gicd_base_addr);
+
+ /* Enable the forwarding of interrupts to CPU interface */
+ gicd_ctlr = gicd_read_ctlr(gicd_base_addr);
+ gicd_ctlr |= GICD_CTLR_ENABLE;
+ gicd_write_ctlr(gicd_base_addr, gicd_ctlr);
+}
+
+/* Save the per-cpu GICD ISENABLER, IPRIORITYR and ICFGR registers */
+void gicv2_save_sgi_ppi_context(void)
+{
+ unsigned int i;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(gicd_base_addr);
+ pcpu_gic_ctx[core_pos].gicd_isenabler0 =
+ gicd_read_isenabler(gicd_base_addr, 0);
+
+ /* Read the ipriority registers, 4 at a time */
+ for (i = 0; i < (NUM_PCPU_INTR >> IPRIORITYR_SHIFT); i++)
+ pcpu_gic_ctx[core_pos].gicd_ipriorityr[i] =
+ gicd_read_ipriorityr(gicd_base_addr, i << IPRIORITYR_SHIFT);
+
+ /* Only the PPI configuration is saved; SGI config is fixed. */
+ pcpu_gic_ctx[core_pos].gicd_icfgr =
+ gicd_read_icfgr(gicd_base_addr, MIN_PPI_ID);
+}
+
+/* Restore the per-cpu GICD ISENABLER, IPRIORITYR and ICFGR registers */
+void gicv2_restore_sgi_ppi_context(void)
+{
+ unsigned int i;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(gicd_base_addr);
+
+ /* Write the ipriority registers, 4 at a time */
+ for (i = 0; i < (NUM_PCPU_INTR >> IPRIORITYR_SHIFT); i++)
+ gicd_write_ipriorityr(gicd_base_addr, i << IPRIORITYR_SHIFT,
+ pcpu_gic_ctx[core_pos].gicd_ipriorityr[i]);
+
+ gicd_write_icfgr(gicd_base_addr, MIN_PPI_ID,
+ pcpu_gic_ctx[core_pos].gicd_icfgr);
+
+ /* Re-enable interrupts last, once priorities/config are in place. */
+ gicd_write_isenabler(gicd_base_addr, 0,
+ pcpu_gic_ctx[core_pos].gicd_isenabler0);
+}
+
+/* Return the priority of interrupt `interrupt_id`. */
+unsigned int gicv2_gicd_get_ipriorityr(unsigned int interrupt_id)
+{
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ return gicd_get_ipriorityr(gicd_base_addr, interrupt_id);
+}
+
+/* Set the priority of interrupt `interrupt_id`. */
+void gicv2_gicd_set_ipriorityr(unsigned int interrupt_id,
+ unsigned int priority)
+{
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ gicd_set_ipriorityr(gicd_base_addr, interrupt_id, priority);
+}
+
+/*
+ * Send SGI `sgi_id` to the core at linear position `core_pos` via
+ * GICD_SGIR. The target is encoded as a CPUTargetList bit mask, so the
+ * GIC CPU ID must be below 8 (GICv2 supports at most 8 CPU interfaces).
+ */
+void gicv2_send_sgi(unsigned int sgi_id, unsigned int core_pos)
+{
+ unsigned int sgir_val;
+
+ assert(gicd_base_addr);
+ assert(IS_SGI(sgi_id));
+
+ sgir_val = sgi_id << GICD_SGIR_INTID_SHIFT;
+ sgir_val |= (1 << core_pos_to_gic_id(core_pos)) << GICD_SGIR_CPUTL_SHIFT;
+
+ gicd_write_sgir(gicd_base_addr, sgir_val);
+}
+
+/* Route SPI `num` to the core at linear position `core_pos`. */
+void gicv2_set_itargetsr(unsigned int num, unsigned int core_pos)
+{
+ unsigned int gic_cpu_id;
+ assert(gicd_base_addr);
+ assert(IS_SPI(num));
+
+ gic_cpu_id = core_pos_to_gic_id(core_pos);
+ gicd_set_itargetsr(gicd_base_addr, num, gic_cpu_id);
+}
+
+/* Write a raw CPUTargetList byte for SPI `num` (multi-target routing). */
+void gicv2_set_itargetsr_value(unsigned int num, unsigned int val)
+{
+ assert(gicd_base_addr);
+ assert(IS_SPI(num));
+
+ gicd_write_itargetsr_byte(gicd_base_addr, num, val);
+}
+
+/* Return non-zero if interrupt `num` is enabled at the distributor. */
+unsigned int gicv2_gicd_get_isenabler(unsigned int num)
+{
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(num));
+
+ return gicd_get_isenabler(gicd_base_addr, num);
+}
+
+/* Enable interrupt `num` at the distributor. */
+void gicv2_gicd_set_isenabler(unsigned int num)
+{
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(num));
+
+ gicd_set_isenabler(gicd_base_addr, num);
+}
+
+/* Disable interrupt `num` at the distributor. */
+void gicv2_gicd_set_icenabler(unsigned int num)
+{
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(num));
+
+ gicd_set_icenabler(gicd_base_addr, num);
+}
+
+/* Read GICC_IAR, acknowledging the highest priority pending interrupt. */
+unsigned int gicv2_gicc_read_iar(void)
+{
+ assert(gicc_base_addr);
+ return gicc_read_iar(gicc_base_addr);
+}
+
+/* Return 1 if interrupt `interrupt_id` is pending, 0 otherwise. */
+unsigned int gicv2_gicd_get_ispendr(unsigned int interrupt_id)
+{
+ unsigned int ispendr;
+ unsigned int bit_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ ispendr = gicd_read_ispendr(gicd_base_addr, interrupt_id);
+ bit_pos = interrupt_id % (1 << ISPENDR_SHIFT);
+
+ return !!(ispendr & (1 << bit_pos));
+}
+
+/* Mark PPI/SPI `interrupt_id` pending (SGIs are pended via GICD_SGIR). */
+void gicv2_gicd_set_ispendr(unsigned int interrupt_id)
+{
+ assert(gicd_base_addr);
+ assert(IS_PPI(interrupt_id) || IS_SPI(interrupt_id));
+ gicd_set_ispendr(gicd_base_addr, interrupt_id);
+}
+
+/* Clear the pending state of PPI/SPI `interrupt_id`. */
+void gicv2_gicd_set_icpendr(unsigned int interrupt_id)
+{
+ assert(gicd_base_addr);
+ assert(IS_PPI(interrupt_id) || IS_SPI(interrupt_id));
+
+ gicd_set_icpendr(gicd_base_addr, interrupt_id);
+}
+
+/* Write GICC_EOIR, signalling end of interrupt for a raw IAR value. */
+void gicv2_gicc_write_eoir(unsigned int val)
+{
+ assert(gicc_base_addr);
+
+ gicc_write_eoir(gicc_base_addr, val);
+}
+
+/* Record the GICC/GICD base addresses. Must run before any other API. */
+void gicv2_init(uintptr_t gicc_base,
+ uintptr_t gicd_base)
+{
+ assert(gicc_base);
+ assert(gicd_base);
+
+ /* Assert that this is a GICv2 system */
+ assert(!is_gicv3_mode());
+ gicc_base_addr = gicc_base;
+ gicd_base_addr = gicd_base;
+}
diff --git a/drivers/arm/gic/gic_v3.c b/drivers/arm/gic/gic_v3.c
new file mode 100644
index 000000000..76b08639c
--- /dev/null
+++ b/drivers/arm/gic/gic_v3.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_common.h>
+#include <gic_v3.h>
+#include <mmio.h>
+#include <platform.h>
+
+/* Global variables to store the GIC base addresses */
+static uintptr_t gicr_base_addr;
+static uintptr_t gicd_base_addr;
+
+/*
+ * Convert an MPIDR into the packed affinity layout used by GICR_TYPER /
+ * GICD_IROUTER (Aff3.Aff2.Aff1.Aff0 in contiguous bytes).
+ */
+#ifndef AARCH32
+#define MPIDR_AFFLVL3_MASK ((unsigned long long)MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT)
+#define gic_typer_affinity_from_mpidr(mpidr) \
+ (((mpidr) & (~MPIDR_AFFLVL3_MASK)) | (((mpidr) & MPIDR_AFFLVL3_MASK) >> 8))
+#else
+#define gic_typer_affinity_from_mpidr(mpidr) \
+ ((mpidr) & ((MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | MPID_MASK))
+#endif
+
+/*
+ * Data structure to store the GIC per CPU context before entering
+ * system suspend. Only the GIC context of first 32 interrupts (SGIs and PPIs)
+ * will be saved. The GIC SPI context needs to be restored by the respective
+ * drivers.
+ */
+struct gicv3_pcpu_ctx {
+ /* Flag to indicate whether the CPU is suspended */
+ unsigned int is_suspended;
+ unsigned int icc_igrpen1;
+ unsigned int gicr_isenabler;
+ unsigned int gicr_ipriorityr[NUM_PCPU_INTR >> IPRIORITYR_SHIFT];
+ unsigned int gicr_icfgr;
+};
+
+/* Array to store the per-cpu GICv3 context when being suspended.*/
+static struct gicv3_pcpu_ctx pcpu_ctx[PLATFORM_CORE_COUNT];
+
+/* Array to store the per-cpu redistributor frame addresses */
+static uintptr_t rdist_pcpu_base[PLATFORM_CORE_COUNT];
+
+/*
+ * Array to store the mpidr corresponding to each initialized per-CPU
+ * redistributor interface. Every slot must start as UINT64_MAX (the
+ * "not initialized" marker) so the asserts in gicv3_send_sgi() and
+ * gicv3_set_intr_route() catch use of an unprobed core. A plain
+ * {UINT64_MAX} initializer only sets slot 0 and zero-fills slots 1..N-1,
+ * silently defeating those checks. The range designator is a GNU C
+ * extension (the firmware is built with GCC).
+ */
+static unsigned long long mpidr_list[PLATFORM_CORE_COUNT] = {
+ [0 ... PLATFORM_CORE_COUNT - 1] = UINT64_MAX
+};
+
+/******************************************************************************
+ * GIC Distributor interface accessors for writing entire registers
+ *****************************************************************************/
+/*
+ * Program GICD_IROUTER for SPI `interrupt_id` (64-bit affinity routing).
+ * NOTE(review): `base` is `unsigned int` here but `uintptr_t` in some of
+ * the accessors below — inconsistent, and truncating for GICs above 4GB
+ * on AArch64; consider unifying on uintptr_t.
+ */
+static void gicd_write_irouter(unsigned int base,
+ unsigned int interrupt_id,
+ unsigned long long route)
+{
+ assert(interrupt_id >= MIN_SPI_ID);
+ mmio_write_64(base + GICD_IROUTER + (interrupt_id << 3), route);
+}
+
+/******************************************************************************
+ * GIC Re-distributor interface accessors for writing entire registers
+ *****************************************************************************/
+/* Write GICR_ISENABLER0 (write-1-to-enable for this core's SGIs/PPIs). */
+static void gicr_write_isenabler0(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICR_ISENABLER0, val);
+}
+
+/* Write GICR_ICENABLER0 (write-1-to-disable). */
+static void gicr_write_icenabler0(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICR_ICENABLER0, val);
+}
+
+/* Write GICR_ICPENDR0 (write-1-to-clear pending). */
+static void gicr_write_icpendr0(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICR_ICPENDR0, val);
+}
+
+/* Write the 32-bit GICR_IPRIORITYR word containing `id` (4 IDs/word). */
+static void gicr_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
+{
+ unsigned n = id >> IPRIORITYR_SHIFT;
+ mmio_write_32(base + GICR_IPRIORITYR + (n << 2), val);
+}
+
+/* Write GICR_ICFGR1 (PPI trigger configuration). */
+static void gicr_write_icfgr1(uintptr_t base, unsigned int val)
+{
+ mmio_write_32(base + GICR_ICFGR1, val);
+}
+
+/******************************************************************************
+ * GIC Re-distributor interface accessors for reading entire registers
+ *****************************************************************************/
+/* Read the 64-bit GICR_TYPER of the redistributor frame at `base`. */
+static unsigned long long gicr_read_typer(uintptr_t base)
+{
+ return mmio_read_64(base + GICR_TYPER);
+}
+
+/* Read GICR_ICFGR1 (PPI trigger configuration). */
+static unsigned int gicr_read_icfgr1(uintptr_t base)
+{
+ return mmio_read_32(base + GICR_ICFGR1);
+}
+
+/* Read GICR_ISENABLER0 (enable bits for this core's SGIs/PPIs). */
+static unsigned int gicr_read_isenabler0(unsigned int base)
+{
+ return mmio_read_32(base + GICR_ISENABLER0);
+}
+
+/* Read the 32-bit GICR_IPRIORITYR word containing `id` (4 IDs/word). */
+static unsigned int gicr_read_ipriorityr(uintptr_t base, unsigned int id)
+{
+ unsigned n = id >> IPRIORITYR_SHIFT;
+ return mmio_read_32(base + GICR_IPRIORITYR + (n << 2));
+}
+
+/* Read GICR_ISPENDR0 (pending bits for this core's SGIs/PPIs). */
+static unsigned int gicr_read_ispendr0(unsigned int base)
+{
+ return mmio_read_32(base + GICR_ISPENDR0);
+}
+
+/******************************************************************************
+ * GIC Re-distributor interface accessors for individual interrupt
+ * manipulation
+ *****************************************************************************/
+/* Return the ISENABLER0 bit for `interrupt_id`: non-zero iff enabled. */
+static unsigned int gicr_get_isenabler0(unsigned int base,
+ unsigned int interrupt_id)
+{
+ unsigned bit_num = interrupt_id & ((1 << ISENABLER_SHIFT) - 1);
+ return gicr_read_isenabler0(base) & (1 << bit_num);
+}
+
+/* Enable `interrupt_id` at the redistributor (write-1-to-enable). */
+static void gicr_set_isenabler0(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned bit_num = interrupt_id & ((1 << ISENABLER_SHIFT) - 1);
+ gicr_write_isenabler0(base, (1 << bit_num));
+}
+
+/* Disable `interrupt_id` at the redistributor (write-1-to-disable). */
+static void gicr_set_icenabler0(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned bit_num = interrupt_id & ((1 << ISENABLER_SHIFT) - 1);
+ gicr_write_icenabler0(base, (1 << bit_num));
+}
+
+/* Clear the pending state of `interrupt_id` (write-1-to-clear). */
+static void gicr_set_icpendr0(unsigned int base, unsigned int interrupt_id)
+{
+ unsigned bit_num = interrupt_id & ((1 << ICPENDR_SHIFT) - 1);
+ gicr_write_icpendr0(base, (1 << bit_num));
+}
+
+/******************************************************************************
+ * GICv3 public driver API
+ *****************************************************************************/
+/* Enable Group 1 interrupt delivery through the GICv3 system registers. */
+void gicv3_enable_cpuif(void)
+{
+ /* Assert that system register access is enabled */
+ assert(IS_IN_EL2() ? (read_icc_sre_el2() & ICC_SRE_SRE_BIT) :
+ (read_icc_sre_el1() & ICC_SRE_SRE_BIT));
+
+ /* Enable Group1 non secure interrupts */
+ write_icc_igrpen1_el1(read_icc_igrpen1_el1() | IGRPEN1_EL1_ENABLE_BIT);
+ isb();
+}
+
+/* Program the priority mask to accept all priorities, then enable Group 1. */
+void gicv3_setup_cpuif(void)
+{
+ /* Set the priority mask register to allow all interrupts to trickle in */
+ write_icc_pmr_el1(GIC_PRI_MASK);
+ isb();
+ gicv3_enable_cpuif();
+}
+
+/* Disable Group 1 interrupt delivery on this core. */
+void gicv3_disable_cpuif(void)
+{
+ /* Disable Group1 non secure interrupts */
+ write_icc_igrpen1_el1(read_icc_igrpen1_el1() &
+ ~IGRPEN1_EL1_ENABLE_BIT);
+ isb();
+}
+
+/* Save this core's CPU interface state (ICC_IGRPEN1) before suspend. */
+void gicv3_save_cpuif_context(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /* Set the `is_suspended` flag as this core is being suspended. */
+ pcpu_ctx[core_pos].is_suspended = 1;
+ pcpu_ctx[core_pos].icc_igrpen1 = read_icc_igrpen1_el1();
+}
+
+/* Restore this core's CPU interface state after resume; PMR is reset. */
+void gicv3_restore_cpuif_context(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /* Reset the `is_suspended` flag as this core has resumed from suspend. */
+ pcpu_ctx[core_pos].is_suspended = 0;
+ write_icc_pmr_el1(GIC_PRI_MASK);
+ write_icc_igrpen1_el1(pcpu_ctx[core_pos].icc_igrpen1);
+ isb();
+}
+
+/*
+ * Save the redistributor SGI/PPI context (ISENABLER0, IPRIORITYR,
+ * ICFGR1) for the calling core and for every core already marked as
+ * suspended, so that a single core can preserve state on behalf of
+ * the whole system before a system suspend.
+ */
+void gicv3_save_sgi_ppi_context(void)
+{
+ unsigned int i, core_pos;
+ unsigned int my_core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /* Save the context for all the suspended cores */
+ for (core_pos = 0; core_pos < PLATFORM_CORE_COUNT; core_pos++) {
+ /*
+ * Continue if the core pos is not the current core
+ * and has not suspended
+ */
+ if ((core_pos != my_core_pos) &&
+ (!pcpu_ctx[core_pos].is_suspended))
+ continue;
+
+ assert(rdist_pcpu_base[core_pos]);
+
+ pcpu_ctx[core_pos].gicr_isenabler =
+ gicr_read_isenabler0(rdist_pcpu_base[core_pos]);
+
+ /* Read the ipriority registers, 4 at a time */
+ for (i = 0; i < (NUM_PCPU_INTR >> IPRIORITYR_SHIFT); i++)
+ pcpu_ctx[core_pos].gicr_ipriorityr[i] =
+ gicr_read_ipriorityr(rdist_pcpu_base[core_pos],
+ i << IPRIORITYR_SHIFT);
+
+ pcpu_ctx[core_pos].gicr_icfgr =
+ gicr_read_icfgr1(rdist_pcpu_base[core_pos]);
+ }
+}
+
+/*
+ * Restore the redistributor SGI/PPI context saved by
+ * gicv3_save_sgi_ppi_context(), for the calling core and every core
+ * still marked as suspended. Enables are restored last, after
+ * priorities and trigger configuration.
+ */
+void gicv3_restore_sgi_ppi_context(void)
+{
+ unsigned int i, core_pos;
+ unsigned int my_core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /* Restore the context for all the suspended cores */
+ for (core_pos = 0; core_pos < PLATFORM_CORE_COUNT; core_pos++) {
+ /*
+ * Continue if the core pos is not the current core
+ * and has not suspended
+ */
+ if ((core_pos != my_core_pos) &&
+ (!pcpu_ctx[core_pos].is_suspended))
+ continue;
+
+ assert(rdist_pcpu_base[core_pos]);
+
+ /* Read the ipriority registers, 4 at a time */
+ for (i = 0; i < (NUM_PCPU_INTR >> IPRIORITYR_SHIFT); i++)
+ gicr_write_ipriorityr(rdist_pcpu_base[core_pos],
+ i << IPRIORITYR_SHIFT,
+ pcpu_ctx[core_pos].gicr_ipriorityr[i]);
+
+ gicr_write_icfgr1(rdist_pcpu_base[core_pos],
+ pcpu_ctx[core_pos].gicr_icfgr);
+ gicr_write_isenabler0(rdist_pcpu_base[core_pos],
+ pcpu_ctx[core_pos].gicr_isenabler);
+ }
+}
+
+/*
+ * Return the priority of interrupt `interrupt_id`. SGIs/PPIs live in the
+ * calling core's redistributor frame; SPIs in the distributor. Both
+ * register files are byte-accessible, one byte per interrupt.
+ */
+unsigned int gicv3_get_ipriorityr(unsigned int interrupt_id)
+{
+ unsigned int core_pos;
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ return mmio_read_8(rdist_pcpu_base[core_pos] + GICR_IPRIORITYR
+ + interrupt_id);
+ } else {
+ return mmio_read_8(gicd_base_addr +
+ GICD_IPRIORITYR + interrupt_id);
+ }
+}
+
+/*
+ * Set the priority of interrupt `interrupt_id`, writing the redistributor
+ * byte for SGIs/PPIs and the distributor byte for SPIs.
+ */
+void gicv3_set_ipriorityr(unsigned int interrupt_id,
+ unsigned int priority)
+{
+ unsigned int core_pos;
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ mmio_write_8(rdist_pcpu_base[core_pos] + GICR_IPRIORITYR
+ + interrupt_id, priority & GIC_PRI_MASK);
+ } else {
+ mmio_write_8(gicd_base_addr + GICD_IPRIORITYR + interrupt_id,
+ priority & GIC_PRI_MASK);
+ }
+}
+
/*
 * Generate the Group 1 SGI `sgi_id` targeted at the core identified by
 * `core_pos`, by packing its MPIDR affinity fields into ICC_SGI1R.
 */
void gicv3_send_sgi(unsigned int sgi_id, unsigned int core_pos)
{
	unsigned long long aff0, aff1, aff2;
	unsigned long long sgir, target_list;

	assert(IS_SGI(sgi_id));
	assert(core_pos < PLATFORM_CORE_COUNT);

	/* The target MPIDR must have been recorded by the GICR probe */
	assert(mpidr_list[core_pos] != UINT64_MAX);

	/* Extract the affinity information */
	aff0 = MPIDR_AFF_ID(mpidr_list[core_pos], 0);
	aff1 = MPIDR_AFF_ID(mpidr_list[core_pos], 1);
	aff2 = MPIDR_AFF_ID(mpidr_list[core_pos], 2);
#ifndef AARCH32
	/* Affinity level 3 only exists in the 64-bit MPIDR format */
	unsigned long long aff3;
	aff3 = MPIDR_AFF_ID(mpidr_list[core_pos], 3);
#endif

	/* Construct the SGI target list using Affinity 0 */
	assert(aff0 < SGI_TARGET_MAX_AFF0);
	target_list = 1 << aff0;

	/* Construct the SGI target affinity */
	sgir =
#ifndef AARCH32
		((aff3 & SGI1R_AFF_MASK) << SGI1R_AFF3_SHIFT) |
#endif
		((aff2 & SGI1R_AFF_MASK) << SGI1R_AFF2_SHIFT) |
		((aff1 & SGI1R_AFF_MASK) << SGI1R_AFF1_SHIFT) |
		((target_list & SGI1R_TARGET_LIST_MASK)
			<< SGI1R_TARGET_LIST_SHIFT);

	/* Combine SGI target affinity with the SGI ID */
	sgir |= ((sgi_id & SGI1R_INTID_MASK) << SGI1R_INTID_SHIFT);
#ifndef AARCH32
	write_icc_sgi1r(sgir);
#else
	/* ICC_SGI1R is a 64-bit register; AArch32 needs the 64-bit accessor */
	write64_icc_sgi1r(sgir);
#endif
	/* Ensure the SGI generation completes before continuing */
	isb();
}
+
+void gicv3_set_intr_route(unsigned int interrupt_id,
+ unsigned int core_pos)
+{
+ unsigned long long route_affinity;
+
+ assert(gicd_base_addr);
+ assert(core_pos < PLATFORM_CORE_COUNT);
+ assert(mpidr_list[core_pos] != UINT64_MAX);
+
+ /* Routing information can be set only for SPIs */
+ assert(IS_SPI(interrupt_id));
+ route_affinity = mpidr_list[core_pos];
+
+ gicd_write_irouter(gicd_base_addr, interrupt_id, route_affinity);
+}
+
+unsigned int gicv3_get_isenabler(unsigned int interrupt_id)
+{
+ unsigned int core_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ return gicr_get_isenabler0(rdist_pcpu_base[core_pos], interrupt_id);
+ } else
+ return gicd_get_isenabler(gicd_base_addr, interrupt_id);
+}
+
+void gicv3_set_isenabler(unsigned int interrupt_id)
+{
+ unsigned int core_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ gicr_set_isenabler0(rdist_pcpu_base[core_pos], interrupt_id);
+ } else
+ gicd_set_isenabler(gicd_base_addr, interrupt_id);
+}
+
+void gicv3_set_icenabler(unsigned int interrupt_id)
+{
+ unsigned int core_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ gicr_set_icenabler0(rdist_pcpu_base[core_pos], interrupt_id);
+ } else
+ gicd_set_icenabler(gicd_base_addr, interrupt_id);
+}
+
+unsigned int gicv3_get_ispendr(unsigned int interrupt_id)
+{
+ unsigned int ispendr;
+ unsigned int bit_pos, core_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_VALID_INTR_ID(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ ispendr = gicr_read_ispendr0(rdist_pcpu_base[core_pos]);
+ } else
+ ispendr = gicd_read_ispendr(gicd_base_addr, interrupt_id);
+
+ bit_pos = interrupt_id % (1 << ISPENDR_SHIFT);
+ return !!(ispendr & (1 << bit_pos));
+}
+
+void gicv3_set_icpendr(unsigned int interrupt_id)
+{
+ unsigned int core_pos;
+
+ assert(gicd_base_addr);
+ assert(IS_SPI(interrupt_id) || IS_PPI(interrupt_id));
+
+ if (interrupt_id < MIN_SPI_ID) {
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ assert(rdist_pcpu_base[core_pos]);
+ gicr_set_icpendr0(rdist_pcpu_base[core_pos], interrupt_id);
+
+ } else
+ gicd_set_icpendr(gicd_base_addr, interrupt_id);
+}
+
+void gicv3_probe_redistif_addr(void)
+{
+ unsigned long long typer_val;
+ uintptr_t rdistif_base;
+ unsigned long long affinity;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(gicr_base_addr);
+
+ /*
+ * Return if the re-distributor base address is already populated
+ * for this core.
+ */
+ if (rdist_pcpu_base[core_pos])
+ return;
+
+ /* Iterate over the GICR frames and find the matching frame*/
+ rdistif_base = gicr_base_addr;
+ affinity = gic_typer_affinity_from_mpidr(read_mpidr_el1() & MPIDR_AFFINITY_MASK);
+ do {
+ typer_val = gicr_read_typer(rdistif_base);
+ if (affinity == ((typer_val >> TYPER_AFF_VAL_SHIFT) & TYPER_AFF_VAL_MASK)) {
+ rdist_pcpu_base[core_pos] = rdistif_base;
+ mpidr_list[core_pos] = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+ return;
+ }
+ rdistif_base += (1 << GICR_PCPUBASE_SHIFT);
+ } while (!(typer_val & TYPER_LAST_BIT));
+
+ ERROR("Re-distributor address not found for core %d\n", core_pos);
+ panic();
+}
+
/*
 * Enable Group 1 interrupt forwarding in the GIC distributor. Assumes EL3
 * firmware has already configured affinity routing (ARE_NS) and enabled
 * system register access (ICC_SRE).
 */
void gicv3_setup_distif(void)
{
	unsigned int gicd_ctlr;

	assert(gicd_base_addr);

	/* Check for system register support */
#ifndef AARCH32
	assert(read_id_aa64pfr0_el1() &
		(ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT));
#else
	assert(read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT));
#endif

	/* Assert that system register access is enabled */
	assert(is_sre_enabled());

	/* Enable the forwarding of interrupts to CPU interface */
	gicd_ctlr = gicd_read_ctlr(gicd_base_addr);

	/* Assert ARE_NS bit in GICD: affinity routing must already be on */
	assert(gicd_ctlr & (GICD_CTLR_ARE_NS_MASK << GICD_CTLR_ARE_NS_SHIFT));

	/* Read-modify-write so other GICD_CTLR settings are preserved */
	gicd_ctlr |= GICD_CTLR_ENABLE_GRP1A;
	gicd_write_ctlr(gicd_base_addr, gicd_ctlr);
}
+
+void gicv3_init(uintptr_t gicr_base, uintptr_t gicd_base)
+{
+ assert(gicr_base);
+ assert(gicd_base);
+
+ gicr_base_addr = gicr_base;
+ gicd_base_addr = gicd_base;
+}
diff --git a/drivers/arm/pl011/aarch32/pl011_console.S b/drivers/arm/pl011/aarch32/pl011_console.S
new file mode 100644
index 000000000..96da6f92b
--- /dev/null
+++ b/drivers/arm/pl011/aarch32/pl011_console.S
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <console.h>
+#include <pl011.h>
+
+ .globl console_init
+ .globl console_putc
+ .globl console_getc
+ .globl console_try_getc
+ .globl console_flush
+ .globl console_core_init
+ .globl console_core_putc
+ .globl console_core_getc
+ .globl console_core_flush
+
+ /*
+ * The console base is in the data section and not in .bss
+ * even though it is zero-init. In particular, this allows
+ * the console functions to start using this variable before
+ * the runtime memory is initialized for images which do not
+ * need to copy the .data section from ROM to RAM.
+ */
+ .section .data.console_base
+ .align 2
+console_base: .word 0x0
+
+ /* -----------------------------------------------
+ * int console_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. It saves
+ * the console base to the data section.
+ * In: r0 - Console base address
+ * r1 - Uart clock in Hz
+ * r2 - Baud rate
+ * Out: r0 - Return 1 on success, 0 on error.
+ * Clobber list : r1 - r3
+ * -----------------------------------------------
+ */
func console_init
	/* Record the base so console_putc/getc/flush can find the UART */
	ldr	r3, =console_base
	str	r0, [r3]
	/* Tail-call: r0-r2 still hold base, clock and baud rate */
	b	console_core_init
endfunc console_init
+
+ /* -----------------------------------------------
+ * int console_core_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. This
+ * function will be accessed by console_init and
+ * crash reporting.
+ * In: r0 - Console base address
+ * r1 - Uart clock in Hz
+ * r2 - Baud rate
+ * Out: r0 - Return 1 on success, 0 on error.
+ * Clobber list : r1 - r3
+ * -----------------------------------------------
+ */
func console_core_init
	/* Check the input base address */
	cmp	r0, #0
	beq	core_init_fail
	/* Check baud rate and uart clock for sanity */
	cmp	r1, #0
	beq	core_init_fail
	cmp	r2, #0
	beq	core_init_fail
	/* Disable the UART before initialization */
	ldr	r3, [r0, #UARTCR]
	bic	r3, r3, #PL011_UARTCR_UARTEN
	str	r3, [r0, #UARTCR]
	/* Program the baudrate */
	/* Divisor = (Uart clock * 4) / baudrate */
	lsl	r1, r1, #2
	udiv	r2, r1, r2
	/* IBRD = Divisor >> 6 */
	lsr	r1, r2, #6
	/* Write the IBRD */
	str	r1, [r0, #UARTIBRD]
	/* FBRD = Divisor & 0x3F */
	and	r1, r2, #0x3f
	/* Write the FBRD */
	str	r1, [r0, #UARTFBRD]
	/* Set word length / FIFO configuration via the line control reg */
	mov	r1, #PL011_LINE_CONTROL
	str	r1, [r0, #UARTLCR_H]
	/* Clear any pending errors */
	mov	r1, #0
	str	r1, [r0, #UARTECR]
	/* Enable tx, rx, and uart overall */
	ldr	r1, =(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
	str	r1, [r0, #UARTCR]
	mov	r0, #1
	bx	lr
core_init_fail:
	mov	r0, #0
	bx	lr
endfunc console_core_init
+
+ /* ---------------------------------------------
+ * int console_putc(int c)
+ * Function to output a character over the
+ * console. It returns the character printed on
+ * success or an error code.
+ * In : r0 - Character to be printed
+ * Out : r0 - Input character or error code.
+ * Clobber list : r1, r2
+ * ---------------------------------------------
+ */
func console_putc
	/* Load the saved console base into r1 for console_core_putc */
	ldr	r1, =console_base
	ldr	r1, [r1]
	b	console_core_putc
endfunc console_putc
+
+ /* --------------------------------------------------------
+ * int console_core_putc(int c, uintptr_t base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or an error
+ * code.
+ * In : r0 - Character to be printed
+ * r1 - Console base address
+ * Out : r0 - Input character or error code.
+ * Clobber list : r2
+ * --------------------------------------------------------
+ */
func console_core_putc
	/* Check the input parameter */
	cmp	r1, #0
	beq	putc_error
	/* Prepend '\r' to '\n' (CRLF line endings for terminals) */
	cmp	r0, #0xA
	bne	2f
1:
	/* Wait while the transmit FIFO is full */
	ldr	r2, [r1, #UARTFR]
	tst	r2, #PL011_UARTFR_TXFF
	bne	1b
	mov	r2, #0xD
	str	r2, [r1, #UARTDR]
2:
	/* Wait while the transmit FIFO is full */
	ldr	r2, [r1, #UARTFR]
	tst	r2, #PL011_UARTFR_TXFF
	bne	2b

	/* Only write 8 bits */
	and	r0, r0, #0xFF
	str	r0, [r1, #UARTDR]
	bx	lr
putc_error:
	mov	r0, #ERROR_NO_VALID_CONSOLE
	bx	lr
endfunc console_core_putc
+
+ /* ---------------------------------------------
+ * int console_getc(void)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code on error. This function is
+ * blocking, it waits until there is an
+ * available character to return.
+ * Out : r0 - Return character or error code.
+ * Clobber list : r0 - r3
+ * ---------------------------------------------
+ */
func console_getc
	ldr	r2, =console_base
	ldr	r2, [r2]
	/* Save the return address: the bl below clobbers lr */
	mov	r3, lr

	/* Loop until it returns a character or an error. */
1:	mov	r0, r2
	bl	console_core_getc
	cmp	r0, #ERROR_NO_PENDING_CHAR
	beq	1b

	bx	r3
endfunc console_getc
+
+ /* ---------------------------------------------
+ * int console_try_getc(void)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code on error. This function is
+ * non-blocking, it returns immediately.
+ * Out : r0 - Return character or error code.
+ * Clobber list : r0, r1
+ * ---------------------------------------------
+ */
func console_try_getc
	/* Non-blocking variant: delegate once with the saved base */
	ldr	r0, =console_base
	ldr	r0, [r0]
	b	console_core_getc
endfunc console_try_getc
+
+ /* ---------------------------------------------
+ * int console_core_getc(uintptr_t base_addr)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code.
+ * In : r0 - Console base address
+ * Out : r0 - Return character or error code.
+ * Clobber list : r0, r1
+ * ---------------------------------------------
+ */
func console_core_getc
	cmp	r0, #0
	beq	getc_error

	/* Check if the receive FIFO is empty */
	ldr	r1, [r0, #UARTFR]
	tst	r1, #PL011_UARTFR_RXFE
	bne	getc_empty

	/* Read a character from the FIFO */
	ldr	r1, [r0, #UARTDR]
	/* Keep only the 8 data bits, masking out error flags */
	and	r0, r1, #0xFF
	bx	lr

getc_empty:
	mov	r0, #ERROR_NO_PENDING_CHAR
	bx	lr
getc_error:
	mov	r0, #ERROR_NO_VALID_CONSOLE
	bx	lr
endfunc console_core_getc
+
+ /* ---------------------------------------------
+ * int console_flush(void)
+ * Function to force a write of all buffered
+ * data that hasn't been output. It returns 0
+ * upon successful completion, otherwise it
+ * returns an error code.
+ * Out: r0 - Error code or 0.
+ * Clobber list : r0, r1
+ * ---------------------------------------------
+ */
func console_flush
	/* Delegate with the saved console base */
	ldr	r0, =console_base
	ldr	r0, [r0]
	b	console_core_flush
endfunc console_flush
+
+ /* ---------------------------------------------
+ * int console_core_flush(uintptr_t base_addr)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : r0 - Console base address
+ * Out : r0 - Error code or 0.
+ * Clobber list : r0, r1
+ * ---------------------------------------------
+ */
func console_core_flush
	cmp	r0, #0
	beq	flush_error

1:
	/* Loop while the transmit FIFO is busy (data still shifting out) */
	ldr	r1, [r0, #UARTFR]
	tst	r1, #PL011_UARTFR_BUSY
	bne	1b

	mov	r0, #0
	bx	lr
flush_error:
	mov	r0, #ERROR_NO_VALID_CONSOLE
	bx	lr
endfunc console_core_flush
diff --git a/drivers/arm/pl011/aarch64/pl011_console.S b/drivers/arm/pl011/aarch64/pl011_console.S
new file mode 100644
index 000000000..d87982a64
--- /dev/null
+++ b/drivers/arm/pl011/aarch64/pl011_console.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <console.h>
+#include <pl011.h>
+
+ .globl console_init
+ .globl console_putc
+ .globl console_getc
+ .globl console_try_getc
+ .globl console_flush
+ .globl console_core_init
+ .globl console_core_putc
+ .globl console_core_getc
+ .globl console_core_flush
+
+ /*
+ * The console base is in the data section and not in .bss
+ * even though it is zero-init. In particular, this allows
+ * the console functions to start using this variable before
+ * the runtime memory is initialized for images which do not
+ * need to copy the .data section from ROM to RAM.
+ */
+ .section .data.console_base
+ .align 3
+console_base: .quad 0x0
+
+ /* -----------------------------------------------
+ * int console_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. It saves
+ * the console base to the data section.
+ * In: x0 - Console base address
+ * w1 - Uart clock in Hz
+ * w2 - Baud rate
+ * Out: w0 - Return 1 on success, 0 on error.
+ * Clobber list : x1 - x3
+ * -----------------------------------------------
+ */
func console_init
	/* Record the base so console_putc/getc/flush can find the UART */
	adrp	x3, console_base
	str	x0, [x3, :lo12:console_base]
	/* Tail-call: x0/w1/w2 still hold base, clock and baud rate */
	b	console_core_init
endfunc console_init
+
+ /* -----------------------------------------------
+ * int console_core_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. This
+ * function will be accessed by console_init and
+ * crash reporting.
+ * In: x0 - Console base address
+ * w1 - Uart clock in Hz
+ * w2 - Baud rate
+ * Out: w0 - Return 1 on success, 0 on error.
+ * Clobber list : x1 - x3
+ * -----------------------------------------------
+ */
func console_core_init
	/* Check the input base address */
	cbz	x0, init_fail
	/* Check baud rate and uart clock for sanity */
	cbz	w1, init_fail
	cbz	w2, init_fail
	/* Disable uart before programming */
	ldr	w3, [x0, #UARTCR]
	bic	w3, w3, #PL011_UARTCR_UARTEN
	str	w3, [x0, #UARTCR]
	/* Program the baudrate */
	/* Divisor = (Uart clock * 4) / baudrate */
	lsl	w1, w1, #2
	udiv	w2, w1, w2
	/* IBRD = Divisor >> 6 */
	lsr	w1, w2, #6
	/* Write the IBRD */
	str	w1, [x0, #UARTIBRD]
	/* FBRD = Divisor & 0x3F */
	and	w1, w2, #0x3f
	/* Write the FBRD */
	str	w1, [x0, #UARTFBRD]
	/* Set word length / FIFO configuration via the line control reg */
	mov	w1, #PL011_LINE_CONTROL
	str	w1, [x0, #UARTLCR_H]
	/* Clear any pending errors */
	str	wzr, [x0, #UARTECR]
	/* Enable tx, rx, and uart overall */
	mov	w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
	str	w1, [x0, #UARTCR]
	mov	w0, #1
	ret
init_fail:
	mov	w0, wzr
	ret
endfunc console_core_init
+
+ /* ---------------------------------------------
+ * int console_putc(int c)
+ * Function to output a character over the
+ * console. It returns the character printed on
+ * success or an error code.
+ * In : x0 - Character to be printed
+ * Out : w0 - Input character or error code.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
func console_putc
	/* Load the saved console base into x1 for console_core_putc */
	adrp	x1, console_base
	ldr	x1, [x1, :lo12:console_base]
	b	console_core_putc
endfunc console_putc
+
+ /* ---------------------------------------------
+ * int console_core_putc(int c, uintptr_t base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or an error
+ * code.
+ * In : w0 - Character to be printed
+ * x1 - Console base address
+ * Out : w0 - Input character or error code.
+ * Clobber list : x2
+ * ---------------------------------------------
+ */
func console_core_putc
	/* Check the input parameter */
	cbz	x1, putc_error
	/* Prepend '\r' to '\n' (CRLF line endings for terminals) */
	cmp	w0, #0xA
	b.ne	2f
1:
	/* Wait while the transmit FIFO is full */
	ldr	w2, [x1, #UARTFR]
	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 1b
	mov	w2, #0xD
	str	w2, [x1, #UARTDR]
2:
	/* Wait while the transmit FIFO is full */
	ldr	w2, [x1, #UARTFR]
	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 2b

	/* Only write 8 bits */
	and	w0, w0, #0xFF
	str	w0, [x1, #UARTDR]
	ret
putc_error:
	mov	w0, #ERROR_NO_VALID_CONSOLE
	ret
endfunc console_core_putc
+
+ /* ---------------------------------------------
+ * int console_getc(void)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code on error. This function is
+ * blocking, it waits until there is an
+ * available character to return.
+ * Out : w0 - Return character or error code.
+ * Clobber list : x0 - x3
+ * ---------------------------------------------
+ */
func console_getc
	adrp	x2, console_base
	ldr	x2, [x2, :lo12:console_base]
	/* Save the return address: the bl below clobbers x30 */
	mov	x3, x30

	/* Loop until it returns a character or an error. */
1:	mov	x0, x2
	bl	console_core_getc
	cmp	w0, #ERROR_NO_PENDING_CHAR
	b.eq	1b

	ret	x3
endfunc console_getc
+
+ /* ---------------------------------------------
+ * int console_try_getc(void)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code on error. This function is
+ * non-blocking, it returns immediately.
+ * Out : w0 - Return character or error code.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
func console_try_getc
	/* Non-blocking variant: delegate once with the saved base */
	adrp	x0, console_base
	ldr	x0, [x0, :lo12:console_base]
	b	console_core_getc
endfunc console_try_getc
+
+ /* ---------------------------------------------
+ * int console_core_getc(uintptr_t base_addr)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code.
+ * In : x0 - Console base address
+ * Out : w0 - Return character or error code.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
func console_core_getc
	cbz	x0, getc_error

	/* Check if the receive FIFO is empty */
	ldr	w1, [x0, #UARTFR]
	tbnz	w1, #PL011_UARTFR_RXFE_BIT, getc_empty

	/* Read a character from the FIFO */
	ldr	w0, [x0, #UARTDR]
	/* Keep only the 8 data bits, masking out error flags */
	and	w0, w0, #0xFF
	ret

getc_empty:
	mov	w0, #ERROR_NO_PENDING_CHAR
	ret
getc_error:
	mov	w0, #ERROR_NO_VALID_CONSOLE
	ret
endfunc console_core_getc
+
+ /* ---------------------------------------------
+ * int console_flush(void)
+ * Function to force a write of all buffered
+ * data that hasn't been output. It returns 0
+ * upon successful completion, otherwise it
+ * returns an error code.
+ * Out: w0 - Error code or 0.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
func console_flush
	/* Delegate with the saved console base */
	adrp	x0, console_base
	ldr	x0, [x0, :lo12:console_base]
	b	console_core_flush
endfunc console_flush
+
+ /* ---------------------------------------------
+ * int console_core_flush(uintptr_t base_addr)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : x0 - Console base address
+ * Out : w0 - Error code or 0.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
func console_core_flush
	cbz	x0, flush_error

1:
	/* Loop while the UART is busy (data still shifting out) */
	ldr	w1, [x0, #UARTFR]
	tbnz	w1, #PL011_UARTFR_BUSY_BIT, 1b

	mov	w0, wzr
	ret
flush_error:
	mov	w0, #ERROR_NO_VALID_CONSOLE
	ret
endfunc console_core_flush
diff --git a/drivers/arm/sp805/sp805.c b/drivers/arm/sp805/sp805.c
new file mode 100644
index 000000000..72a668d2c
--- /dev/null
+++ b/drivers/arm/sp805/sp805.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <sp805.h>
+#include <stdint.h>
+
+static inline uint32_t sp805_read_wdog_load(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_LOAD_OFF);
+}
+
+static inline void sp805_write_wdog_load(unsigned long base, uint32_t value)
+{
+ assert(base);
+ mmio_write_32(base + SP805_WDOG_LOAD_OFF, value);
+}
+
+static inline uint32_t sp805_read_wdog_value(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_VALUE_0FF);
+}
+
+static inline uint32_t sp805_read_wdog_ctrl(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_CTRL_OFF) & SP805_WDOG_CTRL_MASK;
+}
+
+static inline void sp805_write_wdog_ctrl(unsigned long base, uint32_t value)
+{
+ assert(base);
+ /* Not setting reserved bits */
+ assert(!(value & ~SP805_WDOG_CTRL_MASK));
+ mmio_write_32(base + SP805_WDOG_CTRL_OFF, value);
+}
+
+static inline void sp805_write_wdog_int_clr(unsigned long base, uint32_t value)
+{
+ assert(base);
+ mmio_write_32(base + SP805_WDOG_INT_CLR_OFF, value);
+}
+
+static inline uint32_t sp805_read_wdog_ris(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_RIS_OFF) & SP805_WDOG_RIS_MASK;
+}
+
+static inline uint32_t sp805_read_wdog_mis(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_MIS_OFF) & SP805_WDOG_MIS_MASK;
+}
+
+static inline uint32_t sp805_read_wdog_lock(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_LOCK_OFF);
+}
+
+static inline void sp805_write_wdog_lock(unsigned long base, uint32_t value)
+{
+ assert(base);
+ mmio_write_32(base + SP805_WDOG_LOCK_OFF, value);
+}
+
+static inline uint32_t sp805_read_wdog_itcr(unsigned long base)
+{
+ assert(base);
+ return mmio_read_32(base + SP805_WDOG_ITCR_OFF) & SP805_WDOG_ITCR_MASK;
+}
+
+static inline void sp805_write_wdog_itcr(unsigned long base, uint32_t value)
+{
+ assert(base);
+ /* Not setting reserved bits */
+ assert(!(value & ~SP805_WDOG_ITCR_MASK));
+ mmio_write_32(base + SP805_WDOG_ITCR_OFF, value);
+}
+
+static inline void sp805_write_wdog_itop(unsigned long base, uint32_t value)
+{
+ assert(base);
+ /* Not setting reserved bits */
+ assert(!(value & ~SP805_WDOG_ITOP_MASK));
+ mmio_write_32(base + SP805_WDOG_ITOP_OFF, value);
+}
+
+static inline uint32_t sp805_read_wdog_periph_id(unsigned long base, unsigned int id)
+{
+ assert(base);
+ assert(id < 4);
+ return mmio_read_32(base + SP805_WDOG_PERIPH_ID_OFF + (id << 2));
+}
+
+static inline uint32_t sp805_read_wdog_pcell_id(unsigned long base, unsigned int id)
+{
+ assert(base);
+ assert(id < 4);
+ return mmio_read_32(base + SP805_WDOG_PCELL_ID_OFF + (id << 2));
+}
+
+void sp805_wdog_start(uint32_t wdog_cycles)
+{
+ /* Unlock to access the watchdog registers */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+
+ /* Write the number of cycles needed */
+ sp805_write_wdog_load(SP805_WDOG_BASE, wdog_cycles);
+
+ /* Enable reset interrupt and watchdog interrupt on expiry */
+ sp805_write_wdog_ctrl(SP805_WDOG_BASE,
+ SP805_WDOG_CTRL_RESEN | SP805_WDOG_CTRL_INTEN);
+
+ /* Lock registers so that they can't be accidently overwritten */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+}
+
+void sp805_wdog_stop(void)
+{
+ /* Unlock to access the watchdog registers */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+
+ /* Clearing INTEN bit stops the counter */
+ sp805_write_wdog_ctrl(SP805_WDOG_BASE, 0x00);
+
+ /* Lock registers so that they can't be accidently overwritten */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+}
+
+void sp805_wdog_refresh(void)
+{
+ /* Unlock to access the watchdog registers */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+
+ /*
+ * Write of any value to WdogIntClr clears interrupt and reloads
+ * the counter from the value in WdogLoad Register.
+ */
+ sp805_write_wdog_int_clr(SP805_WDOG_BASE, 1);
+
+ /* Lock registers so that they can't be accidently overwritten */
+ sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+}
diff --git a/drivers/arm/timer/private_timer.c b/drivers/arm/timer/private_timer.c
new file mode 100644
index 000000000..c629448cd
--- /dev/null
+++ b/drivers/arm/timer/private_timer.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+
/*******************************************************************************
 * Data structure to keep track of the per-cpu EL2 physical timer (CNTHP)
 * context across power management operations.
 ******************************************************************************/
typedef struct timer_context {
	uint64_t cval;	/* Saved CNTHP_CVAL_EL2 compare value */
	uint32_t ctl;	/* Saved CNTHP_CTL_EL2 control bits */
} timer_context_t;

/* One saved timer context per core, indexed by linear core position */
static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT];
+
/*******************************************************************************
 * This function programs the generic timer to fire after `timeo` ms.
 ******************************************************************************/
void private_timer_start(unsigned long timeo)
{
	uint64_t cval, freq;
	uint32_t ctl = 0;

	/*
	 * Program an absolute compare value `timeo` ms from now. Writing
	 * CVAL supersedes any previous pending timer activation.
	 */
	cval = read_cntpct_el0();
	freq = read_cntfrq_el0();
	cval += (freq * timeo) / 1000;
	write_cnthp_cval_el2(cval);

	/*
	 * Enable the EL2 physical timer (CNTHP); the comment in the original
	 * said "secure physical timer" but the registers used are CNTHP_*.
	 * ENABLE is set and IMASK left clear, so the interrupt can fire.
	 */
	set_cntp_ctl_enable(ctl);
	write_cnthp_ctl_el2(ctl);
}
+
/*******************************************************************************
 * This function disables the EL2 physical timer, deasserting its interrupt
 * prior to cpu power down.
 ******************************************************************************/
void private_timer_stop(void)
{
	/* Clearing the ENABLE bit stops the timer */
	write_cnthp_ctl_el2(0);
}
+
+/*******************************************************************************
+ * This function saves the timer context prior to cpu suspension
+ ******************************************************************************/
+void private_timer_save(void)
+{
+ uint32_t linear_id = platform_get_core_pos(read_mpidr_el1());
+
+ pcpu_timer_context[linear_id].cval = read_cnthp_cval_el2();
+ pcpu_timer_context[linear_id].ctl = read_cnthp_ctl_el2();
+ flush_dcache_range((uintptr_t) &pcpu_timer_context[linear_id],
+ sizeof(pcpu_timer_context[linear_id]));
+}
+
+/*******************************************************************************
+ * This function restores the timer context post cpu resummption
+ ******************************************************************************/
+void private_timer_restore(void)
+{
+ uint32_t linear_id = platform_get_core_pos(read_mpidr_el1());
+
+ write_cnthp_cval_el2(pcpu_timer_context[linear_id].cval);
+ write_cnthp_ctl_el2(pcpu_timer_context[linear_id].ctl);
+}
diff --git a/drivers/arm/timer/sp804.c b/drivers/arm/timer/sp804.c
new file mode 100644
index 000000000..de88cda21
--- /dev/null
+++ b/drivers/arm/timer/sp804.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <gic_v2.h>
+#include <mmio.h>
+#include <sp804.h>
+
+static unsigned int sp804_freq;
+static uintptr_t sp804_base;
+
+int sp804_timer_program(unsigned long time_out_ms)
+{
+ unsigned int load_val;
+ unsigned char ctrl_reg;
+
+ assert(sp804_base);
+ assert(time_out_ms);
+
+ /* Disable the timer */
+ ctrl_reg = mmio_read_8(sp804_base + SP804_CTRL_OFFSET);
+ ctrl_reg &= ~(TIMER_EN | INT_ENABLE);
+ mmio_write_8(sp804_base + SP804_CTRL_OFFSET, ctrl_reg);
+
+ /* Calculate the load value */
+ load_val = (sp804_freq * time_out_ms) / 1000;
+
+ /* Write the load value to sp804 timer */
+ mmio_write_32(sp804_base + SP804_LOAD_OFFSET, load_val);
+
+ /* Enable the timer */
+ ctrl_reg |= (TIMER_EN | INT_ENABLE);
+ mmio_write_8(sp804_base + SP804_CTRL_OFFSET, ctrl_reg);
+
+ return 0;
+}
+
/*
 * Quiesce the SP804: clear the interrupt line first, then disable the
 * timer and mask its interrupt. The ordering is deliberate (see below).
 */
static void sp804_timer_disable(void)
{
	unsigned char ctrl_reg;

	/*
	 * The interrupt line should be cleared prior to timer disable.
	 * Otherwise the interrupt line level decay from high to quiescent
	 * level is not quick enough which may trigger spurious interrupt.
	 * Write a dummy load value to sp804 timer to clear the interrupt.
	 */
	mmio_write_32(sp804_base + SP804_LOAD_OFFSET, 0xffff);

	/* De-assert the timer interrupt (any write clears it) */
	mmio_write_8(sp804_base + SP804_INT_CLR_OFFSET, 0x0);

	/* Disable the timer */
	ctrl_reg = mmio_read_8(sp804_base + SP804_CTRL_OFFSET);
	ctrl_reg &= ~(TIMER_EN | INT_ENABLE);
	mmio_write_8(sp804_base + SP804_CTRL_OFFSET, ctrl_reg);
}
+
+int sp804_timer_cancel(void)
+{
+ assert(sp804_base);
+ sp804_timer_disable();
+ return 0;
+}
+
+int sp804_timer_handler(void)
+{
+ assert(sp804_base);
+ sp804_timer_disable();
+ return 0;
+}
+
+int sp804_timer_init(uintptr_t base_addr, unsigned int timer_freq)
+{
+ unsigned char ctrl_reg;
+
+ /* Check input parameters */
+ assert(base_addr && timer_freq);
+
+ /* Check for duplicate initialization */
+ assert(sp804_base == 0);
+
+ sp804_base = base_addr;
+ sp804_freq = timer_freq;
+
+ /*
+ * Configure the timer in one shot mode, pre-scalar divider to 1,
+ * timer counter width to 32 bits and un-mask the interrupt.
+ */
+ ctrl_reg = ONESHOT_MODE | TIMER_PRE_DIV1 | TIMER_SIZE;
+ mmio_write_8(sp804_base + SP804_CTRL_OFFSET, ctrl_reg);
+
+ return 0;
+}
diff --git a/drivers/arm/timer/system_timer.c b/drivers/arm/timer/system_timer.c
new file mode 100644
index 000000000..3415e41b3
--- /dev/null
+++ b/drivers/arm/timer/system_timer.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <mmio.h>
+#include <system_timer.h>
+
+static uintptr_t g_systimer_base;
+
+/*
+ * Program the memory-mapped generic timer frame to raise an interrupt
+ * 'time_out_ms' milliseconds from now.
+ *
+ * Returns 0 on success; panics if the programmed compare value is already
+ * in the past once the timer is enabled.
+ * NOTE(review): 'freq * time_out_ms' is computed in unsigned long - on a
+ * 32-bit build a large timeout could overflow; confirm callers bound it.
+ */
+int program_systimer(unsigned long time_out_ms)
+{
+ unsigned int cntp_ctl;
+ unsigned long long count_val;
+ unsigned int freq;
+
+ /* Check timer base is initialised */
+ assert(g_systimer_base);
+
+ /* Compare value = current count + requested delta in timer ticks. */
+ count_val = mmio_read_64(g_systimer_base + CNTPCT_LO);
+ freq = read_cntfrq_el0();
+ count_val += (freq * time_out_ms) / 1000;
+ mmio_write_64(g_systimer_base + CNTP_CVAL_LO, count_val);
+
+ /* Enable the timer */
+ cntp_ctl = mmio_read_32(g_systimer_base + CNTP_CTL);
+ set_cntp_ctl_enable(cntp_ctl);
+ clr_cntp_ctl_imask(cntp_ctl);
+ mmio_write_32(g_systimer_base + CNTP_CTL, cntp_ctl);
+
+ /*
+ * Ensure that we have programmed a timer interrupt for a time in
+ * future. Else we will have to wait for the systimer to rollover
+ * for the interrupt to fire (which is 64 years).
+ */
+ if (count_val < mmio_read_64(g_systimer_base + CNTPCT_LO))
+ panic();
+
+ VERBOSE("%s : interrupt requested at sys_counter: %lld "
+ "time_out_ms: %ld\n", __func__, count_val, time_out_ms);
+
+ return 0;
+}
+
+/*
+ * Mask and disable the system timer.
+ *
+ * Writing CNTP_CTL with only the IMASK bit set simultaneously clears the
+ * ENABLE bit, so a single register write both masks the interrupt and
+ * stops the timer.
+ */
+static void disable_systimer(void)
+{
+ uint32_t val;
+
+ /* Check timer base is initialised */
+ assert(g_systimer_base);
+
+ /* Deassert and disable the timer interrupt */
+ val = 0;
+ set_cntp_ctl_imask(val);
+ mmio_write_32(g_systimer_base + CNTP_CTL, val);
+}
+
+/* Cancel a programmed system-timer interrupt. Always returns 0. */
+int cancel_systimer(void)
+{
+ disable_systimer();
+
+ return 0;
+}
+
+/* Interrupt-handler hook: silence the timer after it fires. Returns 0. */
+int handler_systimer(void)
+{
+ disable_systimer();
+
+ return 0;
+}
+
+/*
+ * One-time initialisation of the system timer driver.
+ *
+ * systimer_base - MMIO base of the CNTBase timer frame.
+ *
+ * Returns 0. Disables the timer and zeroes CVAL so the frame is in a
+ * known state before the first program_systimer() call.
+ */
+int init_systimer(uintptr_t systimer_base)
+{
+ /* Check timer base is not initialised */
+ assert(!g_systimer_base);
+
+ g_systimer_base = systimer_base;
+
+ /* Disable the timer as the reset value is unknown */
+ disable_systimer();
+
+ /* Initialise CVAL to zero */
+ mmio_write_64(g_systimer_base + CNTP_CVAL_LO, 0);
+
+ return 0;
+}
diff --git a/drivers/io/io_fip.c b/drivers/io/io_fip.c
new file mode 100644
index 000000000..8b5af6ec4
--- /dev/null
+++ b/drivers/io/io_fip.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <firmware_image_package.h>
+#include <image_loader.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+#include <string.h>
+#include <uuid.h>
+#include <uuid_utils.h>
+
+
+typedef struct {
+ unsigned int file_pos;
+ fip_toc_entry_t entry;
+} file_state_t;
+
+static file_state_t current_file = {0};
+static uintptr_t backend_dev_handle;
+static uintptr_t backend_image_spec;
+
+
+/* Firmware Image Package driver functions */
+static int fip_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int fip_file_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity);
+static int fip_file_len(io_entity_t *entity, size_t *length);
+static int fip_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
+ size_t *length_read);
+static int fip_file_close(io_entity_t *entity);
+static int fip_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params);
+static int fip_dev_close(io_dev_info_t *dev_info);
+
+
+/* Return 1 when the ToC header carries the expected name and a non-zero
+ * serial number, 0 otherwise.
+ * TODO: We could check version numbers or do a package checksum?
+ */
+static inline int is_valid_header(fip_toc_header_t *header)
+{
+ return (header->name == TOC_HEADER_NAME) &&
+ (header->serial_number != 0);
+}
+
+
+/* Report this driver's device type: the virtual FIP device. */
+io_type_t device_type_fip(void)
+{
+ return IO_TYPE_FIRMWARE_IMAGE_PACKAGE;
+}
+
+
+static const io_dev_connector_t fip_dev_connector = {
+ .dev_open = fip_dev_open
+};
+
+
+static const io_dev_funcs_t fip_dev_funcs = {
+ .type = device_type_fip,
+ .open = fip_file_open,
+ .seek = NULL,
+ .size = fip_file_len,
+ .read = fip_file_read,
+ .write = NULL,
+ .close = fip_file_close,
+ .dev_init = fip_dev_init,
+ .dev_close = fip_dev_close,
+};
+
+
+/* No state associated with this device so structure can be const */
+static const io_dev_info_t fip_dev_info = {
+ .funcs = &fip_dev_funcs,
+ .info = (uintptr_t)NULL
+};
+
+
+/* Open a connection to the FIP device. */
+static int fip_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+ io_dev_info_t **dev_info)
+{
+ assert(dev_info != NULL);
+
+ /* The device is stateless, so hand back the shared const instance. */
+ *dev_info = (io_dev_info_t *)&fip_dev_info; /* cast away const */
+
+ return IO_SUCCESS;
+}
+
+
+/*
+ * Initialise the FIP device: resolve the backend holding image
+ * 'init_params' via the platform layer and sanity-check the FIP's
+ * Table-of-Contents header. Only the header is validated here; individual
+ * entries are parsed lazily by fip_file_open().
+ * Returns IO_SUCCESS or IO_FAIL.
+ */
+static int fip_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params)
+{
+ int result = IO_FAIL;
+ unsigned int image_id = (unsigned int)init_params;
+ uintptr_t backend_handle;
+ fip_toc_header_t header;
+ size_t bytes_read;
+
+ /* Obtain a reference to the image by querying the platform layer */
+ result = plat_get_image_source(image_id, &backend_dev_handle,
+ &backend_image_spec);
+ if (result != IO_SUCCESS) {
+ WARN("Failed to obtain reference to image id=%u (%i)\n",
+ image_id, result);
+ result = IO_FAIL;
+ goto fip_dev_init_exit;
+ }
+
+ /* Attempt to access the FIP image */
+ result = io_open(backend_dev_handle, backend_image_spec,
+ &backend_handle);
+ if (result != IO_SUCCESS) {
+ WARN("Failed to access image id=%u (%i)\n", image_id, result);
+ result = IO_FAIL;
+ goto fip_dev_init_exit;
+ }
+
+ /* The ToC header lives at offset zero of the FIP. */
+ result = io_read(backend_handle, (uintptr_t)&header, sizeof(header),
+ &bytes_read);
+ if (result == IO_SUCCESS) {
+ if (!is_valid_header(&header)) {
+ WARN("Firmware Image Package header check failed.\n");
+ result = IO_FAIL;
+ } else {
+ VERBOSE("FIP header looks OK.\n");
+ }
+ }
+
+ io_close(backend_handle);
+
+ fip_dev_init_exit:
+ return result;
+}
+
+/* Close a connection to the FIP device. */
+static int fip_dev_close(io_dev_info_t *dev_info)
+{
+ /* TODO: Consider tracking open files and cleaning them up here */
+
+ /* Drop the backend references cached by fip_dev_init(). */
+ backend_dev_handle = (uintptr_t)NULL;
+ backend_image_spec = (uintptr_t)NULL;
+
+ return IO_SUCCESS;
+}
+
+
+/*
+ * Open a file within the package, identified by the UUID in 'spec'.
+ * Scans the Table of Contents in the backend image for a matching entry
+ * and, on success, points entity->info at the single static file state.
+ * Returns IO_SUCCESS, IO_RESOURCES_EXHAUSTED (a file is already open) or
+ * IO_FAIL.
+ */
+static int fip_file_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity)
+{
+ int result = IO_FAIL;
+ uintptr_t backend_handle;
+ const io_uuid_spec_t *uuid_spec = (io_uuid_spec_t *)spec;
+ size_t bytes_read;
+ int found_file = 0;
+
+ assert(uuid_spec != NULL);
+ assert(entity != NULL);
+
+ /* Can only have one file open at a time for the moment. We need to
+ * track state like file cursor position. We know the header lives at
+ * offset zero, so this entry should never be zero for an active file.
+ * When the system supports dynamic memory allocation we can allow more
+ * than one open file at a time if needed.
+ */
+ if (current_file.entry.offset_address != 0) {
+ WARN("fip_file_open : Only one open file at a time.\n");
+ return IO_RESOURCES_EXHAUSTED;
+ }
+
+ /* Attempt to access the FIP image */
+ result = io_open(backend_dev_handle, backend_image_spec,
+ &backend_handle);
+ if (result != IO_SUCCESS) {
+ WARN("Failed to open Firmware Image Package (%i)\n", result);
+ result = IO_FAIL;
+ goto fip_file_open_exit;
+ }
+
+ /* Seek past the FIP header into the Table of Contents */
+ result = io_seek(backend_handle, IO_SEEK_SET, sizeof(fip_toc_header_t));
+ if (result != IO_SUCCESS) {
+ WARN("fip_file_open: failed to seek\n");
+ result = IO_FAIL;
+ goto fip_file_open_close;
+ }
+
+ /* Walk ToC entries until the requested UUID or the NULL-UUID
+ * terminator entry is reached. */
+ found_file = 0;
+ do {
+ result = io_read(backend_handle,
+ (uintptr_t)&current_file.entry,
+ sizeof(current_file.entry),
+ &bytes_read);
+ if (result == IO_SUCCESS) {
+ if (uuid_equal(&current_file.entry.uuid,
+ &uuid_spec->uuid)) {
+ found_file = 1;
+ break;
+ }
+ } else {
+ WARN("Failed to read FIP (%i)\n", result);
+ goto fip_file_open_close;
+ }
+ } while (!is_uuid_null(&current_file.entry.uuid));
+
+ if (found_file == 1) {
+ /* All fine. Update entity info with file state and return. Set
+ * the file position to 0. The 'current_file.entry' holds the
+ * base and size of the file.
+ */
+ current_file.file_pos = 0;
+ entity->info = (uintptr_t)&current_file;
+ } else {
+ /* Did not find the file in the FIP. */
+ current_file.entry.offset_address = 0;
+ result = IO_FAIL;
+ }
+
+ fip_file_open_close:
+ io_close(backend_handle);
+
+ fip_file_open_exit:
+ return result;
+}
+
+
+/* Return the size of a file in the package. */
+static int fip_file_len(io_entity_t *entity, size_t *length)
+{
+ const file_state_t *fp;
+
+ assert(entity != NULL);
+ assert(length != NULL);
+
+ /* The size was captured from the ToC entry when the file was opened. */
+ fp = (file_state_t *)entity->info;
+ *length = fp->entry.size;
+
+ return IO_SUCCESS;
+}
+
+
+/*
+ * Read 'length' bytes from the open packaged file into 'buffer', starting
+ * at the file's current cursor. The backend is re-opened per call; the
+ * cursor is advanced by the number of bytes actually read, which is
+ * reported via *length_read. Returns IO_SUCCESS or IO_FAIL.
+ */
+static int fip_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
+ size_t *length_read)
+{
+ int result = IO_FAIL;
+ file_state_t *fp;
+ size_t file_offset;
+ size_t bytes_read;
+ uintptr_t backend_handle;
+
+ assert(entity != NULL);
+ assert(buffer != (uintptr_t)NULL);
+ assert(length_read != NULL);
+ assert(entity->info != (uintptr_t)NULL);
+
+ /* Open the backend, attempt to access the blob image */
+ result = io_open(backend_dev_handle, backend_image_spec,
+ &backend_handle);
+ if (result != IO_SUCCESS) {
+ WARN("Failed to open FIP (%i)\n", result);
+ result = IO_FAIL;
+ goto fip_file_read_exit;
+ }
+
+ fp = (file_state_t *)entity->info;
+
+ /* Seek to the position in the FIP where the payload lives */
+ file_offset = fp->entry.offset_address + fp->file_pos;
+ result = io_seek(backend_handle, IO_SEEK_SET, file_offset);
+ if (result != IO_SUCCESS) {
+ WARN("fip_file_read: failed to seek\n");
+ result = IO_FAIL;
+ goto fip_file_read_close;
+ }
+
+ result = io_read(backend_handle, buffer, length, &bytes_read);
+ if (result != IO_SUCCESS) {
+ /* We cannot read our data. Fail. */
+ WARN("Failed to read payload (%i)\n", result);
+ result = IO_FAIL;
+ goto fip_file_read_close;
+ } else {
+ /* Set caller length and new file position. */
+ *length_read = bytes_read;
+ fp->file_pos += bytes_read;
+ }
+
+ /* Close the backend on both success and failure paths. */
+ fip_file_read_close:
+ io_close(backend_handle);
+
+ fip_file_read_exit:
+ return result;
+}
+
+
+/* Close a file in the package. */
+static int fip_file_close(io_entity_t *entity)
+{
+ /* Reset the single shared file state so another file can be opened.
+ * If we had malloc() we would free() here.
+ */
+ if (current_file.entry.offset_address != 0)
+ memset(&current_file, 0, sizeof(current_file));
+
+ /* Detach the entity from the (now cleared) file state. */
+ entity->info = 0;
+
+ return IO_SUCCESS;
+}
+
+/* Exported functions */
+
+/* Register the Firmware Image Package driver with the IO abstraction.
+ * On success the caller receives the FIP device connector via *dev_con. */
+int register_io_dev_fip(const io_dev_connector_t **dev_con)
+{
+ int result;
+
+ assert(dev_con != NULL);
+
+ result = io_register_device(&fip_dev_info);
+ if (result == IO_SUCCESS)
+ *dev_con = &fip_dev_connector;
+
+ return result;
+}
diff --git a/drivers/io/io_memmap.c b/drivers/io/io_memmap.c
new file mode 100644
index 000000000..3f4b2dbb6
--- /dev/null
+++ b/drivers/io/io_memmap.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <io_driver.h>
+#include <io_storage.h>
+#include <string.h>
+
+/* As we need to be able to keep state for seek, only one file can be open
+ * at a time. Make this a structure and point to the entity->info. When we
+ * can malloc memory we can change this to support more open files.
+ */
+typedef struct {
+ /* Use the 'in_use' flag as any value for base and file_pos could be
+ * valid.
+ */
+ int in_use;
+ uintptr_t base;
+ size_t file_pos;
+} file_state_t;
+
+static file_state_t current_file = {0};
+
+/* Report this driver's device type: memory-mapped storage. */
+io_type_t device_type_memmap(void)
+{
+ return IO_TYPE_MEMMAP;
+}
+
+/* Memmap device functions */
+static int memmap_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int memmap_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity);
+static int memmap_block_seek(io_entity_t *entity, int mode,
+ ssize_t offset);
+static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
+ size_t length, size_t *length_read);
+static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
+ size_t length, size_t *length_written);
+static int memmap_block_close(io_entity_t *entity);
+static int memmap_dev_close(io_dev_info_t *dev_info);
+
+
+static const io_dev_connector_t memmap_dev_connector = {
+ .dev_open = memmap_dev_open
+};
+
+
+static const io_dev_funcs_t memmap_dev_funcs = {
+ .type = device_type_memmap,
+ .open = memmap_block_open,
+ .seek = memmap_block_seek,
+ .size = NULL,
+ .read = memmap_block_read,
+ .write = memmap_block_write,
+ .close = memmap_block_close,
+ .dev_init = NULL,
+ .dev_close = memmap_dev_close,
+};
+
+
+/* No state associated with this device so structure can be const */
+static const io_dev_info_t memmap_dev_info = {
+ .funcs = &memmap_dev_funcs,
+ .info = (uintptr_t)NULL
+};
+
+
+/* Open a connection to the memmap device. */
+static int memmap_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+ io_dev_info_t **dev_info)
+{
+ assert(dev_info != NULL);
+
+ /* The device is stateless, so hand back the shared const instance. */
+ *dev_info = (io_dev_info_t *)&memmap_dev_info; /* cast away const */
+
+ return IO_SUCCESS;
+}
+
+
+
+/* Close a connection to the memmap device (stateless, nothing to do).
+ * TODO: Consider tracking open files and cleaning them up here. */
+static int memmap_dev_close(io_dev_info_t *dev_info)
+{
+ return IO_SUCCESS;
+}
+
+
+/* Open a file on the memmap device. */
+/* TODO: Can we do any sensible limit checks on requested memory */
+static int memmap_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity)
+{
+ const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
+
+ /* Seek state lives in a single static file_state_t, so only one
+ * spec may be open at a time until dynamic allocation is available.
+ */
+ if (current_file.in_use != 0) {
+ WARN("A Memmap device is already active. Close first.\n");
+ return IO_RESOURCES_EXHAUSTED;
+ }
+
+ assert(block_spec != NULL);
+ assert(entity != NULL);
+
+ current_file.in_use = 1;
+ current_file.base = block_spec->offset;
+ /* Reset the cursor used by seek and incremental reads/writes. */
+ current_file.file_pos = 0;
+ entity->info = (uintptr_t)&current_file;
+
+ return IO_SUCCESS;
+}
+
+
+/* Seek to a particular file offset on the memmap device. */
+static int memmap_block_seek(io_entity_t *entity, int mode, ssize_t offset)
+{
+ /* Only absolute positioning is supported for the moment. */
+ if (mode != IO_SEEK_SET)
+ return IO_FAIL;
+
+ assert(entity != NULL);
+
+ /* TODO: can we do some basic limit checks on seek? */
+ ((file_state_t *)entity->info)->file_pos = offset;
+
+ return IO_SUCCESS;
+}
+
+
+/* Copy 'length' bytes out of the mapped file at its current cursor. */
+static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
+ size_t length, size_t *length_read)
+{
+ file_state_t *fp;
+
+ assert(entity != NULL);
+ assert(buffer != (uintptr_t)NULL);
+ assert(length_read != NULL);
+
+ fp = (file_state_t *)entity->info;
+
+ /* The backing store is plain memory, so the copy cannot fail. */
+ memcpy((void *)buffer, (void *)(fp->base + fp->file_pos), length);
+ *length_read = length;
+
+ /* Advance the cursor so sequential reads continue where we stopped. */
+ fp->file_pos += length;
+
+ return IO_SUCCESS;
+}
+
+
+/* Copy 'length' bytes into the mapped file at its current cursor. */
+static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
+ size_t length, size_t *length_written)
+{
+ file_state_t *fp;
+
+ assert(entity != NULL);
+ assert(buffer != (uintptr_t)NULL);
+ assert(length_written != NULL);
+
+ fp = (file_state_t *)entity->info;
+
+ /* The backing store is plain memory, so the copy cannot fail. */
+ memcpy((void *)(fp->base + fp->file_pos), (void *)buffer, length);
+ *length_written = length;
+
+ /* Advance the cursor so sequential writes continue where we stopped. */
+ fp->file_pos += length;
+
+ return IO_SUCCESS;
+}
+
+
+/* Close the open memmap file and recycle the static file state. */
+static int memmap_block_close(io_entity_t *entity)
+{
+ assert(entity != NULL);
+
+ entity->info = 0;
+
+ /* Stand-in for free(): wipe the singleton so open() works again. */
+ memset((void *)&current_file, 0, sizeof(current_file));
+
+ return IO_SUCCESS;
+}
+
+
+/* Exported functions */
+
+/* Register the memmap driver with the IO abstraction.
+ * On success the caller receives the device connector via *dev_con. */
+int register_io_dev_memmap(const io_dev_connector_t **dev_con)
+{
+ int result;
+
+ assert(dev_con != NULL);
+
+ result = io_register_device(&memmap_dev_info);
+ if (result == IO_SUCCESS)
+ *dev_con = &memmap_dev_connector;
+
+ return result;
+}
diff --git a/drivers/io/io_storage.c b/drivers/io/io_storage.c
new file mode 100644
index 000000000..57dc76147
--- /dev/null
+++ b/drivers/io/io_storage.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <io_driver.h>
+#include <io_storage.h>
+#include <platform_def.h>
+#include <stddef.h>
+
+
+/* Storage for a fixed maximum number of IO entities, definable by platform */
+static io_entity_t entity_pool[MAX_IO_HANDLES];
+
+/* Simple way of tracking used storage - each entry is NULL or a pointer to an
+ * entity */
+static io_entity_t *entity_map[MAX_IO_HANDLES];
+
+/* Track number of allocated entities */
+static unsigned int entity_count;
+
+/* Array of fixed maximum of registered devices, definable by platform */
+static const io_dev_info_t *devices[MAX_IO_DEVICES];
+
+/* Number of currently registered devices */
+static unsigned int dev_count;
+
+
+#if DEBUG /* Extra validation functions only used in debug builds */
+
+/* Boolean: the connector exists and provides an open hook. */
+static int is_valid_dev_connector(const io_dev_connector_t *dev_con)
+{
+ return (dev_con != NULL) && (dev_con->dev_open != NULL);
+}
+
+
+/* Boolean: the handle points at a device with a plausible vtable
+ * reporting a known device type. */
+static int is_valid_dev(const uintptr_t dev_handle)
+{
+ const io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
+
+ return (dev != NULL) && (dev->funcs != NULL) &&
+ (dev->funcs->type != NULL) &&
+ (dev->funcs->type() < IO_TYPE_MAX);
+}
+
+
+/* Boolean: the handle points at an entity attached to a valid device. */
+static int is_valid_entity(const uintptr_t handle)
+{
+ const io_entity_t *entity = (io_entity_t *)handle;
+
+ return (entity != NULL) &&
+ (is_valid_dev((uintptr_t)entity->dev_handle));
+}
+
+
+/* Boolean: the seek mode is one of the defined, usable modes. */
+static int is_valid_seek_mode(io_seek_mode_t mode)
+{
+ return (mode != IO_SEEK_INVALID) && (mode < IO_SEEK_MAX);
+}
+
+#endif /* End of debug-only validation functions */
+
+
+/* Open a connection to a specific device through its connector. */
+static int dev_open(const io_dev_connector_t *dev_con, const uintptr_t dev_spec,
+ io_dev_info_t **dev_info)
+{
+ assert(dev_info != NULL);
+ assert(is_valid_dev_connector(dev_con));
+
+ /* Delegate directly to the driver's open hook. */
+ return dev_con->dev_open(dev_spec, dev_info);
+}
+
+
+/* Publish an entity pointer through an opaque caller-visible handle. */
+static void set_handle(uintptr_t *handle, io_entity_t *entity)
+{
+ assert(handle != NULL);
+
+ *handle = (uintptr_t)entity;
+}
+
+
+/* Find the first map slot holding 'entity'; pass NULL to find a free
+ * slot. Writes the slot index to *index_out and returns IO_SUCCESS, or
+ * returns IO_FAIL when no matching slot exists. */
+static int find_first_entity(const io_entity_t *entity, unsigned int *index_out)
+{
+ for (int slot = 0; slot < MAX_IO_HANDLES; ++slot) {
+ if (entity_map[slot] == entity) {
+ *index_out = slot;
+ return IO_SUCCESS;
+ }
+ }
+
+ return IO_FAIL;
+}
+
+
+/* Allocate an entity from the pool and return a pointer to it */
+static int allocate_entity(io_entity_t **entity)
+{
+ int result = IO_FAIL;
+ assert(entity != NULL);
+
+ if (entity_count < MAX_IO_HANDLES) {
+ unsigned int index = 0;
+ /* entity_count < MAX_IO_HANDLES guarantees at least one free
+ * (NULL) slot exists, so this lookup cannot fail. */
+ result = find_first_entity(NULL, &index);
+ assert(result == IO_SUCCESS);
+ /* Claim the slot: map entry and pool entry share the index. */
+ *entity = entity_map[index] = &entity_pool[index];
+ ++entity_count;
+ } else
+ result = IO_RESOURCES_EXHAUSTED;
+
+ return result;
+}
+
+
+/* Release an entity back to the pool. Returns IO_FAIL if the entity is
+ * not currently tracked. */
+static int free_entity(const io_entity_t *entity)
+{
+ unsigned int slot = 0;
+ int result;
+
+ assert(entity != NULL);
+
+ result = find_first_entity(entity, &slot);
+ if (result == IO_SUCCESS) {
+ /* Mark the slot free and shrink the live count. */
+ entity_map[slot] = NULL;
+ --entity_count;
+ }
+
+ return result;
+}
+
+
+/* Exported API */
+
+/* Register a device driver; fails once MAX_IO_DEVICES are registered. */
+int io_register_device(const io_dev_info_t *dev_info)
+{
+ assert(dev_info != NULL);
+
+ if (dev_count >= MAX_IO_DEVICES)
+ return IO_RESOURCES_EXHAUSTED;
+
+ devices[dev_count] = dev_info;
+ dev_count++;
+
+ return IO_SUCCESS;
+}
+
+
+/* Open a connection to an IO device. */
+int io_dev_open(const io_dev_connector_t *dev_con, const uintptr_t dev_spec,
+ uintptr_t *handle)
+{
+ assert(handle != NULL);
+
+ /* The opaque handle is simply the io_dev_info_t pointer. */
+ return dev_open(dev_con, dev_spec, (io_dev_info_t **)handle);
+}
+
+
+/* Initialise an IO device explicitly - to permit lazy initialisation or
+ * re-initialisation */
+int io_dev_init(uintptr_t dev_handle, const uintptr_t init_params)
+{
+ io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
+
+ assert(dev_handle != (uintptr_t)NULL);
+ assert(is_valid_dev(dev_handle));
+
+ /* Absence of a registered function implies a NOP here. */
+ if (dev->funcs->dev_init == NULL)
+ return IO_SUCCESS;
+
+ return dev->funcs->dev_init(dev, init_params);
+}
+
+
+/* TODO: Consider whether an explicit "shutdown" API should be included */
+
+/* Close a connection to a device. */
+int io_dev_close(uintptr_t dev_handle)
+{
+ io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
+
+ assert(dev_handle != (uintptr_t)NULL);
+ assert(is_valid_dev(dev_handle));
+
+ /* Absence of a registered function implies a NOP here. */
+ if (dev->funcs->dev_close == NULL)
+ return IO_SUCCESS;
+
+ return dev->funcs->dev_close(dev);
+}
+
+
+/* Synchronous operations */
+
+
+/*
+ * Open an IO entity on the given device, described by the driver-specific
+ * 'spec'. On success a pool entity is bound to the device and returned via
+ * the opaque *handle; on failure the entity is released again.
+ */
+int io_open(uintptr_t dev_handle, const uintptr_t spec, uintptr_t *handle)
+{
+ int result = IO_FAIL;
+ assert((spec != (uintptr_t)NULL) && (handle != NULL));
+ assert(is_valid_dev(dev_handle));
+
+ io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
+ io_entity_t *entity;
+
+ /* Grab a slot from the fixed entity pool first. */
+ result = allocate_entity(&entity);
+
+ if (result == IO_SUCCESS) {
+ assert(dev->funcs->open != NULL);
+ result = dev->funcs->open(dev, spec, entity);
+
+ if (result == IO_SUCCESS) {
+ entity->dev_handle = dev;
+ set_handle(handle, entity);
+ } else
+ /* Driver rejected the spec: give the slot back. */
+ free_entity(entity);
+ }
+ return result;
+}
+
+
+/* Seek to a specific position in an IO entity. */
+int io_seek(uintptr_t handle, io_seek_mode_t mode, ssize_t offset)
+{
+ io_entity_t *entity;
+ io_dev_info_t *dev;
+
+ assert(is_valid_entity(handle) && is_valid_seek_mode(mode));
+
+ entity = (io_entity_t *)handle;
+ dev = entity->dev_handle;
+
+ /* Seek support is optional for a driver. */
+ if (dev->funcs->seek == NULL)
+ return IO_NOT_SUPPORTED;
+
+ return dev->funcs->seek(entity, mode, offset);
+}
+
+
+/* Determine the length of an IO entity. */
+int io_size(uintptr_t handle, size_t *length)
+{
+ io_entity_t *entity;
+ io_dev_info_t *dev;
+
+ assert(is_valid_entity(handle) && (length != NULL));
+
+ entity = (io_entity_t *)handle;
+ dev = entity->dev_handle;
+
+ /* Size support is optional for a driver. */
+ if (dev->funcs->size == NULL)
+ return IO_NOT_SUPPORTED;
+
+ return dev->funcs->size(entity, length);
+}
+
+
+/* Read data from an IO entity. */
+int io_read(uintptr_t handle,
+ uintptr_t buffer,
+ size_t length,
+ size_t *length_read)
+{
+ io_entity_t *entity;
+ io_dev_info_t *dev;
+
+ assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
+
+ entity = (io_entity_t *)handle;
+ dev = entity->dev_handle;
+
+ /* Read support is optional for a driver. */
+ if (dev->funcs->read == NULL)
+ return IO_NOT_SUPPORTED;
+
+ return dev->funcs->read(entity, buffer, length, length_read);
+}
+
+
+/* Write data to an IO entity. */
+int io_write(uintptr_t handle,
+ const uintptr_t buffer,
+ size_t length,
+ size_t *length_written)
+{
+ io_entity_t *entity;
+ io_dev_info_t *dev;
+
+ assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
+
+ entity = (io_entity_t *)handle;
+ dev = entity->dev_handle;
+
+ /* Write support is optional for a driver. */
+ if (dev->funcs->write == NULL)
+ return IO_NOT_SUPPORTED;
+
+ return dev->funcs->write(entity, buffer, length, length_written);
+}
+
+
+/* Close an IO entity and return its slot to the pool. */
+int io_close(uintptr_t handle)
+{
+ int result;
+ io_entity_t *entity;
+ io_dev_info_t *dev;
+
+ assert(is_valid_entity(handle));
+
+ entity = (io_entity_t *)handle;
+ dev = entity->dev_handle;
+
+ /* Absence of a registered function implies a NOP here. */
+ if (dev->funcs->close != NULL)
+ result = dev->funcs->close(entity);
+ else
+ result = IO_SUCCESS;
+
+ /* Always recycle the entity; an improbable failure is ignored. */
+ (void)free_entity(entity);
+
+ return result;
+}
diff --git a/drivers/io/vexpress_nor/io_vexpress_nor_hw.c b/drivers/io/vexpress_nor/io_vexpress_nor_hw.c
new file mode 100644
index 000000000..de7b4ceff
--- /dev/null
+++ b/drivers/io/vexpress_nor/io_vexpress_nor_hw.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <string.h>
+#include "io_vexpress_nor_internal.h"
+#include "norflash.h"
+
+/* Device Id information */
+#define NOR_DEVICE_ID_LOCK_CONFIGURATION 0x02
+#define NOR_DEVICE_ID_BLOCK_LOCKED (1 << 0)
+#define NOR_DEVICE_ID_BLOCK_LOCKED_DOWN (1 << 1)
+
+/* Status Register Bits */
+#define NOR_SR_BIT_WRITE ((1 << 23) | (1 << 7))
+#define NOR_SR_BIT_ERASE ((1 << 21) | (1 << 5))
+#define NOR_SR_BIT_PROGRAM ((1 << 20) | (1 << 4))
+#define NOR_SR_BIT_VPP ((1 << 19) | (1 << 3))
+#define NOR_SR_BIT_BLOCK_LOCKED ((1 << 17) | (1 << 1))
+
+/*
+ * On chip buffer size for buffered programming operations
+ * There are 2 chips, each chip can buffer up to 32 (16-bit)words.
+ * Therefore the total size of the buffer is 2 x 32 x 2 = 128 bytes.
+ */
+#define NOR_MAX_BUFFER_SIZE_IN_BYTES 128
+#define NOR_MAX_BUFFER_SIZE_IN_WORDS (NOR_MAX_BUFFER_SIZE_IN_BYTES / 4)
+
+#define MAX_BUFFERED_PROG_ITERATIONS 1000
+#define LOW_16_BITS 0x0000FFFF
+#define FOLD_32BIT_INTO_16BIT(value) ((value >> 16) | (value & LOW_16_BITS))
+#define BOUNDARY_OF_32_WORDS 0x7F
+
+#define CHECK_VPP_RANGE_ERROR(status_register, address) \
+ do { \
+ if ((status_register) & NOR_SR_BIT_VPP) { \
+ ERROR("%s (address:0x%X): " \
+ "VPP Range Error\n", __func__, address);\
+ err = IO_FAIL; \
+ } \
+ } while (0)
+
+#define CHECK_BLOCK_LOCK_ERROR(status_register, address) \
+ do { \
+ if ((status_register) & NOR_SR_BIT_BLOCK_LOCKED) { \
+ ERROR("%s (address:0x%X): Device Protect " \
+ "Error\n", __func__, address); \
+ err = IO_FAIL; \
+ } \
+ } while (0)
+
+#define CHECK_BLOCK_ERASE_ERROR(status_register, block_offset) \
+ do { \
+ if ((status_register) & NOR_SR_BIT_ERASE) { \
+ ERROR("%s (block_offset=0x%08x: " \
+ "Block Erase Error status_register" \
+ ":0x%x\n", __func__, block_offset, \
+ status_register); \
+ err = IO_FAIL; \
+ } \
+ } while (0)
+
+#define CHECK_SR_BIT_PROGRAM_ERROR(status_register, address) \
+ do { \
+ if ((status_register) & NOR_SR_BIT_PROGRAM) { \
+ ERROR("%s(address:0x%X): Program Error\n", \
+ __func__, address); \
+ err = IO_FAIL; \
+ } \
+ } while (0)
+
+/* Helper macros to access two flash banks in parallel */
+#define NOR_2X16(d) ((d << 16) | (d & 0xffff))
+
+/* Issue a flash command, mirrored into both 16-bit halves of the 32-bit
+ * bus so the two parallel banks receive it simultaneously. */
+static inline void nor_send_cmd(uintptr_t base_addr, unsigned long cmd)
+{
+ mmio_write_32(base_addr, NOR_2X16(cmd));
+}
+
+/* Switch the device into status-read mode and return the status word. */
+static uint32_t flash_read_status(const io_nor_flash_spec_t *device)
+{
+ uint32_t status;
+
+ nor_send_cmd(device->device_address, NOR_CMD_READ_STATUS_REG);
+ status = mmio_read_32(device->device_address);
+
+ return status;
+}
+
+/*
+ * Poll the status register until both banks report ready, then return
+ * the final status word so callers can inspect the error bits.
+ * NOTE(review): busy-waits with no timeout - confirm the operation
+ * cannot stall indefinitely if the device never sets the ready bits.
+ */
+static uint32_t flash_wait_until_complete(const io_nor_flash_spec_t *device)
+{
+ uint32_t lock_status;
+
+ /* Wait until the status register gives us the all clear */
+ do {
+ lock_status = flash_read_status(device);
+ } while ((lock_status & NOR_SR_BIT_WRITE) != NOR_SR_BIT_WRITE);
+
+ return lock_status;
+}
+
+/*
+ * Return non-zero if the block containing 'block_offset' is write-locked.
+ * Uses the read-device-ID command to fetch the per-block lock
+ * configuration word and folds the two banks' replies together.
+ * NOTE(review): leaves the device in read-ID mode on return; callers
+ * appear responsible for restoring Read Array mode - confirm.
+ */
+static int flash_block_is_locked(uint32_t block_offset)
+{
+ uint32_t lock_status;
+
+ /* The lock configuration word sits at a fixed offset in ID space. */
+ uintptr_t addr = block_offset + (NOR_DEVICE_ID_LOCK_CONFIGURATION << 2);
+
+ /* Send command for reading device id */
+ nor_send_cmd(addr, NOR_CMD_READ_ID_CODE);
+
+ /* Read block lock status */
+ lock_status = mmio_read_32(addr);
+
+ /* Decode block lock status */
+ lock_status = FOLD_32BIT_INTO_16BIT(lock_status);
+
+ if ((lock_status & NOR_DEVICE_ID_BLOCK_LOCKED_DOWN) != 0)
+ WARN("flash_block_is_locked: Block LOCKED DOWN\n");
+
+ return lock_status & NOR_DEVICE_ID_BLOCK_LOCKED;
+}
+
+
+/*
+ * Lock or unlock the flash block at 'block_offset'.
+ * 'lock_operation' must be NOR_LOCK_BLOCK or NOR_UNLOCK_BLOCK. Waits for
+ * the operation to finish and restores Read Array mode before returning.
+ */
+static void flash_perform_lock_operation(const io_nor_flash_spec_t *device,
+ uint32_t block_offset,
+ uint32_t lock_operation)
+{
+ assert ((lock_operation == NOR_UNLOCK_BLOCK) ||
+ (lock_operation == NOR_LOCK_BLOCK));
+
+ /* Request a lock setup */
+ nor_send_cmd(block_offset, NOR_CMD_LOCK_UNLOCK);
+
+ /* Request lock or unlock */
+ nor_send_cmd(block_offset, lock_operation);
+
+ /* Wait until status register shows device is free */
+ flash_wait_until_complete(device);
+
+ /* Put device back into Read Array mode */
+ nor_send_cmd(block_offset, NOR_CMD_READ_ARRAY);
+}
+
+/* Unlock the block at 'block_offset' only if the device reports it as
+ * currently locked. */
+static void flash_unlock_block_if_necessary(const io_nor_flash_spec_t *device,
+ uint32_t block_offset)
+{
+ if (flash_block_is_locked(block_offset) != 0)
+ flash_perform_lock_operation(device, block_offset,
+ NOR_UNLOCK_BLOCK);
+}
+
+
+/*
+ * Erase the flash block at 'block_offset'.
+ * Issues the erase/confirm command pair, waits for completion, and maps
+ * the status-register error bits (VPP, command sequence, erase, lock)
+ * to IO_FAIL via the CHECK_* macros, which set the local 'err'.
+ * Restores Read Array mode before returning IO_SUCCESS or IO_FAIL.
+ */
+static int flash_erase_block(const io_nor_flash_spec_t *device,
+ uint32_t block_offset)
+{
+ int err = IO_SUCCESS;
+ uint32_t status_register;
+
+ VERBOSE("%s : 0x%x\n", __func__, block_offset);
+ /* Request a block erase and then confirm it */
+ nor_send_cmd(block_offset, NOR_CMD_BLOCK_ERASE);
+ nor_send_cmd(block_offset, NOR_CMD_BLOCK_ERASE_ACK);
+
+ /* Wait for the write to complete and then check for any errors;
+ * i.e. check the Status Register */
+ status_register = flash_wait_until_complete(device);
+
+ CHECK_VPP_RANGE_ERROR(status_register, block_offset);
+
+ /* Both erase and program bits set together signals a malformed
+ * command sequence rather than a plain erase failure. */
+ if ((status_register & (NOR_SR_BIT_ERASE | NOR_SR_BIT_PROGRAM)) ==
+ (NOR_SR_BIT_ERASE | NOR_SR_BIT_PROGRAM)) {
+ ERROR("%s(block_offset=0x%08x: "
+ "Command Sequence Error\n", __func__, block_offset);
+ err = IO_FAIL;
+ }
+
+ CHECK_BLOCK_ERASE_ERROR(status_register, block_offset);
+
+ CHECK_BLOCK_LOCK_ERROR(status_register, block_offset);
+
+ if (err) {
+ /* Clear the Status Register */
+ nor_send_cmd(device->device_address, NOR_CMD_CLEAR_STATUS_REG);
+ }
+
+ /* Put device back into Read Array mode */
+ nor_send_cmd(device->device_address, NOR_CMD_READ_ARRAY);
+
+ return err;
+}
+
+/*
+ * Writes data to the NOR Flash using the Buffered Programming method.
+ *
+ * The maximum size of the on-chip buffer is 32-words, because of hardware
+ * restrictions. Therefore this function will only handle buffers up to 32
+ * words or 128 bytes. To deal with larger buffers, call this function again.
+ *
+ * This function presumes that both the offset and the offset+BufferSize
+ * fit entirely within the NOR Flash. Therefore these conditions will not
+ * be checked here.
+ *
+ * In buffered programming, if the target address is not at the beginning of a
+ * 32-bit word boundary, then programming time is doubled and power consumption
+ * is increased. Therefore, it is a requirement to align buffer writes to
+ * 32-bit word boundaries.
+ */
+static int flash_write_buffer(const io_nor_flash_spec_t *device,
+		uint32_t offset,
+		const uint32_t *buffer,
+		uint32_t buffer_size)
+{
+	int err = IO_SUCCESS;
+	uint32_t size_in_words;
+	uint32_t count;
+	volatile uint32_t *data;
+	uint32_t timeout;
+	int is_buffer_available = 0;
+	uint32_t status_register;
+
+	timeout = MAX_BUFFERED_PROG_ITERATIONS;
+	is_buffer_available = 0;
+
+	/* Check that the target offset is aligned to a 32-word boundary. */
+	if ((offset & BOUNDARY_OF_32_WORDS) != 0)
+		return IO_FAIL;
+
+	/* This implementation requires the buffer to be 32bit aligned. */
+	if (((uintptr_t)buffer & (sizeof(uint32_t) - 1)) != 0)
+		return IO_FAIL;
+
+	/* Check there are some data to program */
+	assert(buffer_size > 0);
+
+	/* Check that the buffer size does not exceed the maximum hardware
+	 * buffer size on chip.
+	 */
+	assert(buffer_size <= NOR_MAX_BUFFER_SIZE_IN_BYTES);
+
+	/* Check that the buffer size is a multiple of 32-bit words */
+	assert((buffer_size % 4) == 0);
+
+	/* Pre-programming conditions checked, now start the algorithm. */
+
+	/* Prepare the data destination address */
+	data = (uint32_t *)(uintptr_t)offset;
+
+	/* Check the availability of the buffer */
+	do {
+		/* Issue the Buffered Program Setup command */
+		nor_send_cmd(offset, NOR_CMD_BUFFERED_PROGRAM);
+
+		/* Read back the status register bit#7 from the same offset */
+		if (((*data) & NOR_SR_BIT_WRITE) == NOR_SR_BIT_WRITE)
+			is_buffer_available = 1;
+
+		/* Update the loop counter */
+		timeout--;
+	} while ((timeout > 0) && (is_buffer_available == 0));
+
+	/* The buffer was not available for writing. Note: test the flag, not
+	 * the counter, as the buffer may become free on the last iteration. */
+	if (is_buffer_available == 0) {
+		err = IO_FAIL;
+		goto exit;
+	}
+
+	/* From now on we work in 32-bit words */
+	size_in_words = buffer_size / sizeof(uint32_t);
+
+	/* Write the word count, which is (buffer_size_in_words - 1),
+	 * because word count 0 means one word. */
+	nor_send_cmd(offset, size_in_words - 1);
+
+	/* Write the data to the NOR Flash, advancing each address by 4 bytes */
+	for (count = 0; count < size_in_words; count++, data++, buffer++)
+		*data = *buffer;
+
+	/* Issue the Buffered Program Confirm command, to start the programming
+	 * operation */
+	nor_send_cmd(device->device_address, NOR_CMD_BUFFERED_PROGRAM_ACK);
+
+	/* Wait for the write to complete and then check for any errors;
+	 * i.e. check the Status Register */
+	status_register = flash_wait_until_complete(device);
+
+	/* Perform a full status check:
+	 * Mask the relevant bits of Status Register.
+	 * Everything should be zero, if not, we have a problem */
+
+	CHECK_VPP_RANGE_ERROR(status_register, offset);
+
+	CHECK_SR_BIT_PROGRAM_ERROR(status_register, offset);
+
+	CHECK_BLOCK_LOCK_ERROR(status_register, offset);
+
+	if (err != IO_SUCCESS) {
+		/* Clear the Status Register */
+		nor_send_cmd(device->device_address,
+			NOR_CMD_CLEAR_STATUS_REG);
+	}
+
+exit:
+	/* Put device back into Read Array mode */
+	nor_send_cmd(device->device_address, NOR_CMD_READ_ARRAY);
+
+	return err;
+}
+
+static int flash_write_single_word(const io_nor_flash_spec_t *device,
+					uint32_t offset, uint32_t data)
+{
+	int err = IO_SUCCESS;
+	uint32_t status_register;
+
+	/* NOR Flash Programming: Request a write single word command */
+	nor_send_cmd(offset, NOR_CMD_WORD_PROGRAM);
+
+	/* Store the word into NOR Flash; */
+	mmio_write_32(offset, data);
+
+	/* Wait for the write to complete and then check for any errors;
+	 * i.e. check the Status Register */
+	status_register = flash_wait_until_complete(device);
+
+	/* Perform a full status check: */
+	/* Mask the relevant bits of Status Register.
+	 * Everything should be zero, if not, we have a problem */
+
+	CHECK_VPP_RANGE_ERROR(status_register, offset);
+
+	CHECK_SR_BIT_PROGRAM_ERROR(status_register, offset);
+
+	CHECK_BLOCK_LOCK_ERROR(status_register, offset);
+
+	if (err != IO_SUCCESS)
+		/* Clear the Status Register */
+		nor_send_cmd(device->device_address,
+			NOR_CMD_CLEAR_STATUS_REG);
+
+	/* Put device back into Read Array mode */
+	nor_send_cmd(device->device_address, NOR_CMD_READ_ARRAY);
+
+	return err;
+}
+
+int flash_block_write(file_state_t *fp, uint32_t offset,
+		const uintptr_t buffer, size_t *written)
+{
+	int ret;
+	uintptr_t buffer_ptr = buffer;
+	uint32_t buffer_size;
+	uint32_t remaining = fp->block_spec->block_size;
+	uint32_t flash_pos = fp->block_spec->region_address + offset;
+	uint32_t block_offset = flash_pos;
+
+	/* address passed should be block aligned */
+	assert(!(offset % fp->block_spec->block_size));
+
+	VERBOSE("%s : 0x%x\n", __func__, flash_pos);
+	/* Unlock block */
+	flash_unlock_block_if_necessary(fp->block_spec, block_offset);
+
+	/* Erase one block */
+	ret = flash_erase_block(fp->block_spec, flash_pos);
+
+	if (ret != IO_SUCCESS)
+		/* Perform lock operation as we unlocked it */
+		goto lock_block;
+
+	/* Start by using NOR Flash buffer while the buffer size is a multiple
+	 * of 32-bit */
+	while ((remaining >= sizeof(uint32_t)) && (ret == IO_SUCCESS)) {
+		if (remaining >= NOR_MAX_BUFFER_SIZE_IN_BYTES)
+			buffer_size = NOR_MAX_BUFFER_SIZE_IN_BYTES;
+		else
+			/* Round down to the remaining whole 32-bit words */
+			buffer_size = remaining & ~(sizeof(uint32_t) - 1);
+
+		ret = flash_write_buffer(fp->block_spec, flash_pos,
+				(const uint32_t *)buffer_ptr, buffer_size);
+		flash_pos += buffer_size;
+		remaining -= buffer_size;
+		buffer_ptr += buffer_size;
+
+	}
+
+	/* Write the remaining bytes, dereferencing the source buffer */
+	while ((remaining > 0) && (ret == IO_SUCCESS)) {
+		ret = flash_write_single_word(fp->block_spec,
+				flash_pos++, *(const uint32_t *)buffer_ptr++);
+		remaining--;
+	}
+
+	if (ret == IO_SUCCESS)
+		*written = fp->block_spec->block_size;
+
+lock_block:
+	VERBOSE("%s : 0x%x\n", __func__, block_offset);
+	/* Lock the block once done */
+	flash_perform_lock_operation(fp->block_spec,
+			block_offset,
+			NOR_LOCK_BLOCK);
+
+	return ret;
+}
+
+/* In case of partial write we need to save the block into a temporary buffer */
+static char block_buffer[NOR_FLASH_BLOCK_SIZE];
+
+int flash_partial_write(file_state_t *fp, uint32_t offset,
+ const uintptr_t buffer, size_t length, size_t *written)
+{
+ uintptr_t block_start;
+ uint32_t block_size;
+ uint32_t block_offset;
+ int ret;
+
+ assert((fp != NULL) && (fp->block_spec != NULL));
+ assert(written != NULL);
+
+ block_size = fp->block_spec->block_size;
+ /* Start address of the block to write */
+ block_start = (offset / block_size) * block_size;
+
+ /* Ensure 'block_buffer' is big enough to contain a copy of the block.
+ * 'block_buffer' is reserved at build time - so it might not match */
+ assert(sizeof(block_buffer) >= block_size);
+
+ /*
+ * Check the buffer fits inside a single block.
+ * It must not span several blocks
+ */
+ assert((offset / block_size) ==
+ ((offset + length - 1) / block_size));
+
+ /* Make a copy of the block from flash to a temporary buffer */
+ memcpy(block_buffer, (void *)(fp->block_spec->region_address +
+ block_start), block_size);
+
+ /* Calculate the offset of the buffer into the block */
+ block_offset = offset % block_size;
+
+ /* update the content of the block buffer */
+ memcpy(block_buffer + block_offset, (void *)buffer, length);
+
+ /* Write the block buffer back */
+ ret = flash_block_write(fp, block_start,
+ (uintptr_t)block_buffer, written);
+ if (ret == IO_SUCCESS)
+ *written = length;
+
+ return ret;
+}
diff --git a/drivers/io/vexpress_nor/io_vexpress_nor_internal.h b/drivers/io/vexpress_nor/io_vexpress_nor_internal.h
new file mode 100644
index 000000000..e06d492ed
--- /dev/null
+++ b/drivers/io/vexpress_nor/io_vexpress_nor_internal.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_VEXPRESS_NOR_INTERNAL_H__
+#define __IO_VEXPRESS_NOR_INTERNAL_H__
+
+#include <io_driver.h>
+#include <io_nor_flash.h>
+#include <io_storage.h>
+
+#define IS_FLASH_ADDRESS_BLOCK_ALIGNED(fp, addr) \
+ (((addr) & ((fp)->block_spec->block_size - 1)) == 0)
+
+/* As we need to be able to keep state for seek, only one file can be open
+ * at a time. Make this a structure and point to the entity->info. When we
+ * can malloc memory we can change this to support more open files.
+ */
+typedef struct {
+ /* Use the 'in_use' flag as any value for base and file_pos could be
+ * valid.
+ */
+ int in_use;
+ uintptr_t base;
+ size_t file_pos;
+
+ /* Definition of the flash block device */
+ const io_nor_flash_spec_t *block_spec;
+} file_state_t;
+
+int flash_block_write(file_state_t *fp, uint32_t address,
+ const uintptr_t buffer, size_t *written);
+
+int flash_partial_write(file_state_t *fp, uint32_t address,
+ const uintptr_t buffer, size_t length, size_t *written);
+
+#endif /* __IO_VEXPRESS_NOR_INTERNAL_H__ */
diff --git a/drivers/io/vexpress_nor/io_vexpress_nor_ops.c b/drivers/io/vexpress_nor/io_vexpress_nor_ops.c
new file mode 100644
index 000000000..d31da57d8
--- /dev/null
+++ b/drivers/io/vexpress_nor/io_vexpress_nor_ops.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <string.h>
+#include "io_vexpress_nor_internal.h"
+
+static file_state_t current_file = {0};
+
+/* Identify the device type as flash */
+io_type_t device_type_flash(void)
+{
+ return IO_TYPE_FLASH;
+}
+
+/* NOR Flash device functions */
+static int flash_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int flash_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity);
+static int flash_seek(io_entity_t *entity, int mode,
+ ssize_t offset);
+static int flash_read(io_entity_t *entity, uintptr_t buffer,
+ size_t length, size_t *length_read);
+static int flash_write(io_entity_t *entity, const uintptr_t buffer,
+ size_t length, size_t *length_written);
+static int flash_close(io_entity_t *entity);
+static int flash_dev_close(io_dev_info_t *dev_info);
+
+
+static const io_dev_connector_t flash_dev_connector = {
+ .dev_open = flash_dev_open
+};
+
+static const io_dev_funcs_t flash_dev_funcs = {
+ .type = device_type_flash,
+ .open = flash_open,
+ .seek = flash_seek,
+ .size = NULL,
+ .read = flash_read,
+ .write = flash_write,
+ .close = flash_close,
+ .dev_init = NULL,
+ .dev_close = flash_dev_close,
+};
+
+/* No state associated with this device so structure can be const */
+static const io_dev_info_t flash_dev_info = {
+ .funcs = &flash_dev_funcs,
+ .info = (uintptr_t)NULL
+};
+
+/* Open a connection to the flash device */
+static int flash_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+ io_dev_info_t **dev_info)
+{
+ assert(dev_info != NULL);
+ *dev_info = (io_dev_info_t *)&flash_dev_info; /* cast away const */
+
+ return IO_SUCCESS;
+}
+
+/* Close a connection to the flash device */
+static int flash_dev_close(io_dev_info_t *dev_info)
+{
+ /* NOP */
+ /* TODO: Consider tracking open files and cleaning them up here */
+ return IO_SUCCESS;
+}
+
+
+/* Open a file on the flash device */
+/* TODO: Can we do any sensible limit checks on requested memory */
+static int flash_open(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity)
+{
+ int result;
+ const io_nor_flash_spec_t *block_spec = (io_nor_flash_spec_t *)spec;
+
+ /* Since we need to track open state for seek() we only allow one open
+ * spec at a time. When we have dynamic memory we can malloc and set
+ * entity->info.
+ */
+ if (current_file.in_use == 0) {
+ assert(block_spec != NULL);
+ assert(entity != NULL);
+
+ current_file.in_use = 1;
+ current_file.base = block_spec->region_address;
+ /* File cursor offset for seek and incremental reads etc. */
+ current_file.file_pos = 0;
+ /* Attach the device specification to this file */
+ current_file.block_spec = block_spec;
+
+ entity->info = (uintptr_t)&current_file;
+ result = IO_SUCCESS;
+ } else {
+ WARN("A Flash device is already active. Close first.\n");
+ result = IO_RESOURCES_EXHAUSTED;
+ }
+
+ return result;
+}
+
+/* Seek to a particular file offset on the flash device */
+static int flash_seek(io_entity_t *entity, int mode, ssize_t offset)
+{
+	const io_nor_flash_spec_t *block_spec;
+	file_state_t *fp;
+	int result;
+	size_t flash_size;
+
+	assert(entity != NULL);
+
+	fp = (file_state_t *)entity->info;
+	block_spec = fp->block_spec;
+	flash_size = block_spec->block_count * block_spec->block_size;
+
+	/*
+	 * Note: 'file_pos' is an offset relative to the start of the flash
+	 * region, as set by flash_open() and consumed by flash_read().
+	 */
+	if (mode == IO_SEEK_SET) {
+		/* Ensure the new position is within the flash */
+		if ((offset >= 0) && ((size_t)offset < flash_size)) {
+			fp->file_pos = offset;
+			result = IO_SUCCESS;
+		} else {
+			result = IO_FAIL;
+		}
+	} else if (mode == IO_SEEK_END) {
+		/* Ensure the new position is within the flash */
+		if ((offset <= 0) && ((ssize_t)flash_size + offset >= 0)) {
+			fp->file_pos = flash_size + offset;
+			result = IO_SUCCESS;
+		} else {
+			result = IO_FAIL;
+		}
+	} else if (mode == IO_SEEK_CUR) {
+		/* Ensure the new position is within the flash */
+		if (((ssize_t)fp->file_pos + offset >= 0) &&
+			(fp->file_pos + offset < flash_size)) {
+			fp->file_pos += offset;
+			result = IO_SUCCESS;
+		} else {
+			result = IO_FAIL;
+		}
+	} else {
+		result = IO_FAIL;
+	}
+
+	return result;
+}
+
+/* Read data from a file on the flash device */
+static int flash_read(io_entity_t *entity, uintptr_t buffer,
+ size_t length, size_t *length_read)
+{
+ file_state_t *fp;
+
+ assert(entity != NULL);
+ assert(buffer != (uintptr_t)NULL);
+ assert(length_read != NULL);
+
+ fp = (file_state_t *)entity->info;
+
+ memcpy((void *)buffer, (void *)(fp->base + fp->file_pos), length);
+
+ *length_read = length;
+ /* advance the file 'cursor' for incremental reads */
+ fp->file_pos += length;
+
+ return IO_SUCCESS;
+}
+
+/* Write data to a file on the flash device */
+static int flash_write(io_entity_t *entity, const uintptr_t buffer,
+		size_t length, size_t *length_written)
+{
+	file_state_t *fp;
+	const io_nor_flash_spec_t *block_spec;
+	size_t file_pos;
+	int ret;
+	uint32_t remaining_block_size;
+	size_t written;
+	uintptr_t buffer_ptr = buffer;
+
+	assert(entity != NULL);
+	assert(buffer != (uintptr_t)NULL);
+	assert(length_written != NULL);
+
+	fp = (file_state_t *)entity->info;
+	block_spec = fp->block_spec;
+	file_pos = fp->file_pos;
+
+	*length_written = 0;
+
+	while (length > 0) {
+		/* Check if we can do a block write */
+		if (IS_FLASH_ADDRESS_BLOCK_ALIGNED(fp, file_pos)) {
+			if (length / block_spec->block_size > 0) {
+				ret = flash_block_write(fp, file_pos,
+						buffer_ptr, &written);
+			} else {
+				/* Case when the length is smaller than a
+				 * block size
+				 */
+				ret = flash_partial_write(fp, file_pos,
+						buffer_ptr, length, &written);
+			}
+		} else {
+			/* Case when the buffer does not start at the beginning
+			 * of a block
+			 */
+
+			/* Length between the current file_pos and the end of
+			 * the block
+			 */
+			remaining_block_size = block_spec->block_size -
+				(file_pos % block_spec->block_size);
+
+			if (length < remaining_block_size) {
+				ret = flash_partial_write(fp, file_pos,
+						buffer_ptr, length, &written);
+			} else {
+				ret = flash_partial_write(fp, file_pos,
+						buffer_ptr, remaining_block_size,
+						&written);
+			}
+		}
+
+		/* If one of the write operations fails then we do not pursue */
+		if (ret != IO_SUCCESS) {
+			return ret;
+		} else {
+			buffer_ptr += written;
+			file_pos += written;
+
+			*length_written += written;
+			length -= written;
+		}
+	}
+
+	/* Advance the file 'cursor' by the total amount actually written
+	 * ('length' is zero at this point, it was consumed by the loop) */
+	fp->file_pos = file_pos;
+
+	return IO_SUCCESS;
+}
+
+/* Close a file on the flash device */
+static int flash_close(io_entity_t *entity)
+{
+ assert(entity != NULL);
+
+ entity->info = 0;
+
+ /* This would be a mem free() if we had malloc.*/
+ memset((void *)&current_file, 0, sizeof(current_file));
+
+ return IO_SUCCESS;
+}
+
+/* Exported functions */
+
+/* Register the flash driver with the IO abstraction */
+int register_io_dev_nor_flash(const io_dev_connector_t **dev_con)
+{
+ int result;
+ assert(dev_con != NULL);
+
+ result = io_register_device(&flash_dev_info);
+ if (result == IO_SUCCESS)
+ *dev_con = &flash_dev_connector;
+
+ return result;
+}
diff --git a/drivers/io/vexpress_nor/norflash.h b/drivers/io/vexpress_nor/norflash.h
new file mode 100644
index 000000000..893a2c502
--- /dev/null
+++ b/drivers/io/vexpress_nor/norflash.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __NORFLASH_H_
+#define __NORFLASH_H_
+
+#include <stdint.h>
+
+/* First bus cycle */
+#define NOR_CMD_READ_ARRAY 0xFF
+#define NOR_CMD_READ_ID_CODE 0x90
+#define NOR_CMD_READ_QUERY 0x98
+#define NOR_CMD_READ_STATUS_REG 0x70
+#define NOR_CMD_CLEAR_STATUS_REG 0x50
+#define NOR_CMD_WRITE_TO_BUFFER 0xE8
+#define NOR_CMD_WORD_PROGRAM 0x40
+#define NOR_CMD_BLOCK_ERASE 0x20
+#define NOR_CMD_LOCK_UNLOCK 0x60
+#define NOR_CMD_BLOCK_ERASE_ACK 0xD0
+#define NOR_CMD_BUFFERED_PROGRAM 0xE8
+
+/* Second bus cycle */
+#define NOR_LOCK_BLOCK 0x01
+#define NOR_UNLOCK_BLOCK 0xD0
+#define NOR_CMD_BUFFERED_PROGRAM_ACK 0xD0
+
+/* Status register bits */
+#define NOR_DWS (1 << 7)
+#define NOR_ESS (1 << 6)
+#define NOR_ES (1 << 5)
+#define NOR_PS (1 << 4)
+#define NOR_VPPS (1 << 3)
+#define NOR_PSS (1 << 2)
+#define NOR_BLS (1 << 1)
+#define NOR_BWS (1 << 0)
+
+#endif /* __NORFLASH_H_ */
+
diff --git a/el3_payload/.gitignore b/el3_payload/.gitignore
new file mode 100644
index 000000000..381e35464
--- /dev/null
+++ b/el3_payload/.gitignore
@@ -0,0 +1,6 @@
+*.o
+*~
+*.elf
+*.bin
+*.ld
+*.dump
diff --git a/el3_payload/Makefile b/el3_payload/Makefile
new file mode 100644
index 000000000..f6e809f2c
--- /dev/null
+++ b/el3_payload/Makefile
@@ -0,0 +1,91 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+CROSS_COMPILE := aarch64-linux-gnu-
+CC := ${CROSS_COMPILE}gcc
+AS := ${CROSS_COMPILE}as
+LD := ${CROSS_COMPILE}ld
+OC := ${CROSS_COMPILE}objcopy
+OD := ${CROSS_COMPILE}objdump
+
+PLAT ?= fvp
+
+ASFLAGS := -nostdinc -ffreestanding -Wa,--fatal-warnings -Werror
+ASFLAGS += -Iplat/${PLAT}/ -I.
+
+PLAT_BUILD_DIR := build/${PLAT}
+SOURCES := entrypoint.S spin.S uart.S plat/${PLAT}/platform.S
+OBJS := $(patsubst %,$(PLAT_BUILD_DIR)/%,$(notdir $(SOURCES:.S=.o)))
+
+PROGRAM := el3_payload
+LINKER_SCRIPT := ${PLAT_BUILD_DIR}/${PROGRAM}.ld
+ELF := ${PLAT_BUILD_DIR}/${PROGRAM}.elf
+BIN := ${PLAT_BUILD_DIR}/${PROGRAM}.bin
+DUMP := ${PLAT_BUILD_DIR}/${PROGRAM}.dump
+
+include plat/${PLAT}/platform.mk
+
+all: ${BIN}
+
+${PLAT_BUILD_DIR}:
+ mkdir -p $@
+
+${PLAT_BUILD_DIR}/%.o: %.S ${PLAT_BUILD_DIR}
+ @echo " CC $<"
+ ${CC} ${ASFLAGS} -c $< -o $@
+
+# Specific rule for this '.o' file to avoid worrying about
+# plat/${PLAT}/platform.S being in a sub-directory...
+# TODO: Fix this workaround.
+${PLAT_BUILD_DIR}/platform.o: plat/${PLAT}/platform.S ${PLAT_BUILD_DIR}
+ @echo " CC $<"
+ ${CC} ${ASFLAGS} -c $< -o $@
+
+${PLAT_BUILD_DIR}/%.ld: %.ld.S ${PLAT_BUILD_DIR}
+ @echo " PP $<"
+ ${CC} -DDRAM_BASE=${DRAM_BASE} -DDRAM_SIZE=${DRAM_SIZE} -E -P -o $@ $<
+
+${ELF}: ${LINKER_SCRIPT} ${OBJS}
+ @echo " LD $<"
+ ${LD} ${LDFLAGS} ${OBJS} --script ${LINKER_SCRIPT} -o $@
+
+${BIN}: ${ELF}
+ @echo " BIN $@"
+ ${OC} -O binary $< $@
+
+${DUMP}: $(ELF)
+	@echo "  OD      $@"
+ ${OD} -dx $< > $@
+
+dump: ${DUMP}
+
+clean:
+ rm -rf ${PLAT_BUILD_DIR}
+
+distclean: clean
+ rm -rf build/
+
+run: run_${PLAT}
+
+run_juno: ${ELF} scripts/juno/run_ds5_script.sh scripts/juno/load_el3_payload.ds
+ scripts/juno/run_ds5_script.sh scripts/juno/load_el3_payload.ds
+
+run_fvp: scripts/fvp/run_fvp.sh
+ scripts/fvp/run_fvp.sh
+
+help:
+ @echo "EL3 test payload"
+ @echo
+ @echo "To build:"
+ @echo "make [PLAT=fvp|juno] [TARGETS]"
+ @echo ""
+ @echo "The default platform is fvp."
+ @echo
+ @echo "TARGETS:"
+ @echo " all Build the payload [default target]"
+ @echo " dump Generate a dump file of the program"
+ @echo " run Run the payload on the given platform."
+ @echo " /!\ For Juno, requires a connection to a Juno board via DSTREAM"
diff --git a/el3_payload/README b/el3_payload/README
new file mode 100644
index 000000000..fcb3ae106
--- /dev/null
+++ b/el3_payload/README
@@ -0,0 +1,141 @@
+EL3 test payload
+================
+
+This program is a very simple EL3 baremetal application. It has been developed
+to test the ability of the Trusted Firmware-A to load an EL3 payload. All it
+does is making sure that all CPUs enter this image and if so, reports it through
+the UART.
+
+
+Building
+--------
+
+$ make PLAT=juno
+or
+$ make PLAT=fvp
+
+
+Preparing Juno board configuration files
+----------------------------------------
+
+You should have the following line in your 'SITE1/HBI0262X/board.txt' Juno
+configuration file on the SD card (where X depends on the revision of your Juno
+board):
+
+ SCC: 0x0F4 0x000003F8
+
+This:
+ - ensures all CPUs are powered on at reset;
+ - designates the Cortex-A53 #0 as the primary CPU.
+
+See the 'Assumptions' section below for more details.
+
+
+Running on Juno
+---------------
+
+The Trusted Firmware-A must be compiled with SPIN_ON_BL1_EXIT=1. This will
+introduce an infinite loop in BL1 that gives you some time to load and run the
+EL3 payload over JTAG.
+
+Boot the board and wait until you see the following messages on the UART.
+(Note that the "INFO" messages appear only in a debug build of the Trusted
+Firmware but you should see the "NOTICE" messages in any case.)
+
+ NOTICE: BL1: Booting BL31
+ INFO: BL1: BL31 address = 0x80000000
+ INFO: BL1: BL31 spsr = 0x3cd
+ INFO: BL1: BL31 params address = 0x0
+ INFO: BL1: BL31 plat params address = 0xf1e2d3c4b5a6978
+ INFO: BL1: BL31 address = 0x80000000
+ INFO: BL1: BL31 spsr = 0x3cd
+ INFO: BL1: BL31 params address = 0x0
+ INFO: BL1: BL31 plat params address = 0xf1e2d3c4b5a6978
+ NOTICE: BL1: Debug loop, spinning forever
+ NOTICE: BL1: Please connect the debugger to jump over it
+
+TODO: Update the above messages.
+
+At this point, type the following command in a shell from the EL3 test payload
+top directory:
+
+$ make PLAT=juno run
+
+You should see something like this in your shell (it takes a few seconds):
+
+ Trying to detect your DSTREAM unit...
+ Available connections:
+ USB:000765
+ Connecting to USB:000765...
+ Connected to running target Cortex-A53_0
+ Execution stopped in EL3h mode at EL3:0x000000000BEC2548
+ EL3:0x000000000BEC2548 B {pc} ; 0xbec2548
+ Loaded section ro: EL3:0x0000000080000000 ~ EL3:0x0000000080000123 (size 0x124)
+ Loaded section .data: EL3:0x0000000080000128 ~ EL3:0x0000000080000197 (size 0x70)
+ Entry point EL3:0x0000000080000000
+ Disconnected from running target Cortex-A53_0
+
+And on the Juno UART, this should print the following messages:
+
+ Booting the EL3 test payload
+ All CPUs booted!
+
+
+Running on FVP (AEMv8A)
+-----------------------
+
+First, copy the "bl1.bin" and "fip.bin" files into the current directory.
+Alternatively, symbolic links might be created.
+
+Then run:
+
+$ make PLAT=fvp run
+
+Note: The EL3 payload does not work on the Foundation FVP.
+(This is because it expects 8 CPUs and the Foundation FVP has maximum 4.)
+
+
+How does it work?
+-----------------
+
+There is a per-cpu array. Each entry is initialised to a "dead" value. On entry
+into the payload, each CPU writes its MPID to its entry, which allows it to
+signal its presence.
+
+Secondary CPUs then spin forever.
+
+The primary CPU carries on.
+ 1) It prints a "welcome" string.
+ 2) It waits for each entry in the CPUs array to be updated.
+ 3) Once all CPUs have been detected in that way, it prints a success message.
+ 4) Finally, it spins forever.
+
+
+Assumptions
+-----------
+
+- All CPUs enter the EL3 test payload at some point.
+ The order doesn't matter, though.
+ If some CPU doesn't boot then the EL3 payload will wait forever.
+
+- On FVP, the number of cores is hard-coded to 8.
+ If the FVP model is configured to disable some CPUs then the EL3 payload will
+ hang, waiting forever for absent CPUs.
+ For the same reason, the EL3 payload hangs on the Foundation FVP (which has
+ 4 CPUs only).
+
+- The UART is already configured.
+
+- On Juno, the primary CPU is hard-coded to the Cortex-A53 #0.
+ Any CPU could be the primary CPU, though. However, the DS-5 scripts launched
+ by 'make run' assumes the Cortex-A53 #0 is the primary CPU.
+
+ On FVP, the primary CPU is hard-coded to the CPU with MPID 0x0.
+
+  Designating a CPU as the primary one simplifies the code. More particularly,
+ only the primary CPU accesses the UART, which removes the need for
+ synchronisation locks to avoid interleaved messages.
+
+- The EL3 test payload runs from RAM.
+ It can't execute from flash, as we would need to relocate the .data section
+ in RAM at run-time and this is not implemented for now.
diff --git a/el3_payload/arch.h b/el3_payload/arch.h
new file mode 100644
index 000000000..a6a480c65
--- /dev/null
+++ b/el3_payload/arch.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_H__
+#define __ARCH_H__
+
+#define MPIDR_MT_MASK (1 << 24)
+#define MPIDR_AFFLVL_MASK 0xff
+#define MPIDR_AFFINITY_BITS 8
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFF0_SHIFT 0
+#define MPIDR_AFF1_SHIFT 8
+#define MPIDR_AFF2_SHIFT 16
+#define MPIDR_AFFINITY_MASK 0xff00ffffff
+
+#endif /* __ARCH_H__ */
diff --git a/el3_payload/asm_macros.S b/el3_payload/asm_macros.S
new file mode 100644
index 000000000..d02afba2d
--- /dev/null
+++ b/el3_payload/asm_macros.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ /*
+ * This macro is used to create a function label.
+ */
+ .macro func _name
+ .type \_name, %function
+ .func \_name
+ \_name:
+ .endm
+
+ /*
+ * This macro is used to mark the end of a function.
+ */
+ .macro endfunc _name
+ .endfunc
+ .size \_name, . - \_name
+ .endm
diff --git a/el3_payload/el3_payload.ld.S b/el3_payload/el3_payload.ld.S
new file mode 100644
index 000000000..b1c28dc00
--- /dev/null
+++ b/el3_payload/el3_payload.ld.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+OUTPUT_FORMAT("elf64-littleaarch64")
+OUTPUT_ARCH("aarch64")
+ENTRY(entrypoint)
+
+MEMORY {
+	RAM (rwx): ORIGIN = DRAM_BASE, LENGTH = DRAM_SIZE
+}
+
+SECTIONS
+{
+ . = DRAM_BASE;
+
+ ro . : {
+ */entrypoint.o(.text)
+ *(.text*)
+ *(.rodata*)
+ } >RAM
+
+ .data : {
+ *(.data*)
+ } >RAM
+}
diff --git a/el3_payload/entrypoint.S b/el3_payload/entrypoint.S
new file mode 100644
index 000000000..2da49363c
--- /dev/null
+++ b/el3_payload/entrypoint.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "arch.h"
+#include "asm_macros.S"
+#include "platform.h"
+
+#define EOT_ASCII_CODE 4
+
+ .data
+welcome_str:
+ .asciz "Booting the EL3 test payload\r\n"
+all_cpus_booted_str:
+ .asciz "All CPUs booted!\r\n"
+
+ .text
+ .global entrypoint
+
+func entrypoint
+ bl mark_cpu_presence
+
+ /* Distinguish primary from secondary CPUs */
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_AFFINITY_MASK
+ and x0, x0, x1
+
+ ldr x1, =PRIMARY_CPU_MPID
+ cmp x0, x1
+ b.ne spin_forever
+
+ /*
+ * Only the primary CPU executes the code below
+ */
+
+ adr x0, welcome_str
+ bl print_string
+
+ /* Wait to see each CPU */
+ mov x3, xzr
+1:
+ mov x0, x3
+ bl is_cpu_present
+ cbz x0, 1b
+
+ /* Next CPU, if any */
+ add x3, x3, #1
+ mov x0, #CPUS_COUNT
+ cmp x3, x0
+ b.lt 1b
+
+ /* All CPUs have been detected, announce the good news! */
+ adr x0, all_cpus_booted_str
+ bl print_string
+
+ /* Send EOT (End of Transmission character) character over the UART */
+ mov x0, #EOT_ASCII_CODE
+ bl print_char
+
+spin_forever:
+ wfe
+ b spin_forever
+endfunc entrypoint
diff --git a/el3_payload/plat/fvp/platform.S b/el3_payload/plat/fvp/platform.S
new file mode 100644
index 000000000..5855ca754
--- /dev/null
+++ b/el3_payload/plat/fvp/platform.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "arch.h"
+#include "asm_macros.S"
+#include "platform.h"
+
+ .text
+ .global platform_get_core_pos
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on FVP.
+ *
+ * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER * FVP_MAX_PE_PER_CPU) +
+ * (CPUId * FVP_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * FVP_MAX_CPUS_PER_CLUSTER + CPUId) * FVP_MAX_PE_PER_CPU)
+ * + ThreadId
+ *
+ * clobbers: x0, x1, x3, x4
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+ * look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x4, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov x3, #FVP_MAX_CPUS_PER_CLUSTER
+ madd x1, x4, x3, x1
+ mov x3, #FVP_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+endfunc platform_get_core_pos
diff --git a/el3_payload/plat/fvp/platform.h b/el3_payload/plat/fvp/platform.h
new file mode 100644
index 000000000..809ae7eb2
--- /dev/null
+++ b/el3_payload/plat/fvp/platform.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_H__
+#define __PLATFORM_H__
+
+#define PRIMARY_CPU_MPID 0x0
+
+/* Always expect 8 cores, although this is configurable on FVP */
+#define CPUS_COUNT 8
+
+#define UART_BASE 0x1c090000
+
+#define FVP_MAX_CPUS_PER_CLUSTER 4
+/* Currently multi-threaded CPUs only have a single thread */
+#define FVP_MAX_PE_PER_CPU 1
+
+#endif /* __PLATFORM_H__ */
diff --git a/el3_payload/plat/fvp/platform.mk b/el3_payload/plat/fvp/platform.mk
new file mode 100644
index 000000000..b4cda9b65
--- /dev/null
+++ b/el3_payload/plat/fvp/platform.mk
@@ -0,0 +1,8 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+DRAM_BASE := 0x80000000
+DRAM_SIZE := 0x80000000
diff --git a/el3_payload/plat/juno/platform.S b/el3_payload/plat/juno/platform.S
new file mode 100644
index 000000000..6c8773b66
--- /dev/null
+++ b/el3_payload/plat/juno/platform.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "arch.h"
+#include "asm_macros.S"
+
+ .text
+ .global platform_get_core_pos
+
+ /* --------------------------------------------------------------------
+ * unsigned int get_core_pos(uint64_t mpidr);
+ *
+ * Helper function to calculate the core position from its MPID.
+ * Core positions must be consecutive, there must be no holes.
+ *
+ * MPID -> core position:
+ * 0x100 -> 0
+ * 0x101 -> 1
+ * 0x102 -> 2
+ * 0x103 -> 3
+ * 0x0 -> 4
+ * 0x1 -> 5
+ * --------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) /* swap cluster order */
+ add x0, x1, x0, LSR #6
+ ret
+endfunc platform_get_core_pos
diff --git a/el3_payload/plat/juno/platform.h b/el3_payload/plat/juno/platform.h
new file mode 100644
index 000000000..7fdcb3ee5
--- /dev/null
+++ b/el3_payload/plat/juno/platform.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_H__
+#define __PLATFORM_H__
+
+/* Designate the Cortex-A53 #0 as the primary CPU */
+#define PRIMARY_CPU_MPID 0x100
+
+#define CPUS_COUNT 6
+
+#define UART_BASE 0x7ff80000
+
+#endif /* __PLATFORM_H__ */
diff --git a/el3_payload/plat/juno/platform.mk b/el3_payload/plat/juno/platform.mk
new file mode 100644
index 000000000..b4cda9b65
--- /dev/null
+++ b/el3_payload/plat/juno/platform.mk
@@ -0,0 +1,8 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+DRAM_BASE := 0x80000000
+DRAM_SIZE := 0x80000000
diff --git a/el3_payload/scripts/fvp/run_foundation.sh b/el3_payload/scripts/fvp/run_foundation.sh
new file mode 100755
index 000000000..f8ec343d3
--- /dev/null
+++ b/el3_payload/scripts/fvp/run_foundation.sh
@@ -0,0 +1,51 @@
+#! /bin/bash
+
+#
+# Script to run the EL3 payload on the Foundation FVP.
+#
+# /!\ The EL3 payload is not supported on the Foundation FVP without tweaking
+# the code. You need to modify the number of expected cores in
+# plat/fvp/platform.h:
+# -#define CPUS_COUNT 8
+# +#define CPUS_COUNT 4
+#
+
+set -e
+
+# usage: check_file_is_present <filename>
+# Check that <filename> exists in the current directory.
+# If not, print an error message and exit.
+function check_file_is_present
+{
+ BIN_FILE=$1
+ if [ ! -e "$BIN_FILE" ]; then
+ echo "ERROR: Can't find \"$BIN_FILE\" file"
+ echo "Please copy $BIN_FILE into the current working directory."
+ echo "Alternatively, a symbolic link might be created."
+ echo
+ exit 1
+ fi
+}
+
+check_file_is_present "bl1.bin"
+check_file_is_present "fip.bin"
+
+# Create an 8-byte file containing all zero bytes.
+# It will be loaded at the beginning of the Trusted SRAM to zero the mailbox.
+MAILBOX_FILE=mailbox.dat
+rm -f $MAILBOX_FILE
+dd if=/dev/zero of=$MAILBOX_FILE bs=1 count=8
+
+# The path to the Foundation model must be provided by the user.
+MODEL_EXEC="${MODEL_EXEC:?}"
+MODEL_PARAMETERS=" \
+ --cores=4 \
+ --visualization \
+ --data=bl1.bin@0x0 \
+ --data=fip.bin@0x08000000 \
+ --data=$MAILBOX_FILE@0x04000000 \
+ --data=build/fvp/el3_payload.bin@0x80000000 \
+"
+
+echo $MODEL_EXEC $MODEL_PARAMETERS
+$MODEL_EXEC $MODEL_PARAMETERS
diff --git a/el3_payload/scripts/fvp/run_fvp.sh b/el3_payload/scripts/fvp/run_fvp.sh
new file mode 100755
index 000000000..788c90cb6
--- /dev/null
+++ b/el3_payload/scripts/fvp/run_fvp.sh
@@ -0,0 +1,56 @@
+#! /bin/bash
+
+set -e
+
+UART_OUTPUT_FILE=uart0.log
+
+# usage: check_file_is_present <filename>
+# Check that <filename> exists in the current directory.
+# If not, print an error message and exit.
+function check_file_is_present
+{
+ BIN_FILE=$1
+ if [ ! -e "$BIN_FILE" ]; then
+ echo "ERROR: Can't find \"$BIN_FILE\" file"
+ echo "Please copy $BIN_FILE into the current working directory."
+ echo "Alternatively, a symbolic link might be created."
+ echo
+ exit 1
+ fi
+}
+
+check_file_is_present "bl1.bin"
+check_file_is_present "fip.bin"
+
+# The path to the FVP model must be provided by the user.
+MODEL_EXEC="${MODEL_EXEC:?}"
+MODEL_PARAMETERS="
+ -C pctl.startup=0.0.*.*,0.1.*.* \
+ -C bp.secureflashloader.fname=bl1.bin \
+ -C bp.flashloader0.fname=fip.bin \
+ --data cluster0.cpu0=build/fvp/el3_payload.bin@0x80000000 \
+ -C bp.secureSRAM.fill1=0x00000000 \
+ -C bp.secureSRAM.fill2=0x00000000 \
+ -C bp.pl011_uart0.out_file=$UART_OUTPUT_FILE \
+ -C bp.pl011_uart0.shutdown_on_eot=1 \
+"
+
+echo $MODEL_EXEC $MODEL_PARAMETERS
+$MODEL_EXEC $MODEL_PARAMETERS
+
+# Print results
+green='\033[1;32;40m'
+no_color='\033[0m'
+echo
+echo -e "$green"
+echo "============"
+echo " COMPLETE!"
+echo "============"
+echo -e "$no_color"
+echo "UART output:"
+echo "--------------------------------8<------------------------------"
+cat $UART_OUTPUT_FILE
+echo "--------------------------------8<------------------------------"
+echo
+echo "Output saved in $UART_OUTPUT_FILE file."
+echo
diff --git a/el3_payload/scripts/juno/load_el3_payload.ds b/el3_payload/scripts/juno/load_el3_payload.ds
new file mode 100644
index 000000000..23db44dbd
--- /dev/null
+++ b/el3_payload/scripts/juno/load_el3_payload.ds
@@ -0,0 +1,11 @@
+# Stop target
+interrupt
+
+# Load EL3 payload
+load build/juno/el3_payload.elf
+
+# Jump over BL1 infinite loop
+set var $pc = $pc + 4
+
+# Resume execution
+continue
diff --git a/el3_payload/scripts/juno/run_ds5_script.sh b/el3_payload/scripts/juno/run_ds5_script.sh
new file mode 100755
index 000000000..67d07365a
--- /dev/null
+++ b/el3_payload/scripts/juno/run_ds5_script.sh
@@ -0,0 +1,49 @@
+#! /bin/sh
+
+set -e
+
+# Expect the script to run as an argument
+if [ $# != 1 ]; then
+ echo "ERROR: No script provided"
+ echo "usage: $(basename $0) <ds5_script_to_run>"
+ exit 1
+fi
+
+# Is DS-5 command-line debugger found?
+if [ ! $(which debugger) ]; then
+ echo 'ERROR: Failed to find DS-5 command-line debugger.'
+ echo 'Please add the path to the command-line debugger in your PATH.'
+ echo 'E.g.: export PATH=<DS-5 install dir>/bin:$PATH'
+ exit 1
+fi
+
+# DS-5 configuration database entry for Juno r0
+juno_cdb_entry='ARM Development Boards::Juno ARM Development Platform (r0)::Bare Metal Debug::Bare Metal Debug::Debug Cortex-A53_0::DSTREAM'
+
+# Browse for available DSTREAM connections and lists targets that match the
+# connection type specified in the configuration database entry
+echo "Trying to detect your DSTREAM unit..."
+connections_list=available_connections
+debugger --cdb-entry "$juno_cdb_entry" --browse \
+ | tee $connections_list
+
+# Remove first line in the file (i.e. "Available connections:")
+tail -n +2 $connections_list > ${connections_list}_stripped
+mv ${connections_list}_stripped ${connections_list}
+
+# Use first available connection
+read connection < $connections_list || true
+rm $connections_list
+
+if [ -z "$connection" ] ; then
+ echo "ERROR: Found no connection"
+ exit 1
+fi
+
+# Run DS-5 script
+echo "Connecting to $connection..."
+debugger \
+ --cdb-entry "$juno_cdb_entry" \
+ --cdb-entry-param "Connection=$connection" \
+ --stop_on_connect=false \
+ --script=$1
diff --git a/el3_payload/spin.S b/el3_payload/spin.S
new file mode 100644
index 000000000..8cf067f20
--- /dev/null
+++ b/el3_payload/spin.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "arch.h"
+#include "asm_macros.S"
+#include "platform.h"
+
+/* Initial value of each entry in the cpus_table[] array */
+#define NO_CPU 0xDEADDEADDEADDEAD
+
+/*
+ * Declare a per-CPU array to mark the CPUs presence.
+ *
+ * If cpus_table[i] == NO_CPU then CPU 'i' hasn't successfully booted to the
+ * EL3 test payload yet.
+ *
+ * Otherwise, it successfully booted (and cpus_table[i] should contain the
+ * CPU MPID).
+ */
+ .data
+ .align 3
+cpus_table:
+ .rept CPUS_COUNT
+ .quad NO_CPU
+ .endr
+
+
+ .text
+ .global mark_cpu_presence
+ .global is_cpu_present
+
+ /*
+ * void mark_cpu_presence();
+ * Mark the calling CPU present in the CPUs array.
+ * clobbers: x0, x1, x2, x9
+ */
+func mark_cpu_presence
+ /* Store masked MPID in x2 */
+ mrs x0, mpidr_el1
+ ldr x1, =MPIDR_AFFINITY_MASK
+ and x2, x0, x1
+
+ /* Store core position in x0 */
+ mov x9, x30
+ bl platform_get_core_pos
+ mov x30, x9
+
+ /* Write masked CPU MPID in the CPU entry */
+ adr x1, cpus_table
+ add x1, x1, x0, lsl #3
+ str x2, [x1]
+
+ ret
+endfunc mark_cpu_presence
+
+ /*
+ * unsigned int is_cpu_present(unsigned int core_pos);
+ * Return 0 if CPU is absent, 1 if it is present.
+ * clobbers: x0, x1
+ */
+func is_cpu_present
+ adr x1, cpus_table
+ add x1, x1, x0, lsl #3
+ ldr x0, [x1]
+
+ ldr x1, =NO_CPU
+ cmp x0, x1
+ b.eq 1f
+ mov x0, #1
+ ret
+1:
+ mov x0, #0
+ ret
+endfunc is_cpu_present
diff --git a/el3_payload/uart.S b/el3_payload/uart.S
new file mode 100644
index 000000000..b7c5e9444
--- /dev/null
+++ b/el3_payload/uart.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "asm_macros.S"
+#include "platform.h"
+
+/*
+ * PL011 UART registers
+ */
+/* UART Flag Register */
+#define UARTFR 0x018
+/* Transmit FIFO full bit in UARTFR register */
+#define PL011_UARTFR_TXFF_BIT 5
+/* UART Data Register */
+#define UARTDR 0x000
+
+ .text
+ .global print_string
+ .global print_char
+
+ /*
+ * void print_char(unsigned int c);
+ * clobbers: x3, x4
+ */
+func print_char
+ ldr x3, =UART_BASE
+1:
+ /* Check if the transmit FIFO is full */
+ ldr w4, [x3, #UARTFR]
+ tbnz w4, #PL011_UARTFR_TXFF_BIT, 1b
+ str w0, [x3, #UARTDR]
+ ret
+endfunc print_char
+
+ /*
+ * void print_string(const char *str);
+ * clobbers: x0, x1, x2, x9
+ */
+func print_string
+ ldr x1, =UART_BASE
+ mov x2, x0
+1:
+ ldrb w0, [x2], #1
+ cmp wzr, w0
+ b.eq 2f
+
+ mov x9, x30
+ bl print_char
+ mov x30, x9
+ b 1b
+2:
+ ret
+endfunc print_string
diff --git a/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
new file mode 100644
index 000000000..61f816f13
--- /dev/null
+++ b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl ns_bl1u_entrypoint
+
+func ns_bl1u_entrypoint
+ /* --------------------------------------------------------------------
+ * Set the exception vectors.
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =exception_stubs
+ stcopr r0, HVBAR
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache and data access alignment checks.
+ * --------------------------------------------------------------------
+ */
+ ldcopr r0, HSCTLR
+ ldr r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+ orr r0, r0, r1
+ stcopr r0, HSCTLR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment.
+ * - Zero-initialise the .bss section;
+ * - Copy the data section from NS-BL1U image (stored in ROM) to the
+ * correct location in RAM.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_SIZE__
+ bl zeromem
+
+ ldr r0, =__DATA_RAM_START__
+ ldr r1, =__DATA_ROM_START__
+ ldr r2, =__DATA_SIZE__
+ bl memcpy4
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal
+ * Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
+ * enabled.
+ * There is no risk of reading stale stack memory after enabling the MMU
+ * as only the primary CPU is running at the moment.
+ * ---------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_stack
+
+ /* ---------------------------------------------------------------------
+ * Perform early platform setup & platform specific early architectural
+ * setup, e.g. MMU setup.
+ * ---------------------------------------------------------------------
+ */
+ bl tftf_early_platform_setup
+ bl tftf_plat_arch_setup
+
+ /* ---------------------------------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------------------------------
+ */
+ bl ns_bl1u_main
+dead:
+ b dead
+endfunc ns_bl1u_entrypoint
diff --git a/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
new file mode 100644
index 000000000..d83be3be1
--- /dev/null
+++ b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl ns_bl1u_entrypoint
+
+func ns_bl1u_entrypoint
+ /* --------------------------------------------------------------------
+ * Set the exception vectors.
+ * --------------------------------------------------------------------
+ */
+ adr x0, exception_stubs
+ asm_write_vbar_el1_or_el2 x1
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache, stack pointer and data access
+ * alignment checks.
+ * --------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ asm_read_sctlr_el1_or_el2
+ orr x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment.
+ * - Zero-initialise the .bss section;
+ * - Copy the data section from NS-BL1U image (stored in ROM) to the
+ * correct location in RAM.
+ * ---------------------------------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem16
+
+ ldr x0, =__DATA_RAM_START__
+ ldr x1, =__DATA_ROM_START__
+ ldr x2, =__DATA_SIZE__
+ bl memcpy16
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal
+ * Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
+ * enabled.
+ * There is no risk of reading stale stack memory after enabling the MMU
+ * as only the primary CPU is running at the moment.
+ * ---------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* ---------------------------------------------------------------------
+ * Perform early platform setup & platform specific early architectural
+ * setup, e.g. MMU setup.
+ * ---------------------------------------------------------------------
+ */
+ bl tftf_early_platform_setup
+ bl tftf_plat_arch_setup
+
+ /* ---------------------------------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------------------------------
+ */
+ bl ns_bl1u_main
+dead:
+ b dead
+endfunc ns_bl1u_entrypoint
diff --git a/fwu/ns_bl1u/ns_bl1u.ld.S b/fwu/ns_bl1u/ns_bl1u.ld.S
new file mode 100644
index 000000000..2ca5292c5
--- /dev/null
+++ b/fwu/ns_bl1u/ns_bl1u.ld.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(ns_bl1u_entrypoint)
+
+MEMORY {
+ ROM (rx): ORIGIN = NS_BL1U_RO_BASE, LENGTH = NS_BL1U_RO_LIMIT - NS_BL1U_RO_BASE
+ RAM (rwx): ORIGIN = NS_BL1U_RW_BASE, LENGTH = NS_BL1U_RW_LIMIT - NS_BL1U_RW_BASE
+}
+
+SECTIONS
+{
+ . = NS_BL1U_RO_BASE;
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "NS_BL1U_RO_BASE address is not aligned on a page boundary.")
+
+ ro . : {
+ __RO_START__ = .;
+ *ns_bl1u_entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+ __RO_END__ = .;
+ } >ROM
+
+ /*
+ * The .data section gets copied from ROM to RAM at runtime.
+ * Its LMA must be 16-byte aligned.
+ * Its VMA must be page-aligned as it marks the first read/write page.
+ */
+ . = NS_BL1U_RW_BASE;
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "NS_BL1U_RW_BASE address is not aligned on a page boundary.")
+ .data . : ALIGN(16) {
+ __DATA_RAM_START__ = .;
+ *(.data*)
+ __DATA_RAM_END__ = .;
+ } >RAM AT>ROM
+
+ stacks . (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(ns_bl_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss : ALIGN(16) {
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ __BSS_END__ = .;
+ } >RAM
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+ * the .bss section and eliminates the unnecessary zero init
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM
+
+ __NS_BL1U_RAM_START__ = ADDR(.data);
+ __NS_BL1U_RAM_END__ = .;
+
+ __DATA_ROM_START__ = LOADADDR(.data);
+ __DATA_SIZE__ = SIZEOF(.data);
+
+ /*
+ * The .data section is the last PROGBITS section so its end marks the end
+ * of the read-only part of NS_BL1U's binary.
+ */
+ ASSERT(__DATA_ROM_START__ + __DATA_SIZE__ <= NS_BL1U_RO_LIMIT,
+ "NS_BL1U's RO section has exceeded its limit.")
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+ ASSERT(. <= NS_BL1U_RW_LIMIT, "NS_BL1U's RW section has exceeded its limit.")
+}
diff --git a/fwu/ns_bl1u/ns_bl1u.mk b/fwu/ns_bl1u/ns_bl1u.mk
new file mode 100644
index 000000000..430ff5e19
--- /dev/null
+++ b/fwu/ns_bl1u/ns_bl1u.mk
@@ -0,0 +1,79 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include lib/xlat_tables_v2/xlat_tables.mk
+include lib/compiler-rt/compiler-rt.mk
+
+NS_BL1U_INCLUDES := \
+ -I${AUTOGEN_DIR} \
+ -Itftf/framework/include \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/drivers \
+ -Iinclude/drivers/arm \
+ -Iinclude/drivers/io \
+ -Iinclude/lib \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/stdlib \
+ -Iinclude/lib/stdlib/sys \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/plat/common \
+ -Iinclude/runtime_services
+
+# TODO: Remove dependency on TFTF files.
+NS_BL1U_SOURCES := $(addprefix tftf/framework/, \
+ ${ARCH}/arch.c \
+ ${ARCH}/asm_debug.S \
+ ${ARCH}/exceptions.S \
+ debug.c \
+)
+
+NS_BL1U_SOURCES += drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ fwu/ns_bl1u/${ARCH}/ns_bl1u_entrypoint.S \
+ fwu/ns_bl1u/ns_bl1u_main.c \
+ lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/exception_stubs.S \
+ lib/${ARCH}/misc_helpers.S \
+ lib/locks/${ARCH}/spinlock.S \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ ${STD_LIB_SOURCES} \
+ lib/utils/mp_printf.c \
+ lib/utils/uuid.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ plat/arm/common/arm_fwu_io_storage.c \
+ plat/common/${ARCH}/platform_helpers.S \
+ plat/common/${ARCH}/platform_up_stack.S \
+ plat/common/image_loader.c \
+ plat/common/plat_common.c
+
+NS_BL1U_SOURCES += ${COMPILER_RT_SRCS}
+
+ifeq (${FWU_BL_TEST},1)
+ NS_BL1U_SOURCES += fwu/ns_bl1u/ns_bl1u_tests.c
+endif
+
+NS_BL1U_LINKERFILE := fwu/ns_bl1u/ns_bl1u.ld.S
+
+# NS_BL1U requires accessing the flash. Force-enable it.
+NS_BL1U_DEFINES := -DUSE_NVM=1
+
+$(eval $(call add_define,NS_BL1U_DEFINES,ARM_ARCH_MAJOR))
+$(eval $(call add_define,NS_BL1U_DEFINES,ARM_ARCH_MINOR))
+$(eval $(call add_define,NS_BL1U_DEFINES,DEBUG))
+$(eval $(call add_define,NS_BL1U_DEFINES,ENABLE_ASSERTIONS))
+$(eval $(call add_define,NS_BL1U_DEFINES,FWU_BL_TEST))
+$(eval $(call add_define,NS_BL1U_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,NS_BL1U_DEFINES,PLAT_${PLAT}))
+ifeq (${ARCH},aarch32)
+ $(eval $(call add_define,NS_BL1U_DEFINES,AARCH32))
+else
+ $(eval $(call add_define,NS_BL1U_DEFINES,AARCH64))
+endif
+
+ns_bl1u: ${AUTOGEN_DIR}/tests_list.h
diff --git a/fwu/ns_bl1u/ns_bl1u_main.c b/fwu/ns_bl1u/ns_bl1u_main.c
new file mode 100644
index 000000000..46c226367
--- /dev/null
+++ b/fwu/ns_bl1u/ns_bl1u_main.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <image_loader.h>
+#include <io_fip.h>
+#include <io_storage.h>
+#include <mmio.h>
+#include <nvm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smccc.h>
+#include <status.h>
+#include <string.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define FWU_NON_SECURE (0x0)
+#define FWU_SECURE (0x1)
+
+#define FWU_NON_EXEC (0x0)
+#define FWU_EXEC (0x1)
+
+/* This size is used to exercise partial copy */
+#define FWU_COPY_PARTIAL_SIZE (0x10)
+
+extern const char version_string[];
+
+typedef void (*ns_bl2u_entrypoint_t)(unsigned long);
+
+void ns_bl1u_fwu_test_main(void);
+
+/*
+ * This structure will be used for:
+ * 1. Assigning unique image identifier.
+ * 2. Assigning attribute to FWU image.
+ * (FWU_NON_SECURE/FWU_SECURE)
+ * (FWU_NON_EXEC/FWU_EXEC)
+ */
+typedef struct fwu_image_load_desc {
+ unsigned int image_id;
+ unsigned int secure;
+ unsigned int execute;
+} fwu_image_load_desc_t;
+
+static const fwu_image_load_desc_t ns_bl1u_desc[] = {
+ [0] = {
+ /* Initialize FWU_CERT image params */
+ .image_id = FWU_CERT_ID,
+ .secure = FWU_SECURE,
+ },
+ [1] = {
+ /*
+ * Initialize SCP_BL2U image params
+ * Not needed for FVP platform.
+ */
+ .image_id = SCP_BL2U_IMAGE_ID,
+ .secure = FWU_SECURE,
+ },
+ [2] = {
+ /* Initialize BL2U image params */
+ .image_id = BL2U_IMAGE_ID,
+ .secure = FWU_SECURE,
+ .execute = FWU_EXEC
+ },
+ [3] = {
+ /* Initialize NS_BL2U image params */
+ .image_id = NS_BL2U_IMAGE_ID,
+ }
+};
+
+static long smc_result;
+
+#define CHECK_SMC_RESULT(_r) do { \
+ if (smc_result != _r) { \
+ ERROR("NS_BL1U: SMC call failed with result:%li\n", smc_result);\
+ panic(); \
+ } \
+ } while (0);
+
+static void ns_bl1u_fwu_smc_call(unsigned int smc_id,
+ unsigned long x1,
+ unsigned long x2,
+ unsigned long x3,
+ unsigned long x4)
+{
+ smc_ret_values fwu_result = {0};
+ smc_args fwu_params = {smc_id, x1, x2, x3, x4};
+ fwu_result = tftf_smc(&fwu_params);
+ smc_result = fwu_result.ret0;
+}
+
+
+/*******************************************************************************
+ * Following are the responsibilities of NS_BL1U image:
+ * Load FWU images from external NVM memory to NS RAM.
+ * Call SMC's to authenticate images.
+ * Jump to NS_BL2U which carries out next FWU steps.
+******************************************************************************/
+void ns_bl1u_main(void)
+{
+ int index;
+ unsigned int img_size;
+ int err;
+ unsigned long offset;
+ ns_bl2u_entrypoint_t ns_bl2u_entrypoint =
+ (ns_bl2u_entrypoint_t)NS_BL2U_BASE;
+ const fwu_image_load_desc_t *image_desc;
+
+ NOTICE("NS_BL1U: %s\n", version_string);
+ NOTICE("NS_BL1U: %s\n", build_message);
+
+ tftf_arch_setup();
+
+ plat_fwu_io_setup();
+
+#if FWU_BL_TEST
+ ns_bl1u_fwu_test_main();
+#endif
+
+ for (index = 0; index < ARRAY_SIZE(ns_bl1u_desc); index++) {
+
+ image_desc = &ns_bl1u_desc[index];
+
+#if PLAT_fvp
+ /* Skip SCP_BL2U loading for FVP */
+ if (image_desc->image_id == SCP_BL2U_IMAGE_ID)
+ continue;
+#endif
+
+ INFO("NS_BL1U: Loading image '%s' (ID:%u)\n",
+ get_image_name(image_desc->image_id),
+ image_desc->image_id);
+
+ img_size = get_image_size(image_desc->image_id);
+ INFO("NS_BL1U: Image size = %d\n", img_size);
+ if (img_size == 0) {
+ ERROR("NS_BL1U: Invalid image size\n");
+ panic();
+ }
+
+ if (image_desc->secure == FWU_SECURE) {
+
+ offset = get_image_offset(image_desc->image_id);
+
+ INFO("NS_BL1U: Calling COPY SMC for partial copy\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_COPY, image_desc->image_id,
+ offset, FWU_COPY_PARTIAL_SIZE, img_size);
+ CHECK_SMC_RESULT(0);
+
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_COPY, image_desc->image_id,
+ (offset + FWU_COPY_PARTIAL_SIZE),
+ (img_size - FWU_COPY_PARTIAL_SIZE), img_size);
+ CHECK_SMC_RESULT(0);
+ } else {
+ /* The only non-secure image in ns_bl1u_desc[] should be NS_BL2U */
+ assert(image_desc->image_id == NS_BL2U_IMAGE_ID);
+
+ err = load_image(image_desc->image_id, NS_BL2U_BASE);
+ if (err) {
+ ERROR("NS_BL1U: Failed to load NS_BL2U\n");
+ panic();
+ }
+ offset = NS_BL2U_BASE;
+ }
+
+ INFO("NS_BL1U: Calling AUTH SMC\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_AUTH, image_desc->image_id,
+ offset, img_size, 0);
+ CHECK_SMC_RESULT(0);
+#if FWU_BL_TEST
+ /* Check if authenticating again the same image returns error. */
+ INFO("NS_BL1U: TEST Calling SMC to auth again\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_AUTH, ns_bl1u_desc[index].image_id,
+ offset, img_size, 0);
+ CHECK_SMC_RESULT(-EPERM);
+#endif
+ if (image_desc->execute == FWU_EXEC) {
+ INFO("NS_BL1U: Calling EXECUTE SMC\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_EXECUTE, image_desc->image_id,
+ 0, 0, 0);
+ CHECK_SMC_RESULT(0);
+
+#if FWU_BL_TEST
+ /* Check if executing again the same image returns error. */
+ INFO("NS_BL1U: TEST Calling SMC to execute again\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_EXECUTE,
+ ns_bl1u_desc[index].image_id, 0, 0, 0);
+ CHECK_SMC_RESULT(-EPERM);
+#endif
+ } else {
+ /*
+ * If the image is not executable, its internal state
+ * needs to be reset, unless it is for later consumption
+ * by another CPU (like for the SCP_BL2U firmware).
+ */
+ if (image_desc->image_id != SCP_BL2U_IMAGE_ID) {
+ INFO("NS_BL1U: Calling RESET SMC\n");
+ ns_bl1u_fwu_smc_call(FWU_SMC_IMAGE_RESET,
+ image_desc->image_id,
+ 0, 0, 0);
+ CHECK_SMC_RESULT(0);
+ }
+ }
+ }
+
+ /*
+ * Clean and invalidate the caches.
+ * And disable the MMU before jumping to NS_BL2U.
+ */
+ disable_mmu_icache();
+
+ /*
+ * The argument passed to NS_BL2U is not used currently.
+ * But keeping the argument passing mechanism for future use.
+ */
+ ns_bl2u_entrypoint(0);
+
+ panic();
+}
diff --git a/fwu/ns_bl1u/ns_bl1u_tests.c b/fwu/ns_bl1u/ns_bl1u_tests.c
new file mode 100644
index 000000000..082439a90
--- /dev/null
+++ b/fwu/ns_bl1u/ns_bl1u_tests.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * Test the FWU SMC interface in Trusted Firmware-A, which is implemented in
+ * BL1.
+ ******************************************************************************/
+
+#include <debug.h>
+#include <errno.h>
+#include <io_fip.h>
+#include <platform_def.h>
+#include <smccc.h>
+#include <status.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+/* Expected number of SMC calls supported in BL1 */
+#define BL1_NUM_SMC_CALLS 11
+
+/* Expected version of BL1 SMC implementation */
+#define BL1_SMC_VER_VALUE 1
+
+typedef struct {
+ /* Description to print before sending the SMC */
+ const char *description;
+ /* The arguments to pass to the SMC */
+ const smc_args args;
+ /* The expected SMC return value */
+ u_register_t expect;
+} ns_bl1u_test_t;
+
+/*
+ * The tests consist in sending a succession of SMCs to trigger FWU operations
+ * in BL1. The order of the SMCs is important because they internally change the
+ * FWU state machine in Trusted Firmware-A.
+ */
+static const ns_bl1u_test_t tests[] = {
+ /* Basic FWU SMC handler test cases. */
+ {
+ .description = "BL1_SMC_CALL_COUNT",
+ .args = { BL1_SMC_CALL_COUNT, 0, 0, 0, 0 },
+ .expect = BL1_NUM_SMC_CALLS,
+ },
+
+ {
+ .description = "BL1_SMC_VERSION",
+ .args = { BL1_SMC_VERSION, 0, 0, 0, 0 },
+ .expect = BL1_SMC_VER_VALUE,
+ },
+
+ {
+ .description = "Invalid SMC",
+ .args = { 0xdeadbeef, 0, 0, 0, 0 },
+ .expect = SMC_UNKNOWN,
+ },
+
+ /* FWU_SMC_IMAGE_COPY test cases. */
+ {
+ .description = "IMAGE_COPY with invalid image_id",
+ .args = { FWU_SMC_IMAGE_COPY, 0xdeadbeef, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_COPY with non-secure image_id",
+ .args = { FWU_SMC_IMAGE_COPY, NS_BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_COPY with valid args",
+ .args = { FWU_SMC_IMAGE_COPY, FWU_CERT_ID, PLAT_ARM_FWU_FIP_BASE, 0x20, 0x20 },
+ .expect = STATUS_SUCCESS,
+ },
+
+ {
+ .description = "IMAGE_COPY to copy an image_id again",
+ .args = { FWU_SMC_IMAGE_COPY, FWU_CERT_ID, PLAT_ARM_FWU_FIP_BASE, 0x20, 0x20 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_COPY with source address not mapped",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_COPY with source size not mapped",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0xdeadbeef, 0xdeadbeef },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_COPY with image size more than secure mem",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x40000, 0x40000 },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_COPY with image size 0",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0, 0 },
+ .expect = -ENOMEM,
+ },
+
+ /*
+ * At this point, the FWU Certificate Image has been copied by a
+ * previous test. Try to load the BL2U image over it at the same
+ * address.
+ */
+ {
+ .description = "IMAGE_COPY with an image that overlaps a different one",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x20, 0x40 },
+ .expect = -EPERM,
+ },
+
+ /*
+ * The authentication of the FWU certificate will fail, which will set
+ * the state of this image to "RESET" for the following tests.
+ */
+ {
+ .description = "IMAGE_AUTH with an invalid image",
+ .args = { FWU_SMC_IMAGE_AUTH, FWU_CERT_ID, PLAT_ARM_FWU_FIP_BASE, 0x20, 0x20 },
+ .expect = -EAUTH,
+ },
+
+ {
+ .description = "IMAGE_COPY with 1st block size in partial copy",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x20, 0x40 },
+ .expect = STATUS_SUCCESS,
+ },
+
+ {
+ .description = "IMAGE_AUTH while copying the image",
+ .args = { FWU_SMC_IMAGE_AUTH, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x40, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_COPY with last block with invalid source in partial copy",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, 0, 0x21, 0x40 },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_COPY with last block size > total size in partial copy",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x21, 0x40 },
+ .expect = STATUS_SUCCESS,
+ },
+
+ {
+ .description = "IMAGE_AUTH to RESET the image state",
+ .args = { FWU_SMC_IMAGE_AUTH, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x40, 0 },
+ .expect = -EAUTH,
+ },
+
+ {
+ .description = "IMAGE_COPY with block size > total size",
+ .args = { FWU_SMC_IMAGE_COPY, BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0x21, 0x20 },
+ .expect = STATUS_SUCCESS,
+ },
+
+ {
+ .description = "IMAGE_RESET to RESET the image state",
+ .args = { FWU_SMC_IMAGE_RESET, BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = STATUS_SUCCESS,
+ },
+
+
+ /* FWU_SMC_IMAGE_AUTH test cases. */
+ {
+ .description = "IMAGE_AUTH with invalid image_id",
+ .args = { FWU_SMC_IMAGE_AUTH, 0, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_AUTH with secure image not copied",
+ .args = { FWU_SMC_IMAGE_AUTH, BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_AUTH with source address not mapped",
+ .args = { FWU_SMC_IMAGE_AUTH, NS_BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_AUTH with source size not mapped",
+ .args = { FWU_SMC_IMAGE_AUTH, NS_BL2U_IMAGE_ID, PLAT_ARM_FWU_FIP_BASE, 0xdeadbeef, 0 },
+ .expect = -ENOMEM,
+ },
+
+ {
+ .description = "IMAGE_COPY to copy after auth failure",
+ .args = { FWU_SMC_IMAGE_COPY, FWU_CERT_ID, PLAT_ARM_FWU_FIP_BASE, 0x40, 0x40 },
+ .expect = STATUS_SUCCESS,
+ },
+
+ {
+ .description = "IMAGE_AUTH with valid args for copied image",
+ .args = { FWU_SMC_IMAGE_AUTH, FWU_CERT_ID, 0, 0, 0 },
+ .expect = -EAUTH,
+ },
+
+ /* FWU_SMC_IMAGE_EXECUTE test cases. */
+ {
+ .description = "IMAGE_EXECUTE with invalid image_id",
+ .args = { FWU_SMC_IMAGE_EXECUTE, 0, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_EXECUTE with non-executable image_id",
+ .args = { FWU_SMC_IMAGE_EXECUTE, FWU_CERT_ID, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+ {
+ .description = "IMAGE_EXECUTE with un-authenticated image_id",
+ .args = { FWU_SMC_IMAGE_EXECUTE, BL2U_IMAGE_ID, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+
+
+ /* FWU_SMC_IMAGE_RESUME test case. */
+ {
+ .description = "IMAGE_RESUME with invalid args",
+ .args = { FWU_SMC_IMAGE_RESUME, 0, 0, 0, 0 },
+ .expect = -EPERM,
+ },
+};
+
+
+void ns_bl1u_fwu_test_main(void)
+{
+ NOTICE("NS_BL1U: ***** Starting NS_BL1U FWU test *****\n");
+
+ for (int i = 0 ; i < ARRAY_SIZE(tests); ++i) {
+ u_register_t result;
+
+ INFO("NS_BL1U: %s\n", tests[i].description);
+
+ smc_ret_values smc_ret;
+ smc_ret = tftf_smc(&tests[i].args);
+ result = smc_ret.ret0;
+
+ if (result != tests[i].expect) {
+ ERROR("NS_BL1U: Unexpected SMC return value 0x%lX, "
+ "expected 0x%lX\n", result, tests[i].expect);
+ panic();
+ }
+ }
+
+ NOTICE("NS_BL1U: ***** All FWU test passed *****\n\n");
+}
diff --git a/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
new file mode 100644
index 000000000..dfe9e4a44
--- /dev/null
+++ b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl ns_bl2u_entrypoint
+
+func ns_bl2u_entrypoint
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =exception_stubs
+ stcopr r0, HVBAR
+
+ /* ---------------------------------------------------------------------
+ * Enable the instruction cache and data access alignment checks.
+ * ---------------------------------------------------------------------
+ */
+ ldcopr r0, HSCTLR
+ ldr r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+ orr r0, r0, r1
+ stcopr r0, HSCTLR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Zero-initialise the .bss section.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_SIZE__
+ bl zeromem
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal
+ * Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
+ * enabled.
+ * There is no risk of reading stale stack memory after enabling the MMU
+ * as only the primary CPU is running at the moment.
+ * ---------------------------------------------------------------------
+ */
+ ldcopr r0, MPIDR
+ bl platform_set_stack
+
+ /* ---------------------------------------------------------------------
+ * Perform early platform setup.
+ * TODO: Investigate why tftf_plat_arch_setup() is not needed on
+ * AArch32, whereas it is on AArch64.
+ * ---------------------------------------------------------------------
+ */
+ bl tftf_early_platform_setup
+
+ /* ---------------------------------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------------------------------
+ */
+ bl ns_bl2u_main
+dead:
+ b dead
+endfunc ns_bl2u_entrypoint
diff --git a/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
new file mode 100644
index 000000000..902636e5d
--- /dev/null
+++ b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl ns_bl2u_entrypoint
+
+func ns_bl2u_entrypoint
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, exception_stubs
+ asm_write_vbar_el1_or_el2 x1
+
+ /* --------------------------------------------------------------------
+ * Enable the instruction cache, stack pointer and data access
+ * alignment checks.
+ * --------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ asm_read_sctlr_el1_or_el2
+ orr x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Zero-initialise the .bss section.
+ * ---------------------------------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem16
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal
+ * Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
+ * enabled.
+ * There is no risk of reading stale stack memory after enabling the MMU
+ * as only the primary CPU is running at the moment.
+ * ---------------------------------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* ---------------------------------------------------------------------
+ * Perform early platform setup & platforms specific early architectural
+ * setup, e.g. MMU setup.
+ * ----------------------------------------------------------------------
+ */
+ bl tftf_early_platform_setup
+ bl tftf_plat_arch_setup
+
+ /* ---------------------------------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------------------------------
+ */
+ bl ns_bl2u_main
+dead:
+ b dead
+endfunc ns_bl2u_entrypoint
diff --git a/fwu/ns_bl2u/ns_bl2u.ld.S b/fwu/ns_bl2u/ns_bl2u.ld.S
new file mode 100644
index 000000000..48af3036b
--- /dev/null
+++ b/fwu/ns_bl2u/ns_bl2u.ld.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(ns_bl2u_entrypoint)
+
+MEMORY {
+ RAM (rwx): ORIGIN = NS_BL2U_BASE, LENGTH = NS_BL2U_LIMIT - NS_BL2U_BASE
+}
+
+SECTIONS
+{
+ . = NS_BL2U_BASE;
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "NS_BL2U_BASE address is not aligned on a page boundary.")
+
+ ro . : {
+ __RO_START__ = .;
+ *ns_bl2u_entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+ *(.vectors)
+ __RO_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked as
+ * read-only and executable. RW data from the next section must not
+ * creep in. Ensure the rest of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+ __RO_END__ = .;
+ } >RAM
+
+ .data . : {
+ __DATA_START__ = .;
+ *(.data*)
+ __DATA_END__ = .;
+ } >RAM
+
+ stacks (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(ns_bl_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss : ALIGN(16) {
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ __BSS_END__ = .;
+ } >RAM
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+ * the .bss section and eliminates the unnecessary zero init
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM
+
+ __NS_BL2U_END__ = .;
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+ ASSERT(. <= NS_BL2U_LIMIT, "NS_BL2U image has exceeded its limit.")
+}
diff --git a/fwu/ns_bl2u/ns_bl2u.mk b/fwu/ns_bl2u/ns_bl2u.mk
new file mode 100644
index 000000000..e4c93a1d4
--- /dev/null
+++ b/fwu/ns_bl2u/ns_bl2u.mk
@@ -0,0 +1,75 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include lib/xlat_tables_v2/xlat_tables.mk
+include lib/compiler-rt/compiler-rt.mk
+
+NS_BL2U_INCLUDES := \
+ -I${AUTOGEN_DIR} \
+ -Itftf/framework/include \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/drivers \
+ -Iinclude/drivers/arm \
+ -Iinclude/drivers/io \
+ -Iinclude/lib \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/stdlib \
+ -Iinclude/lib/stdlib/sys \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/plat/common \
+ -Iinclude/runtime_services
+
+# TODO: Remove dependency on TFTF files.
+NS_BL2U_SOURCES := $(addprefix tftf/framework/, \
+ ${ARCH}/arch.c \
+ ${ARCH}/asm_debug.S \
+ ${ARCH}/exceptions.S \
+ debug.c \
+)
+
+NS_BL2U_SOURCES += fwu/ns_bl2u/${ARCH}/ns_bl2u_entrypoint.S \
+ fwu/ns_bl2u/ns_bl2u_main.c \
+ lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/exception_stubs.S \
+ lib/${ARCH}/misc_helpers.S \
+ lib/locks/${ARCH}/spinlock.S \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ ${STD_LIB_SOURCES} \
+ lib/utils/mp_printf.c \
+ lib/utils/uuid.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ plat/arm/common/arm_fwu_io_storage.c \
+ plat/common/${ARCH}/platform_helpers.S \
+ plat/common/${ARCH}/platform_up_stack.S \
+ plat/common/fwu_nvm_accessors.c \
+ plat/common/plat_common.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_fip.c
+
+NS_BL2U_SOURCES += ${COMPILER_RT_SRCS}
+
+NS_BL2U_LINKERFILE := fwu/ns_bl2u/ns_bl2u.ld.S
+
+# NS_BL2U requires accessing the flash. Force-enable it.
+NS_BL2U_DEFINES := -DUSE_NVM=1
+
+$(eval $(call add_define,NS_BL2U_DEFINES,ARM_ARCH_MAJOR))
+$(eval $(call add_define,NS_BL2U_DEFINES,ARM_ARCH_MINOR))
+$(eval $(call add_define,NS_BL2U_DEFINES,DEBUG))
+$(eval $(call add_define,NS_BL2U_DEFINES,ENABLE_ASSERTIONS))
+$(eval $(call add_define,NS_BL2U_DEFINES,FWU_BL_TEST))
+$(eval $(call add_define,NS_BL2U_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,NS_BL2U_DEFINES,PLAT_${PLAT}))
+ifeq (${ARCH},aarch32)
+ $(eval $(call add_define,NS_BL2U_DEFINES,AARCH32))
+else
+ $(eval $(call add_define,NS_BL2U_DEFINES,AARCH64))
+endif
+
+ns_bl2u: ${AUTOGEN_DIR}/tests_list.h
diff --git a/fwu/ns_bl2u/ns_bl2u_main.c b/fwu/ns_bl2u/ns_bl2u_main.c
new file mode 100644
index 000000000..815859de1
--- /dev/null
+++ b/fwu/ns_bl2u/ns_bl2u_main.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <fwu_nvm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+extern const char version_string[];
+
+void ns_bl2u_main(void)
+{
+ smc_args fwu_params = {0};
+ smc_ret_values fwu_result = {0};
+
+ NOTICE("NS_BL2U: %s\n", version_string);
+ NOTICE("NS_BL2U: %s\n", build_message);
+
+ tftf_platform_setup();
+
+#if FWU_BL_TEST
+ unsigned int toc_header;
+ STATUS status = fwu_nvm_read(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ ERROR("NS_BL2U: Failed to read NVM\n");
+ panic();
+ }
+
+ /* Update the TOC if found invalid. */
+ if (toc_header != TOC_HEADER_NAME) {
+ toc_header = TOC_HEADER_NAME;
+ status = fwu_nvm_write(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ ERROR("NS_BL2U: Failed to update TOC value\n");
+ panic();
+ }
+ INFO("NS_BL2U: Updated TOC value\n");
+ }
+#endif
+
+ /* Update the FIP image. */
+ if (fwu_update_fip(FIP_BKP_ADDRESS) != STATUS_SUCCESS) {
+ ERROR("NS_BL2U: Firmware Image Update Failed\n");
+ panic();
+ }
+
+ /* Call FWU_SMC_UPDATE_DONE to indicate image update done. */
+ INFO("NS_BL2U: Calling FWU_SMC_UPDATE_DONE\n");
+ fwu_params.arg0 = FWU_SMC_UPDATE_DONE;
+ fwu_result = tftf_smc(&fwu_params);
+ ERROR("NS_BL2U: Unexpected return from FWU process (%d)\n",
+ (int)fwu_result.ret0);
+ panic();
+}
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
new file mode 100644
index 000000000..2edf5b900
--- /dev/null
+++ b/include/common/aarch32/asm_macros.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+
+#define TLB_INVALIDATE(_reg, _coproc) \
+ stcopr _reg, _coproc
+
+ /*
+ * Coprocessor register accessors
+ */
+ .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
+ mrc \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+ .endm
+
+ .macro ldcopr16 reg1, reg2, coproc, opc1, CRm
+ mrrc \coproc, \opc1, \reg1, \reg2, \CRm
+ .endm
+
+ .macro stcopr reg, coproc, opc1, CRn, CRm, opc2
+ mcr \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+ .endm
+
+ .macro stcopr16 reg1, reg2, coproc, opc1, CRm
+ mcrr \coproc, \opc1, \reg1, \reg2, \CRm
+ .endm
+
+ /* Cache line size helpers */
+ .macro dcache_line_size reg, tmp
+ ldcopr \tmp, CTR
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ .macro icache_line_size reg, tmp
+ ldcopr \tmp, CTR
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 32 byte boundary.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 5
+ \label:
+ .endm
+
+ /*
+ * This macro calculates the base address of an MP stack using the
+ * platform_get_core_pos() index, the name of the stack storage and
+ * the size of each stack
+ * Out: r0 = physical address of stack base
+ * Clobber: r14, r1, r2
+ */
+ .macro get_mp_stack _name, _size
+ bl platform_get_core_pos
+ ldr r2, =(\_name + \_size)
+ mov r1, #\_size
+ mla r0, r0, r1, r2
+ .endm
+
+ /*
+ * This macro calculates the base address of a uniprocessor (UP) stack
+ * using the name of the stack storage and the size of the stack
+ * Out: r0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ ldr r0, =(\_name + \_size)
+ .endm
+
+#endif /* __ASM_MACROS_S__ */
+
diff --git a/include/common/aarch32/assert_macros.S b/include/common/aarch32/assert_macros.S
new file mode 100644
index 000000000..5dc9e6472
--- /dev/null
+++ b/include/common/aarch32/assert_macros.S
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+ /*
+ * Assembler macro to enable asm_assert. We assume that the stack is
+ * initialized prior to invoking this macro. Please note that the
+ * macro makes use of label '300' to provide the logic and the
+ * caller should make sure that this label is not used to branch
+ * prior to calling this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b##_cc 300f ;\
+ ldr r0, =.L_assert_filename ;\
+ mov r1, #__LINE__ ;\
+ b asm_assert ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
new file mode 100644
index 000000000..ad4729018
--- /dev/null
+++ b/include/common/aarch64/asm_macros.S
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+
+ .macro func_prologue
+ stp x29, x30, [sp, #-0x10]!
+ mov x29,sp
+ .endm
+
+ .macro func_epilogue
+ ldp x29, x30, [sp], #0x10
+ .endm
+
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 2KB boundary, as required by the ARMv8 architecture.
+ * Use zero bytes as the fill value to be stored in the padding bytes
+ * so that it inserts illegal AArch64 instructions. This increases
+ * security, robustness and potentially facilitates debugging.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 11, 0
+ \label:
+ .endm
+
+ /*
+ * Create an entry in the exception vector table, enforcing it is
+ * aligned on a 128-byte boundary, as required by the ARMv8
+ * architecture. Use zero bytes as the fill value to be stored in the
+ * padding bytes so that it inserts illegal AArch64 instructions.
+ * This increases security, robustness and potentially facilitates
+ * debugging.
+ */
+ .macro vector_entry label
+ .section .vectors, "ax"
+ .align 7, 0
+ \label:
+ .endm
+
+ /*
+ * This macro verifies that a given vector doesn't exceed the
+ * architectural limit of 32 instructions. It is meant to be placed
+ * immediately after the last instruction in the vector. It takes the
+ * vector entry as its parameter.
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+ /*
+ * This macro calculates the base address of an MP stack using the
+ * platform_get_core_pos() index, the name of the stack storage and
+ * the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_mp_stack _name, _size
+ bl platform_get_core_pos
+ ldr x2, =(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
+ /*
+ * This macro calculates the base address of a UP stack using the
+ * name of the stack storage and the size of the stack
+ * Out: X0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ ldr x0, =(\_name + \_size)
+ .endm
+
+ /*
+ * Helper macro to generate the best mov/movk combinations according
+ * to the value to be moved. The 16 bits from '_shift' are tested and
+ * if not zero, they are moved into '_reg' without affecting
+ * other bits.
+ */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+ /*
+ * Helper macro to load arbitrary values into 32 or 64-bit registers
+ * which generates the best mov/movk combinations. Many base addresses
+ * are 64KB aligned; the macro will eliminate updating bits 15:0 in
+ * that case.
+ */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
+
+ .macro asm_read_sysreg_el1_or_el2 sysreg
+ mrs x0, CurrentEL
+ cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp x0, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ mrs x0, \sysreg\()_el1
+ b 3f
+2:
+ mrs x0, \sysreg\()_el2
+3:
+ .endm
+
+ .macro asm_write_sysreg_el1_or_el2 sysreg scratch_reg
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ msr \sysreg\()_el1, x0
+ b 3f
+2:
+ msr \sysreg\()_el2, x0
+3:
+ .endm
+
+ .macro asm_read_sctlr_el1_or_el2
+ asm_read_sysreg_el1_or_el2 sctlr
+ .endm
+
+ .macro asm_write_sctlr_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 sctlr \scratch_reg
+ .endm
+
+ .macro asm_write_vbar_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 vbar \scratch_reg
+ .endm
+
+/*
+ * Depending on the current exception level, jump to 'label_el1' or 'label_el2'.
+ * If the current exception level is neither EL1 nor EL2, jump to 'label_error'
+ * instead.
+ * The caller needs to provide the macro with a scratch 64-bit register to use.
+ * Its contents prior to calling this function will be lost.
+ */
+ .macro JUMP_EL1_OR_EL2 scratch_reg, label_el1, label_el2, label_error
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq \label_el1
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq \label_el2
+ b \label_error
+ .endm
+
+#endif /* __ASM_MACROS_S__ */
diff --git a/include/common/aarch64/assert_macros.S b/include/common/aarch64/assert_macros.S
new file mode 100644
index 000000000..b91633163
--- /dev/null
+++ b/include/common/aarch64/assert_macros.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+ /*
+ * Assembler macro to enable asm_assert. Use this macro wherever
+ * assert is required in assembly. Please note that the macro makes
+ * use of label '300' to provide the logic and the caller
+ * should make sure that this label is not used to branch prior
+ * to calling this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b._cc 300f ;\
+ adr x0, .L_assert_filename ;\
+ mov x1, __LINE__ ;\
+ b asm_assert ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S
new file mode 100644
index 000000000..d38dcce71
--- /dev/null
+++ b/include/common/asm_macros_common.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_COMMON_S__
+#define __ASM_MACROS_COMMON_S__
+
+ /*
+ * This macro is used to create a function label and place the
+ * code into a separate text section based on the function name
+ * to enable elimination of unused code during linking. It also adds
+ * basic debug information to enable call stack printing most of the
+ * time.
+ */
+ .macro func _name
+ /*
+ * Add Call Frame Information entry in the .debug_frame section for
+ * debugger consumption. This enables callstack printing in debuggers.
+ * This does not use any space in the final loaded binary, only in the
+ * ELF file.
+ * Note that a function manipulating the CFA pointer location (i.e. the
+ * x29 frame pointer on AArch64) should declare it using the
+ * appropriate .cfi* directives, or be prepared to have a degraded
+ * debugging experience.
+ */
+ .cfi_sections .debug_frame
+ .section .text.\_name, "ax"
+ .type \_name, %function
+ .func \_name
+ /*
+ * .cfi_startproc and .cfi_endproc are needed to output entries in
+ * .debug_frame
+ */
+ .cfi_startproc
+ \_name:
+ .endm
+
+ /*
+ * This macro is used to mark the end of a function.
+ */
+ .macro endfunc _name
+ .endfunc
+ .cfi_endproc
+ .size \_name, . - \_name
+ .endm
+
+ /*
+ * This macro declares an array of 1 or more stacks, properly
+ * aligned and in the requested section
+ */
+#define STACK_ALIGN 6
+
+ .macro declare_stack _name, _section, _size, _count
+ .if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
+ .error "Stack size not correctly aligned"
+ .endif
+ .section \_section, "aw", %nobits
+ .align STACK_ALIGN
+ \_name:
+ .space ((\_count) * (\_size)), 0
+ .endm
+
+#endif /* __ASM_MACROS_COMMON_S__ */
+
diff --git a/include/common/debug.h b/include/common/debug.h
new file mode 100644
index 000000000..c5b0f8e3b
--- /dev/null
+++ b/include/common/debug.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014-2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <stdio.h>
+
+/* TODO: Deal with per-image printf functions in a cleaner way. */
+
+#ifdef IMAGE_CACTUS
+/*
+ * The register MPIDR_EL1 can't be read from EL0, which means that mp_printf()
+ * can't be used.
+ */
+#define mp_printf printf
+#else
+/*
+ * Print a formatted string on the UART.
+ *
+ * Does the same thing as the standard libc's printf() function but in a MP-safe
+ * manner, i.e. it can be called from several CPUs simultaneously without
+ * getting interleaved messages.
+ *
+ * The messages printed using mp_printf() won't be saved in the test results
+ * (use tftf_testcase_output() instead for that). mp_printf() is meant to be
+ * used for debug traces only. Unlike messages stored in the tests output which
+ * appear only at the end of the test session in the test report, messages
+ * printed using mp_printf() will be displayed straight away.
+ *
+ * Messages will be prefixed with the CPU MPID issuing the call, like this:
+ * [cpu 0x0002] Sending SGI #1 to cpu 0
+ */
+__attribute__((format(printf, 1, 2)))
+void mp_printf(const char *fmt, ...);
+#endif
+
+/*
+ * The log output macros print output to the console. These macros produce
+ * compiled log output only if the LOG_LEVEL defined in the makefile (or the
+ * make command line) is greater or equal than the level required for that
+ * type of log output.
+ * The format expected is similar to printf(). For example:
+ * INFO("Info %s.\n", "message") -> [cpu 0xxx] INFO: Info message.
+ * WARN("Warning %s.\n", "message") -> [cpu 0xxx] WARNING: Warning message.
+ */
+#define LOG_LEVEL_NONE 0
+#define LOG_LEVEL_ERROR 10
+#define LOG_LEVEL_NOTICE 20
+#define LOG_LEVEL_WARNING 30
+#define LOG_LEVEL_INFO 40
+#define LOG_LEVEL_VERBOSE 50
+
+#if LOG_LEVEL >= LOG_LEVEL_NOTICE
+# define NOTICE(...) mp_printf("NOTICE: " __VA_ARGS__)
+#else
+# define NOTICE(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_ERROR
+# define ERROR(...) mp_printf("ERROR: " __VA_ARGS__)
+#else
+# define ERROR(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_WARNING
+# define WARN(...) mp_printf("WARNING: " __VA_ARGS__)
+#else
+# define WARN(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+# define INFO(...) mp_printf("INFO: " __VA_ARGS__)
+#else
+# define INFO(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+# define VERBOSE(...) mp_printf("VERBOSE: " __VA_ARGS__)
+#else
+# define VERBOSE(...)
+#endif
+
+/*
+ * For the moment this panic function is very basic: report an error and
+ * spin. This can be expanded in the future to provide more information.
+ */
+#if DEBUG
+void __attribute__((__noreturn__)) do_panic(const char *file, int line);
+#define panic() do_panic(__FILE__, __LINE__)
+
+void __attribute__((__noreturn__)) do_bug_unreachable(const char *file, int line);
+#define bug_unreachable() do_bug_unreachable(__FILE__, __LINE__)
+
+#else
+void __attribute__((__noreturn__)) do_panic(void);
+#define panic() do_panic()
+
+void __attribute__((__noreturn__)) do_bug_unreachable(void);
+#define bug_unreachable() do_bug_unreachable()
+
+#endif
+
+#endif /* __DEBUG_H__ */
diff --git a/include/common/firmware_image_package.h b/include/common/firmware_image_package.h
new file mode 100644
index 000000000..f168f236e
--- /dev/null
+++ b/include/common/firmware_image_package.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FIRMWARE_IMAGE_PACKAGE_H__
+#define __FIRMWARE_IMAGE_PACKAGE_H__
+
+#include <stdint.h>
+#include <uuid.h>
+
+/* This is used as a signature to validate the blob header */
+#define TOC_HEADER_NAME 0xAA640001
+
+/* ToC Entry UUIDs */
+#define UUID_FIRMWARE_UPDATE_SCP_BL2U \
+ {0x03279265, 0x742f, 0x44e6, 0x8d, 0xff, {0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10} }
+#define UUID_FIRMWARE_UPDATE_BL2U \
+ {0x37ebb360, 0xe5c1, 0x41ea, 0x9d, 0xf3, {0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01} }
+#define UUID_FIRMWARE_UPDATE_NS_BL2U \
+ {0x111d514f, 0xe52b, 0x494e, 0xb4, 0xc5, {0x83, 0xc2, 0xf7, 0x15, 0x84, 0x0a} }
+#define UUID_FIRMWARE_UPDATE_FWU_CERT \
+ {0xb28a4071, 0xd618, 0x4c87, 0x8b, 0x2e, {0xc6, 0xdc, 0xcd, 0x50, 0xf0, 0x96} }
+
+typedef struct fip_toc_header {
+ uint32_t name;
+ uint32_t serial_number;
+ uint64_t flags;
+} fip_toc_header_t;
+
+typedef struct fip_toc_entry {
+ uuid_t uuid;
+ uint64_t offset_address;
+ uint64_t size;
+ uint64_t flags;
+} fip_toc_entry_t;
+
+#endif /* __FIRMWARE_IMAGE_PACKAGE_H__ */
diff --git a/include/common/fwu_nvm.h b/include/common/fwu_nvm.h
new file mode 100644
index 000000000..923a5968d
--- /dev/null
+++ b/include/common/fwu_nvm.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FWU_NVM_H__
+#define __FWU_NVM_H__
+
+#include <nvm.h>
+#include <platform_def.h>
+
+#define FIP_IMAGE_UPDATE_DONE_FLAG (0xDEADBEEF)
+/*
+ * This is the temporary DDR address for loading the backup fip.bin
+ * image from NVM, which is used to replace the original fip.bin.
+ * This address is chosen to leave room for NS_BL2U to be expanded
+ * in the future, and to accommodate the large size of fip.bin.
+ */
+#define FIP_IMAGE_TMP_DDR_ADDRESS (DRAM_BASE + 0x100000)
+#define FWU_TFTF_TESTCASE_BUFFER_OFFSET \
+ (TFTF_NVM_OFFSET + TFTF_STATE_OFFSET(testcase_buffer))
+
+/*
+ * This offset is used to corrupt data in fip.bin
+ * The offset is from the base where fip.bin is
+ * located in NVM. This particular value is chosen
+ * to make sure the corruption is done beyond fip header.
+ */
+#define FIP_CORRUPT_OFFSET (0x300)
+
+/*
+ * This is the base address of the backup fip.bin image in NVM,
+ * which is used to replace the original fip.bin.
+ * This address is chosen so that it can coexist with all
+ * the other images in the NVM.
+ */
+#define FIP_BKP_ADDRESS (FLASH_BASE + 0x1000000)
+
+/* Writes the buffer to the flash at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS fwu_nvm_write(unsigned long long offset, const void *buffer, size_t size);
+
+/* Reads the flash into buffer at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS fwu_nvm_read(unsigned long long offset, void *buffer, size_t size);
+
+/*
+ * This function is used to replace the original fip.bin
+ * by the backup fip.bin passed through fip_addr argument.
+ */
+STATUS fwu_update_fip(unsigned long fip_addr);
+
+#endif /* __FWU_NVM_H__ */
diff --git a/include/common/image_loader.h b/include/common/image_loader.h
new file mode 100644
index 000000000..032c00ec3
--- /dev/null
+++ b/include/common/image_loader.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IMAGE_LOADER_H__
+#define __IMAGE_LOADER_H__
+
+#include <firmware_image_package.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Generic function to get flash offset of an image */
+unsigned long get_image_offset(unsigned int image_id);
+
+/* Generic function to return the size of an image */
+unsigned long get_image_size(unsigned int image_id);
+
+/*
+ * Generic function to load an image at a specific address given an image id
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int load_image(unsigned int image_id,
+ uintptr_t image_base);
+
+/*
+ * Generic function to load partial image at a specific address given
+ * an image id. The flag argument is used to indicate last block to be
+ * loaded in the partial loading scenario. If is_last_block == 0 then
+ * devices are closed else they are kept open for partial image loading.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int load_partial_image(unsigned int image_id,
+ uintptr_t image_base,
+ size_t image_size,
+ unsigned int is_last_block);
+
+/* This is to keep track of file related data. */
+typedef struct {
+ unsigned int file_pos;
+ fip_toc_entry_t entry;
+} fip_file_state_t;
+
+#endif /* __IMAGE_LOADER_H__ */
diff --git a/include/common/param_header.h b/include/common/param_header.h
new file mode 100644
index 000000000..32763ddc3
--- /dev/null
+++ b/include/common/param_header.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PARAM_HEADER_H__
+#define __PARAM_HEADER_H__
+
+/* Param header types */
+#define PARAM_EP 0x01
+#define PARAM_IMAGE_BINARY 0x02
+#define PARAM_BL31 0x03
+#define PARAM_BL_LOAD_INFO 0x04
+#define PARAM_BL_PARAMS 0x05
+#define PARAM_PSCI_LIB_ARGS 0x06
+#define PARAM_SP_IMAGE_BOOT_INFO 0x07
+
+/* Param header version */
+#define VERSION_1 0x01
+#define VERSION_2 0x02
+
+#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
+ (_p)->h.type = (uint8_t)(_type); \
+ (_p)->h.version = (uint8_t)(_ver); \
+ (_p)->h.size = (uint16_t)sizeof(*_p); \
+ (_p)->h.attr = (uint32_t)(_attr) ; \
+ } while (0)
+
+/* Following is used for populating structure members statically. */
+#define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr) \
+ ._p.h.type = (uint8_t)(_type), \
+ ._p.h.version = (uint8_t)(_ver), \
+ ._p.h.size = (uint16_t)sizeof(_p_type), \
+ ._p.h.attr = (uint32_t)(_attr)
+
+#ifndef __ASSEMBLY__
+
+#include <types.h>
+
+/***************************************************************************
+ * This structure provides version information, the size of the
+ * structure, and attributes for the structure it represents.
+ ***************************************************************************/
+typedef struct param_header {
+ uint8_t type; /* type of the structure */
+ uint8_t version; /* version of this structure */
+ uint16_t size; /* size of this structure in bytes */
+ uint32_t attr; /* attributes: unused bits SBZ */
+} param_header_t;
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PARAM_HEADER_H__ */
+
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
new file mode 100644
index 000000000..942e9f835
--- /dev/null
+++ b/include/common/test_helpers.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEST_HELPERS_H__
+#define __TEST_HELPERS_H__
+
+#include <plat_topology.h>
+#include <psci.h>
+#include <tftf_lib.h>
+#include <trusted_os.h>
+#include <tsp.h>
+#include <uuid.h>
+#include <uuid_utils.h>
+
+typedef struct {
+ uintptr_t addr;
+ size_t size;
+ unsigned int attr;
+ void *arg;
+} map_args_unmap_t;
+
+typedef test_result_t (*test_function_arg_t)(void *arg);
+
+#define SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(n) \
+ do { \
+ unsigned int clusters_cnt; \
+ clusters_cnt = tftf_get_total_clusters_count(); \
+ if (clusters_cnt < (n)) { \
+ tftf_testcase_printf( \
+ "Need at least %u clusters, only found %u\n", \
+ (n), clusters_cnt); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_LESS_THAN_N_CPUS(n) \
+ do { \
+ unsigned int cpus_cnt; \
+ cpus_cnt = tftf_get_total_cpus_count(); \
+ if (cpus_cnt < (n)) { \
+ tftf_testcase_printf( \
+ "Need at least %u CPUs, only found %u\n", \
+ (n), cpus_cnt); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_TRUSTED_OS_NOT_PRESENT() \
+ do { \
+ uuid_t tos_uuid; \
+ \
+ if (!is_trusted_os_present(&tos_uuid)) { \
+ tftf_testcase_printf("No Trusted OS detected\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_TSP_NOT_PRESENT() \
+ do { \
+ uuid_t tos_uuid; \
+ char tos_uuid_str[UUID_STR_SIZE]; \
+ \
+ if (!is_trusted_os_present(&tos_uuid)) { \
+ tftf_testcase_printf("No Trusted OS detected\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ if (!uuid_equal(&tos_uuid, &tsp_uuid)) { \
+ tftf_testcase_printf( \
+ "Trusted OS is not the TSP, its UUID is: %s\n", \
+ uuid_to_str(&tos_uuid, tos_uuid_str)); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_MM_NOT_PRESENT() \
+ do { \
+ smc_args version_smc = { MM_VERSION_AARCH32 }; \
+ smc_ret_values smc_ret = tftf_smc(&version_smc); \
+ uint32_t version = smc_ret.ret0; \
+ \
+ if (version == (uint32_t) SMC_UNKNOWN) { \
+ tftf_testcase_printf("SPM not detected.\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_MM_VERSION_LESS_THAN(major, minor) \
+ do { \
+ smc_args version_smc = { MM_VERSION_AARCH32 }; \
+ smc_ret_values smc_ret = tftf_smc(&version_smc); \
+ uint32_t version = smc_ret.ret0; \
+ \
+ if (version == (uint32_t) SMC_UNKNOWN) { \
+ tftf_testcase_printf("SPM not detected.\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ if (version < MM_VERSION_FORM(major, minor)) { \
+ tftf_testcase_printf("MM_VERSION returned %d.%d\n" \
+ "The required version is %d.%d\n", \
+ version >> MM_VERSION_MAJOR_SHIFT, \
+ version & MM_VERSION_MINOR_MASK, \
+ major, minor); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ VERBOSE("MM_VERSION returned %d.%d\n", \
+ version >> MM_VERSION_MAJOR_SHIFT, \
+ version & MM_VERSION_MINOR_MASK); \
+ } while (0)
+
+/* Helper macro to verify if system suspend API is supported */
+#define is_psci_sys_susp_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND) \
+ == PSCI_E_SUCCESS)
+
+/* Helper macro to verify if PSCI_STAT_COUNT API is supported */
+#define is_psci_stat_count_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_STAT_COUNT) \
+ == PSCI_E_SUCCESS)
+
+/*
+ * Helper function to verify the system state is ready for system
+ * suspend. i.e., a single CPU is running and all other CPUs are powered off.
+ * Returns 1 if the system is ready to suspend, 0 otherwise.
+ */
+int is_sys_suspend_state_ready(void);
+
+/*
+ * Helper function to reset the system. This function shouldn't return.
+ * It is not marked with __dead to help the test to catch some error in
+ * TF
+ */
+void psci_system_reset(void);
+
+/*
+ * Helper function that enables/disables the mem_protect mechanism
+ */
+int psci_mem_protect(int val);
+
+
+/*
+ * Helper function to call PSCI MEM_PROTECT_CHECK
+ */
+int psci_mem_protect_check(uintptr_t addr, size_t size);
+
+
+/*
+ * Helper function to get a sentinel address that can be used to test mem_protect
+ */
+unsigned char *psci_mem_prot_get_sentinel(void);
+
+/*
+ * Helper function to memory map and unmap a region needed by a test.
+ *
+ * Return TEST_RESULT_FAIL if the memory could not be successfully mapped or
+ * unmapped. Otherwise, return the test function's result.
+ */
+test_result_t map_test_unmap(const map_args_unmap_t *args,
+ test_function_arg_t test);
+
+#endif /* __TEST_HELPERS_H__ */
diff --git a/include/drivers/arm/arm_gic.h b/include/drivers/arm/arm_gic.h
new file mode 100644
index 000000000..1a6bc3d46
--- /dev/null
+++ b/include/drivers/arm/arm_gic.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_GIC_H__
+#define __ARM_GIC_H__
+
+#include <stdint.h>
+
+/***************************************************************************
+ * Defines and prototypes for ARM GIC driver.
+ **************************************************************************/
+#define MAX_SGIS 16
+#define MIN_SGI_ID 0
+#define MAX_SGI_ID 15
+#define MIN_PPI_ID 16
+#define MAX_PPI_ID 31
+#define MIN_SPI_ID 32
+#define MAX_SPI_ID 1020
+
+#define IS_SGI(irq_num) \
+ (((irq_num) >= MIN_SGI_ID) && ((irq_num) <= MAX_SGI_ID))
+
+#define IS_PPI(irq_num) \
+ (((irq_num) >= MIN_PPI_ID) && ((irq_num) <= MAX_PPI_ID))
+
+#define IS_SPI(irq_num) \
+ (((irq_num) >= MIN_SPI_ID) && ((irq_num) <= MAX_SPI_ID))
+
+#define IS_VALID_INTR_ID(irq_num) \
+ (((irq_num) >= MIN_SGI_ID) && ((irq_num) <= MAX_SPI_ID))
+
+#define GIC_HIGHEST_NS_PRIORITY 0
+#define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */
+#define GIC_SPURIOUS_INTERRUPT 1023
+
+/******************************************************************************
+ * Setup the global GIC interface. In case of GICv2, it would be the GIC
+ * Distributor and in case of GICv3 it would be GIC Distributor and
+ * Re-distributor.
+ *****************************************************************************/
+void arm_gic_setup_global(void);
+
+/******************************************************************************
+ * Setup the GIC interface local to the CPU
+ *****************************************************************************/
+void arm_gic_setup_local(void);
+
+/******************************************************************************
+ * Disable interrupts for this local CPU
+ *****************************************************************************/
+void arm_gic_disable_interrupts_local(void);
+
+/******************************************************************************
+ * Enable interrupts for this local CPU
+ *****************************************************************************/
+void arm_gic_enable_interrupts_local(void);
+
+/******************************************************************************
+ * Send SGI with ID `sgi_id` to a core with index `core_pos`.
+ *****************************************************************************/
+void arm_gic_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/******************************************************************************
+ * Set the interrupt target of interrupt ID `num` to a core with index
+ * `core_pos`
+ *****************************************************************************/
+void arm_gic_set_intr_target(unsigned int num, unsigned int core_pos);
+
+/******************************************************************************
+ * Get the priority of the interrupt ID `num`.
+ *****************************************************************************/
+unsigned int arm_gic_get_intr_priority(unsigned int num);
+
+/******************************************************************************
+ * Set the priority of the interrupt ID `num` to `priority`.
+ *****************************************************************************/
+void arm_gic_set_intr_priority(unsigned int num, unsigned int priority);
+
+/******************************************************************************
+ * Check if the interrupt ID `num` is enabled
+ *****************************************************************************/
+unsigned int arm_gic_intr_enabled(unsigned int num);
+
+/******************************************************************************
+ * Enable the interrupt ID `num`
+ *****************************************************************************/
+void arm_gic_intr_enable(unsigned int num);
+
+/******************************************************************************
+ * Disable the interrupt ID `num`
+ *****************************************************************************/
+void arm_gic_intr_disable(unsigned int num);
+
+/******************************************************************************
+ * Acknowledge the highest pending interrupt. Return the interrupt ID of the
+ * acknowledged interrupt. The raw interrupt acknowledge register value will
+ * be populated in `raw_iar`.
+ *****************************************************************************/
+unsigned int arm_gic_intr_ack(unsigned int *raw_iar);
+
+/******************************************************************************
+ * Signal the end of interrupt processing of a interrupt. The raw interrupt
+ * acknowledge register value returned by arm_gic_intr_ack() should be passed
+ * as argument to this function.
+ *****************************************************************************/
+void arm_gic_end_of_intr(unsigned int raw_iar);
+
+/******************************************************************************
+ * Check if the interrupt with ID `num` is pending at the GIC. Returns 1 if
+ * interrupt is pending else returns 0.
+ *****************************************************************************/
+unsigned int arm_gic_is_intr_pending(unsigned int num);
+
+/******************************************************************************
+ * Clear the pending status of the interrupt with ID `num` at the GIC.
+ *****************************************************************************/
+void arm_gic_intr_clear(unsigned int num);
+
+/******************************************************************************
+ * Initialize the GIC Driver. This function will detect the GIC Architecture
+ * present on the system and initialize the appropriate driver. The
+ * `gicr_base` argument will be ignored on GICv2 systems.
+ *****************************************************************************/
+void arm_gic_init(uintptr_t gicc_base, uintptr_t gicd_base, uintptr_t gicr_base);
+
+/******************************************************************************
+ * Save the GIC context local to this CPU (like GIC CPU Interface) which will
+ * be lost when this CPU is powered down.
+ *****************************************************************************/
+void arm_gic_save_context_local(void);
+
+/******************************************************************************
+ * Restore the GIC context local to this CPU (like GIC CPU Interface) which
+ * was lost when this CPU was powered down.
+ *****************************************************************************/
+void arm_gic_restore_context_local(void);
+
+/******************************************************************************
+ * Save the global GIC context when GIC will be powered down (like GIC
+ * Distributor and Re-distributor) as a result of system suspend.
+ *****************************************************************************/
+void arm_gic_save_context_global(void);
+
+/******************************************************************************
+ * Restore the global GIC context which was lost as a result of GIC power
+ * down (like GIC Distributor and Re-distributor) during system suspend.
+ *****************************************************************************/
+void arm_gic_restore_context_global(void);
+
+#endif /* __ARM_GIC_H__ */
diff --git a/include/drivers/arm/gic_common.h b/include/drivers/arm/gic_common.h
new file mode 100644
index 000000000..ea0340c31
--- /dev/null
+++ b/include/drivers/arm/gic_common.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_COMMON_H__
+#define __GIC_COMMON_H__
+
+/***************************************************************************
+ * Defines and prototypes common to GIC v2 and v3 drivers.
+ **************************************************************************/
+/* Distributor interface register offsets */
+#define GICD_CTLR 0x0
+#define GICD_TYPER 0x4
+#define GICD_ISENABLER 0x100
+#define GICD_ICENABLER 0x180
+#define GICD_ISPENDR 0x200
+#define GICD_ICPENDR 0x280
+#define GICD_ISACTIVER 0x300
+#define GICD_ICACTIVER 0x380
+#define GICD_IPRIORITYR 0x400
+#define GICD_ICFGR 0xC00
+
+/* Distributor interface register shifts */
+#define ISENABLER_SHIFT 5
+#define ICENABLER_SHIFT ISENABLER_SHIFT
+#define ISPENDR_SHIFT 5
+#define ICPENDR_SHIFT ISPENDR_SHIFT
+#define ISACTIVER_SHIFT 5
+#define ICACTIVER_SHIFT ISACTIVER_SHIFT
+#define IPRIORITYR_SHIFT 2
+#define ICFGR_SHIFT 4
+
+/* GICD_TYPER bit definitions */
+#define IT_LINES_NO_MASK 0x1f
+
+/* GICD Priority register mask */
+#define GIC_PRI_MASK 0xff
+
+/*
+ * Number of per-cpu interrupts to save prior to system suspend.
+ * This comprises all SGIs and PPIs.
+ */
+#define NUM_PCPU_INTR 32
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+/* Helper to detect the GIC mode (GICv2 or GICv3) configured in the system */
+unsigned int is_gicv3_mode(void);
+
+/*******************************************************************************
+ * Private GIC Distributor function prototypes for use by GIC drivers
+ ******************************************************************************/
+unsigned int gicd_read_isenabler(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_icenabler(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_ispendr(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_icpendr(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_isactiver(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_icactiver(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_ipriorityr(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_get_ipriorityr(unsigned int base, unsigned int interrupt_id);
+unsigned int gicd_read_icfgr(unsigned int base, unsigned int interrupt_id);
+void gicd_write_isenabler(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icenabler(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_ispendr(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icpendr(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_isactiver(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icactiver(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_ipriorityr(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icfgr(unsigned int base, unsigned int interrupt_id,
+ unsigned int val);
+unsigned int gicd_get_isenabler(unsigned int base, unsigned int interrupt_id);
+void gicd_set_isenabler(unsigned int base, unsigned int interrupt_id);
+void gicd_set_icenabler(unsigned int base, unsigned int interrupt_id);
+void gicd_set_ispendr(unsigned int base, unsigned int interrupt_id);
+void gicd_set_icpendr(unsigned int base, unsigned int interrupt_id);
+void gicd_set_isactiver(unsigned int base, unsigned int interrupt_id);
+void gicd_set_icactiver(unsigned int base, unsigned int interrupt_id);
+void gicd_set_ipriorityr(unsigned int base, unsigned int interrupt_id,
+ unsigned int priority);
+
+/*******************************************************************************
+ * Private GIC Distributor interface accessors for reading and writing
+ * entire registers
+ ******************************************************************************/
+static inline unsigned int gicd_read_ctlr(unsigned int base)
+{
+ return mmio_read_32(base + GICD_CTLR);
+}
+
+static inline unsigned int gicd_read_typer(unsigned int base)
+{
+ return mmio_read_32(base + GICD_TYPER);
+}
+
+static inline void gicd_write_ctlr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICD_CTLR, val);
+}
+
+
+#endif /*__ASSEMBLY__*/
+#endif /* __GIC_COMMON_H__ */
diff --git a/include/drivers/arm/gic_v2.h b/include/drivers/arm/gic_v2.h
new file mode 100644
index 000000000..8432a3fc6
--- /dev/null
+++ b/include/drivers/arm/gic_v2.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_V2_H__
+#define __GIC_V2_H__
+
+/***************************************************************************
+ * Defines and prototypes specific to GIC v2.
+ **************************************************************************/
+
+/* GICD_CTLR bit definitions */
+#define GICD_CTLR_ENABLE (1 << 0)
+
+/* Distributor interface register offsets */
+#define GICD_ITARGETSR 0x800
+#define GICD_SGIR 0xF00
+#define GICD_CPENDSGIR 0xF10
+#define GICD_SPENDSGIR 0xF20
+
+/* GIC Distributor register shifts */
+#define ITARGETSR_SHIFT 2
+#define CPENDSGIR_SHIFT 2
+#define SPENDSGIR_SHIFT CPENDSGIR_SHIFT
+
+/* GICD_SGIR bit shifts */
+#define GICD_SGIR_INTID_SHIFT 0
+#define GICD_SGIR_CPUTL_SHIFT 16
+
+/* Physical CPU Interface register offsets */
+#define GICC_CTLR 0x0
+#define GICC_PMR 0x4
+#define GICC_BPR 0x8
+#define GICC_IAR 0xC
+#define GICC_EOIR 0x10
+#define GICC_RPR 0x14
+#define GICC_HPPIR 0x18
+#define GICC_AHPPIR 0x28
+#define GICC_IIDR 0xFC
+#define GICC_DIR 0x1000
+#define GICC_PRIODROP GICC_EOIR
+
+/* GICC_IIDR bit masks and shifts */
+#define GICC_IIDR_PID_SHIFT 20
+#define GICC_IIDR_ARCH_SHIFT 16
+#define GICC_IIDR_REV_SHIFT 12
+#define GICC_IIDR_IMP_SHIFT 0
+
+#define GICC_IIDR_PID_MASK 0xfff
+#define GICC_IIDR_ARCH_MASK 0xf
+#define GICC_IIDR_REV_MASK 0xf
+#define GICC_IIDR_IMP_MASK 0xfff
+
+/* HYP view virtual CPU Interface register offsets */
+#define GICH_CTL 0x0
+#define GICH_VTR 0x4
+#define GICH_ELRSR0 0x30
+#define GICH_ELRSR1 0x34
+#define GICH_APR0 0xF0
+#define GICH_LR_BASE 0x100
+
+/* Virtual CPU Interface register offsets */
+#define GICV_CTL 0x0
+#define GICV_PRIMASK 0x4
+#define GICV_BP 0x8
+#define GICV_INTACK 0xC
+#define GICV_EOI 0x10
+#define GICV_RUNNINGPRI 0x14
+#define GICV_HIGHESTPEND 0x18
+#define GICV_DEACTIVATE 0x1000
+
+/* GICC_IAR bit masks and shifts */
+#define GICC_IAR_INTID_SHIFT 0
+#define GICC_IAR_CPUID_SHIFT 10
+
+#define GICC_IAR_INTID_MASK 0x3ff
+#define GICC_IAR_CPUID_MASK 0x7
+
+#define get_gicc_iar_intid(val) (((val) >> GICC_IAR_INTID_SHIFT) \
+ & GICC_IAR_INTID_MASK)
+#define get_gicc_iar_cpuid(val) (((val) >> GICC_IAR_CPUID_SHIFT) \
+ & GICC_IAR_CPUID_MASK)
+
+/*
+ * GICC_CTLR is banked to provide Secure and Non-secure copies and the register
+ * bit assignments are different in the Secure and Non-secure copies.
+ * These are the bit assignments for the Non-secure copy.
+ */
+#define GICC_CTLR_ENABLE (1 << 0)
+#define FIQ_BYP_DIS_GRP1 (1 << 5)
+#define IRQ_BYP_DIS_GRP1 (1 << 6)
+#define EOI_MODE_NS (1 << 9)
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+/*******************************************************************************
+ * Private Interfaces for internal use by the GICv2 driver
+ ******************************************************************************/
+
+/*******************************************************************************
+ * GICv2 Distributor interface accessors for reading/writing entire registers
+ ******************************************************************************/
+static inline unsigned int gicd_read_sgir(unsigned int base)
+{
+ return mmio_read_32(base + GICD_SGIR);
+}
+
+static inline void gicd_write_sgir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICD_SGIR, val);
+}
+
+/*******************************************************************************
+ * GICv2 CPU interface accessors for reading entire registers
+ ******************************************************************************/
+
+static inline unsigned int gicc_read_ctlr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_CTLR);
+}
+
+static inline unsigned int gicc_read_pmr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_PMR);
+}
+
+static inline unsigned int gicc_read_bpr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_BPR);
+}
+
+static inline unsigned int gicc_read_iar(unsigned int base)
+{
+ return mmio_read_32(base + GICC_IAR);
+}
+
+static inline unsigned int gicc_read_eoir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_EOIR);
+}
+
+static inline unsigned int gicc_read_hppir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_HPPIR);
+}
+
+static inline unsigned int gicc_read_ahppir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_AHPPIR);
+}
+
+static inline unsigned int gicc_read_dir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_DIR);
+}
+
+static inline unsigned int gicc_read_iidr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_IIDR);
+}
+
+
+/*******************************************************************************
+ * GICv2 CPU interface accessors for writing entire registers
+ ******************************************************************************/
+
+static inline void gicc_write_ctlr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_CTLR, val);
+}
+
+static inline void gicc_write_pmr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_PMR, val);
+}
+
+static inline void gicc_write_bpr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_BPR, val);
+}
+
+
+static inline void gicc_write_iar(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_IAR, val);
+}
+
+static inline void gicc_write_eoir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_EOIR, val);
+}
+
+static inline void gicc_write_hppir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_HPPIR, val);
+}
+
+static inline void gicc_write_dir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_DIR, val);
+}
+
+/******************************************************************************
+ * GICv2 public driver API
+ *****************************************************************************/
+
+/*
+ * Initialize the GICv2 driver. The base addresses of GIC CPU interface
+ * `gicc_base` and the Distributor interface `gicd_base` must be provided
+ * as arguments.
+ */
+void gicv2_init(uintptr_t gicc_base, uintptr_t gicd_base);
+
+/*
+ * Write the GICv2 EOIR register with `val` passed as argument. `val`
+ * should be the raw value read from IAR register.
+ */
+void gicv2_gicc_write_eoir(unsigned int val);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the GICD ISPENDR register.
+ */
+void gicv2_gicd_set_ispendr(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the GICD ICPENDR register.
+ */
+void gicv2_gicd_set_icpendr(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` from the GICD ISPENDR register.
+ */
+unsigned int gicv2_gicd_get_ispendr(unsigned int interrupt_id);
+
+/*
+ * Read and return the value in GICC IAR register
+ */
+unsigned int gicv2_gicc_read_iar(void);
+
+/*
+ * Set the bit corresponding to `num` in the GICD ICENABLER register.
+ */
+void gicv2_gicd_set_icenabler(unsigned int num);
+
+/*
+ * Get the bit corresponding to `num` in the GICD ISENABLER register.
+ */
+unsigned int gicv2_gicd_get_isenabler(unsigned int num);
+
+/*
+ * Set the bit corresponding to `num` in the GICD ISENABLER register.
+ */
+void gicv2_gicd_set_isenabler(unsigned int num);
+
+/*
+ * Set the target of interrupt ID `num` to core with index `core_pos`.
+ */
+void gicv2_set_itargetsr(unsigned int num, unsigned int core_pos);
+
+/*
+ * Set the target of interrupt ID `num` to the desired core mask.
+ */
+void gicv2_set_itargetsr_value(unsigned int num, unsigned int val);
+
+/*
+ * Send SGI with ID `sgi_id` to core with index `core_pos`.
+ */
+void gicv2_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/*
+ * Get the priority of the interrupt `interrupt_id`.
+ */
+unsigned int gicv2_gicd_get_ipriorityr(unsigned int interrupt_id);
+
+/*
+ * Set the priority of the interrupt `interrupt_id` to `priority`.
+ */
+void gicv2_gicd_set_ipriorityr(unsigned int interrupt_id, unsigned int priority);
+
+/*
+ * Setup the GIC Distributor interface.
+ */
+void gicv2_setup_distif(void);
+
+/*
+ * Save the GICv2 SGI and PPI context prior to powering down the
+ * GIC Distributor.
+ */
+void gicv2_save_sgi_ppi_context(void);
+
+/*
+ * Restore the GICv2 SGI and PPI context after powering up the
+ * GIC Distributor.
+ */
+void gicv2_restore_sgi_ppi_context(void);
+
+/*
+ * Disable the GIC CPU interface.
+ */
+void gicv2_disable_cpuif(void);
+
+/*
+ * Setup the GIC CPU interface.
+ */
+void gicv2_setup_cpuif(void);
+
+/*
+ * Enable the GIC CPU interface.
+ */
+void gicv2_enable_cpuif(void);
+
+/*
+ * Save the GICv2 CPU interface prior to powering down the CPU interface.
+ */
+void gicv2_save_cpuif_context(void);
+
+/*
+ * Restore the GICv2 CPU interface after powering up the CPU interface.
+ */
+void gicv2_restore_cpuif_context(void);
+
+/*
+ * Read the GICD ITARGETR0 to figure out the GIC ID for the current core.
+ * This function is required to be invoked on successful boot of a core.
+ * The GIC ID will be stored internally by the driver to convert core index
+ * to GIC CPU ID when required.
+ */
+void gicv2_probe_gic_cpu_id(void);
+
+
+#endif /*__ASSEMBLY__*/
+#endif /* __GIC_V2_H__ */
diff --git a/include/drivers/arm/gic_v3.h b/include/drivers/arm/gic_v3.h
new file mode 100644
index 000000000..1b028b6e8
--- /dev/null
+++ b/include/drivers/arm/gic_v3.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_V3_H__
+#define __GIC_V3_H__
+
+/***************************************************************************
+ * Defines and prototypes specific to GIC v3.
+ *************************************************************************/
+
+/* GICD register offsets */
+#define GICD_IROUTER 0x6000
+
+/* GICD_CTLR bit definitions */
+#define GICD_CTLR_ENABLE_GRP1A (1 << 1)
+#define GICD_CTLR_ARE_NS_SHIFT 4
+#define GICD_CTLR_ARE_NS_MASK 0x1
+
+/* GICR_TYPER bit definitions */
+#define TYPER_AFF_VAL_SHIFT 32
+#define TYPER_PROC_NUM_SHIFT 8
+#define TYPER_LAST_SHIFT 4
+
+#define TYPER_AFF_VAL_MASK 0xffffffff
+#define TYPER_PROC_NUM_MASK 0xffff
+#define TYPER_LAST_MASK 0x1
+
+#define TYPER_LAST_BIT (1 << TYPER_LAST_SHIFT)
+
+/* GICD_IROUTER shifts and masks */
+#define IROUTER_IRM_SHIFT 31
+#define IROUTER_IRM_MASK 0x1
+
+/*******************************************************************************
+ * GICv3 Re-distributor interface registers & constants
+ ******************************************************************************/
+#define GICR_PCPUBASE_SHIFT 0x11
+#define GICR_SGIBASE_OFFSET (1 << 0x10) /* 64 KB */
+#define GICR_CTLR 0x0
+#define GICR_TYPER 0x08
+#define GICR_WAKER 0x14
+#define GICR_IGROUPR0 (GICR_SGIBASE_OFFSET + 0x80)
+#define GICR_ISENABLER0 (GICR_SGIBASE_OFFSET + 0x100)
+#define GICR_ICENABLER0 (GICR_SGIBASE_OFFSET + 0x180)
+#define GICR_ISPENDR0 (GICR_SGIBASE_OFFSET + 0x200)
+#define GICR_ICPENDR0 (GICR_SGIBASE_OFFSET + 0x280)
+#define GICR_IPRIORITYR (GICR_SGIBASE_OFFSET + 0x400)
+#define GICR_ICFGR0 (GICR_SGIBASE_OFFSET + 0xc00)
+#define GICR_ICFGR1 (GICR_SGIBASE_OFFSET + 0xc04)
+#define GICR_IGRPMODR0 (GICR_SGIBASE_OFFSET + 0xd00)
+
+/*******************************************************************************
+ * GICv3 CPU interface registers & constants
+ ******************************************************************************/
+/* ICC_SRE bit definitions*/
+#define ICC_SRE_EN_BIT (1 << 3)
+#define ICC_SRE_DIB_BIT (1 << 2)
+#define ICC_SRE_DFB_BIT (1 << 1)
+#define ICC_SRE_SRE_BIT (1 << 0)
+
+/* ICC_IAR1_EL1 bit definitions */
+#define IAR1_EL1_INTID_SHIFT 0
+#define IAR1_EL1_INTID_MASK 0xffffff
+
+/* ICC_SGI1R bit definitions */
+#define SGI1R_TARGET_LIST_MASK 0xffff
+#define SGI1R_TARGET_LIST_SHIFT 0x0
+#define SGI1R_AFF_MASK 0xff
+#define SGI1R_AFF1_SHIFT 16ULL
+#define SGI1R_AFF2_SHIFT 32ULL
+#ifndef AARCH32
+#define SGI1R_AFF3_SHIFT 48ULL
+#endif
+#define SGI1R_INTID_MASK 0xf
+#define SGI1R_INTID_SHIFT 24
+#define SGI1R_IRM_MASK 0x1
+#define SGI1R_IRM_SHIFT			40
+
+/* ICC_IGRPEN1_EL1 bit definitions */
+#define IGRPEN1_EL1_ENABLE_SHIFT 0
+#define IGRPEN1_EL1_ENABLE_BIT (1 << IGRPEN1_EL1_ENABLE_SHIFT)
+
+/* The highest affinity 0 that can be an SGI target */
+#define SGI_TARGET_MAX_AFF0 16
+
+#ifndef __ASSEMBLY__
+
+/*******************************************************************************
+ * Helper GICv3 macros
+ ******************************************************************************/
+#define gicv3_acknowledge_interrupt()	(read_icc_iar1_el1() & \
+						IAR1_EL1_INTID_MASK)
+#define gicv3_end_of_interrupt(id) write_icc_eoir1_el1(id)
+
+#define is_sre_enabled() \
+ (IS_IN_EL2() ? (read_icc_sre_el2() & ICC_SRE_SRE_BIT) :\
+ (read_icc_sre_el1() & ICC_SRE_SRE_BIT))
+
+/******************************************************************************
+ * GICv3 public driver API
+ *****************************************************************************/
+ /*
+ * Initialize the GICv3 driver. The base addresses of GIC Re-distributor
+ * interface `gicr_base` and the Distributor interface `gicd_base` must
+ * be provided as arguments.
+ */
+void gicv3_init(uintptr_t gicr_base, uintptr_t gicd_base);
+
+/*
+ * Setup the GIC Distributor interface.
+ */
+void gicv3_setup_distif(void);
+
+/*
+ * Probe the Re-distributor base corresponding to this core.
+ * This function is required to be invoked on successful boot of a core.
+ * The base address will be stored internally by the driver and will be
+ * used when accessing the Re-distributor interface.
+ */
+void gicv3_probe_redistif_addr(void);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ICPENDR register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_icpendr(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` in the ISPENDR register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+unsigned int gicv3_get_ispendr(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ICENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_icenabler(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` in the ISENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+unsigned int gicv3_get_isenabler(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ISENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_isenabler(unsigned int interrupt_id);
+
+/*
+ * Set the `route` corresponding to `interrupt_id` in the IROUTER register
+ * at Distributor.
+ */
+void gicv3_set_intr_route(unsigned int interrupt_id, unsigned int core_pos);
+
+/*
+ * Send SGI with ID `sgi_id` to core with index `core_pos`.
+ */
+void gicv3_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/*
+ * Get the priority of the interrupt `interrupt_id`.
+ */
+unsigned int gicv3_get_ipriorityr(unsigned int interrupt_id);
+
+/*
+ * Set the priority of the interrupt `interrupt_id` to `priority`.
+ */
+void gicv3_set_ipriorityr(unsigned int interrupt_id, unsigned int priority);
+
+/*
+ * Restore the GICv3 SGI and PPI context after powering up the
+ * GIC Re-distributor.
+ */
+void gicv3_restore_sgi_ppi_context(void);
+
+/*
+ * Save the GICv3 SGI and PPI context prior to powering down the
+ * GIC Re-distributor.
+ */
+void gicv3_save_sgi_ppi_context(void);
+
+/*
+ * Restore the GICv3 CPU interface after powering up the CPU interface.
+ */
+void gicv3_restore_cpuif_context(void);
+
+/*
+ * Save the GICv3 CPU interface prior to powering down the CPU interface.
+ */
+void gicv3_save_cpuif_context(void);
+
+/*
+ * Disable the GIC CPU interface.
+ */
+void gicv3_disable_cpuif(void);
+
+/*
+ * Setup the GIC CPU interface.
+ */
+void gicv3_setup_cpuif(void);
+
+/*
+ * Enable the GIC CPU interface.
+ */
+void gicv3_enable_cpuif(void);
+
+
+#endif /*__ASSEMBLY__*/
+#endif /* __GIC_V3_H__ */
diff --git a/include/drivers/arm/pl011.h b/include/drivers/arm/pl011.h
new file mode 100644
index 000000000..cba325d5a
--- /dev/null
+++ b/include/drivers/arm/pl011.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PL011_H__
+#define __PL011_H__
+
+/* PL011 Registers */
+#define UARTDR 0x000
+#define UARTRSR 0x004
+#define UARTECR 0x004
+#define UARTFR 0x018
+#define UARTILPR 0x020
+#define UARTIBRD 0x024
+#define UARTFBRD 0x028
+#define UARTLCR_H 0x02C
+#define UARTCR 0x030
+#define UARTIFLS 0x034
+#define UARTIMSC 0x038
+#define UARTRIS 0x03C
+#define UARTMIS 0x040
+#define UARTICR 0x044
+#define UARTDMACR 0x048
+
+/* Data status bits */
+#define UART_DATA_ERROR_MASK 0x0F00
+
+/* Status reg bits */
+#define UART_STATUS_ERROR_MASK 0x0F
+
+/* Flag reg bits */
+#define PL011_UARTFR_RI (1 << 8) /* Ring indicator */
+#define PL011_UARTFR_TXFE (1 << 7) /* Transmit FIFO empty */
+#define PL011_UARTFR_RXFF (1 << 6) /* Receive FIFO full */
+#define PL011_UARTFR_TXFF (1 << 5) /* Transmit FIFO full */
+#define PL011_UARTFR_RXFE (1 << 4) /* Receive FIFO empty */
+#define PL011_UARTFR_BUSY (1 << 3) /* UART busy */
+#define PL011_UARTFR_DCD (1 << 2) /* Data carrier detect */
+#define PL011_UARTFR_DSR (1 << 1) /* Data set ready */
+#define PL011_UARTFR_CTS (1 << 0) /* Clear to send */
+
+#define PL011_UARTFR_TXFF_BIT 5 /* Transmit FIFO full bit in UARTFR register */
+#define PL011_UARTFR_RXFE_BIT 4 /* Receive FIFO empty bit in UARTFR register */
+#define PL011_UARTFR_BUSY_BIT 3 /* UART busy bit in UARTFR register */
+
+/* Control reg bits */
+#define PL011_UARTCR_CTSEN (1 << 15) /* CTS hardware flow control enable */
+#define PL011_UARTCR_RTSEN (1 << 14) /* RTS hardware flow control enable */
+#define PL011_UARTCR_RTS (1 << 11) /* Request to send */
+#define PL011_UARTCR_DTR (1 << 10) /* Data transmit ready. */
+#define PL011_UARTCR_RXE (1 << 9) /* Receive enable */
+#define PL011_UARTCR_TXE (1 << 8) /* Transmit enable */
+#define PL011_UARTCR_LBE (1 << 7) /* Loopback enable */
+#define PL011_UARTCR_UARTEN (1 << 0) /* UART Enable */
+
+#if !defined(PL011_LINE_CONTROL)
+/* FIFO Enabled / No Parity / 8 Data bit / One Stop Bit */
+#define PL011_LINE_CONTROL (PL011_UARTLCR_H_FEN | PL011_UARTLCR_H_WLEN_8)
+#endif
+
+/* Line Control Register Bits */
+#define PL011_UARTLCR_H_SPS (1 << 7) /* Stick parity select */
+#define PL011_UARTLCR_H_WLEN_8 (3 << 5)
+#define PL011_UARTLCR_H_WLEN_7 (2 << 5)
+#define PL011_UARTLCR_H_WLEN_6 (1 << 5)
+#define PL011_UARTLCR_H_WLEN_5 (0 << 5)
+#define PL011_UARTLCR_H_FEN (1 << 4) /* FIFOs Enable */
+#define PL011_UARTLCR_H_STP2 (1 << 3) /* Two stop bits select */
+#define PL011_UARTLCR_H_EPS (1 << 2) /* Even parity select */
+#define PL011_UARTLCR_H_PEN (1 << 1) /* Parity Enable */
+#define PL011_UARTLCR_H_BRK (1 << 0) /* Send break */
+
+/* Constants */
+#define PL011_BAUDRATE 115200
+
+#endif /* __PL011_H__ */
diff --git a/include/drivers/arm/private_timer.h b/include/drivers/arm/private_timer.h
new file mode 100644
index 000000000..aff0b8ec5
--- /dev/null
+++ b/include/drivers/arm/private_timer.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PRIVATE_TIMER_H__
+#define __PRIVATE_TIMER_H__
+
+void private_timer_start(unsigned long timeo);
+void private_timer_stop(void);
+void private_timer_save(void);
+void private_timer_restore(void);
+
+#endif /* __PRIVATE_TIMER_H__ */
diff --git a/include/drivers/arm/sp804.h b/include/drivers/arm/sp804.h
new file mode 100644
index 000000000..7ca556762
--- /dev/null
+++ b/include/drivers/arm/sp804.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP804_H__
+#define __SP804_H__
+
+#define SP804_LOAD_OFFSET 0x0
+#define SP804_CURRENT_VALUE_OFFSET 0x4
+#define SP804_CTRL_OFFSET 0x8
+#define SP804_INT_CLR_OFFSET 0xC
+#define SP804_INT_STATUS_OFFSET 0x10
+#define SP804_MASKED_INT_STATUS_OFFSET 0x14
+#define SP804_BG_LOAD_OFFSET 0x18
+
+/* SP804 Timer control register bit-fields */
+#define ONESHOT_MODE (0x1 << 0) /* Bit [0] */
+#define TIMER_SIZE (0x1 << 1) /* Bit [1] */
+#define TIMER_PRE_DIV1 (0x00 << 2) /* Bits [2:3] */
+#define INT_ENABLE (0x01 << 5) /* Bit [5] */
+#define TIMER_MODE_FREE_RUN (0x0 << 6) /* Bit [6] */
+#define TIMER_EN (0x01 << 7) /* Bit [7] */
+
+/*
+ * Program sp804 timer to fire an interrupt after `time_out_ms` milliseconds.
+ *
+ * Always return 0
+ */
+int sp804_timer_program(unsigned long time_out_ms);
+
+/*
+ * Cancel the currently programmed sp804 timer interrupt
+ *
+ * Always return 0
+ */
+int sp804_timer_cancel(void);
+
+/*
+ * Initializes the sp804 timer so that it can be used for programming
+ * timer interrupt.
+ * Must be called by the primary CPU only.
+ *
+ * Always return 0
+ */
+int sp804_timer_init(uintptr_t base_addr, unsigned int timer_freq);
+
+/*
+ * Handler to acknowledge and de-activate the sp804 timer interrupt
+ *
+ * Always return 0
+ */
+int sp804_timer_handler(void);
+
+#endif /* __SP804_H__ */
diff --git a/include/drivers/arm/sp805.h b/include/drivers/arm/sp805.h
new file mode 100644
index 000000000..c033ccfda
--- /dev/null
+++ b/include/drivers/arm/sp805.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP805_H__
+#define __SP805_H__
+
+/* SP805 register offset */
+#define SP805_WDOG_LOAD_OFF 0x000
+#define SP805_WDOG_VALUE_0FF 0x004
+#define SP805_WDOG_CTRL_OFF 0x008
+#define SP805_WDOG_INT_CLR_OFF 0x00c
+#define SP805_WDOG_RIS_OFF 0x010
+#define SP805_WDOG_MIS_OFF 0x014
+#define SP805_WDOG_LOCK_OFF 0xc00
+#define SP805_WDOG_ITCR_OFF 0xf00
+#define SP805_WDOG_ITOP_OFF 0xf04
+#define SP805_WDOG_PERIPH_ID_OFF 0xfe0
+#define SP805_WDOG_PCELL_ID_OFF 0xff0
+
+/*
+ * Magic word to unlock access to all other watchdog registers; writing any
+ * other value locks them.
+ */
+#define SP805_WDOG_UNLOCK_ACCESS 0x1ACCE551
+
+/* Register field definitions */
+#define SP805_WDOG_CTRL_MASK 0x03
+#define SP805_WDOG_CTRL_RESEN (1 << 1)
+#define SP805_WDOG_CTRL_INTEN (1 << 0)
+#define SP805_WDOG_RIS_WDOGRIS (1 << 0)
+#define SP805_WDOG_RIS_MASK 0x1
+#define SP805_WDOG_MIS_WDOGMIS (1 << 0)
+#define SP805_WDOG_MIS_MASK 0x1
+#define SP805_WDOG_ITCR_MASK 0x1
+#define SP805_WDOG_ITOP_MASK 0x3
+#define SP805_WDOG_PART_NUM_SHIFT 0
+#define SP805_WDOG_PART_NUM_MASK 0xfff
+#define SP805_WDOG_DESIGNER_ID_SHIFT 12
+#define SP805_WDOG_DESIGNER_ID_MASK 0xff
+#define SP805_WDOG_REV_SHIFT 20
+#define SP805_WDOG_REV_MASK 0xf
+#define SP805_WDOG_CFG_SHIFT 24
+#define SP805_WDOG_CFG_MASK 0xff
+#define SP805_WDOG_PCELL_ID_SHIFT 0
+#define SP805_WDOG_PCELL_ID_MASK 0xff
+
+void sp805_wdog_start(unsigned int wdog_cycles);
+void sp805_wdog_stop(void);
+void sp805_wdog_refresh(void);
+
+#endif /* __SP805_H__ */
+
diff --git a/include/drivers/arm/system_timer.h b/include/drivers/arm/system_timer.h
new file mode 100644
index 000000000..8cd0c8e69
--- /dev/null
+++ b/include/drivers/arm/system_timer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SYSTEM_TIMER_H__
+#define __SYSTEM_TIMER_H__
+
+/*
+ * Program systimer to fire an interrupt after time_out_ms
+ *
+ * Always return 0
+ */
+int program_systimer(unsigned long time_out_ms);
+/*
+ * Cancel the currently programmed systimer interrupt
+ *
+ * Always return 0
+ */
+int cancel_systimer(void);
+/*
+ * Initialises the systimer so that it can be used for programming timer
+ * interrupt.
+ * Must be called by the primary CPU only.
+ *
+ * Always return 0
+ */
+int init_systimer(uintptr_t systimer_base);
+/*
+ * Handler to acknowledge and de-activate the systimer interrupt
+ *
+ * Always return 0
+ */
+int handler_systimer(void);
+
+#endif /* __SYSTEM_TIMER_H__ */
diff --git a/include/drivers/console.h b/include/drivers/console.h
new file mode 100644
index 000000000..abbe35612
--- /dev/null
+++ b/include/drivers/console.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CONSOLE_H__
+#define __CONSOLE_H__
+
+/* Returned by getc callbacks when receive FIFO is empty. */
+#define ERROR_NO_PENDING_CHAR -1
+/* Returned by console_xxx() if the registered console doesn't implement xxx. */
+#define ERROR_NO_VALID_CONSOLE (-128)
+
+#ifndef __ASSEMBLY__
+
+#include <types.h>
+
+int console_init(uintptr_t base_addr,
+ unsigned int uart_clk, unsigned int baud_rate);
+int console_putc(int c);
+int console_getc(void);
+int console_try_getc(void);
+int console_flush(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONSOLE_H__ */
diff --git a/include/drivers/io/io_driver.h b/include/drivers/io/io_driver.h
new file mode 100644
index 000000000..e2e2d52b6
--- /dev/null
+++ b/include/drivers/io/io_driver.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_DRIVER_H__
+#define __IO_DRIVER_H__
+
+#include <io_storage.h>
+#include <stdint.h>
+
+
+/* Generic IO entity structure,representing an accessible IO construct on the
+ * device, such as a file */
+typedef struct io_entity {
+ struct io_dev_info *dev_handle;
+ uintptr_t info;
+} io_entity_t;
+
+
+/* Device info structure, providing device-specific functions and a means of
+ * adding driver-specific state */
+typedef struct io_dev_info {
+ const struct io_dev_funcs *funcs;
+ uintptr_t info;
+} io_dev_info_t;
+
+
+/* Structure used to create a connection to a type of device */
+typedef struct io_dev_connector {
+ /* dev_open opens a connection to a particular device driver */
+ int (*dev_open)(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+} io_dev_connector_t;
+
+
+/* Structure to hold device driver function pointers */
+typedef struct io_dev_funcs {
+ io_type_t (*type)(void);
+ int (*open)(io_dev_info_t *dev_info, const uintptr_t spec,
+ io_entity_t *entity);
+ int (*seek)(io_entity_t *entity, int mode, ssize_t offset);
+ int (*size)(io_entity_t *entity, size_t *length);
+ int (*read)(io_entity_t *entity, uintptr_t buffer, size_t length,
+ size_t *length_read);
+ int (*write)(io_entity_t *entity, const uintptr_t buffer,
+ size_t length, size_t *length_written);
+ int (*close)(io_entity_t *entity);
+ int (*dev_init)(io_dev_info_t *dev_info, const uintptr_t init_params);
+ int (*dev_close)(io_dev_info_t *dev_info);
+} io_dev_funcs_t;
+
+
+/* Operations intended to be performed during platform initialisation */
+
+/* Register an IO device */
+int io_register_device(const io_dev_info_t *dev_info);
+
+#endif /* __IO_DRIVER_H__ */
diff --git a/include/drivers/io/io_fip.h b/include/drivers/io/io_fip.h
new file mode 100644
index 000000000..973515200
--- /dev/null
+++ b/include/drivers/io/io_fip.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_FIP_H__
+#define __IO_FIP_H__
+
+#include <assert.h>
+
+struct io_dev_connector;
+
+int register_io_dev_fip(const struct io_dev_connector **dev_con);
+
+enum {
+ /* Non-Trusted Updater Firmware NS_BL1U */
+ NS_BL1U_IMAGE_ID = 16,
+
+ /* Trusted FWU Certificate */
+ FWU_CERT_ID,
+
+ /* SCP Firmware SCP_BL2U */
+ SCP_BL2U_IMAGE_ID,
+
+ /* Trusted Updater Firmware BL2U */
+ BL2U_IMAGE_ID,
+
+ /* Non-Trusted Updater Firmware NS_BL2U */
+ NS_BL2U_IMAGE_ID,
+
+ /* FWU Firmware Image Package */
+ FWU_FIP_IMAGE_ID
+};
+
+static inline const char *get_image_name(unsigned int image_id)
+{
+	/* Names indexed by (image_id - NS_BL1U_IMAGE_ID); must match enum order. */
+	static const char *image_names[] = {
+		"Non-Trusted Updater Firmware (NS_BL1U)",
+		"Trusted FWU Certificate",
+		"SCP Firmware (SCP_BL2U)",
+		"Trusted Updater Firmware (BL2U)",
+		"Non-Trusted Updater Firmware (NS_BL2U)",
+		"FWU Firmware Image Package",
+	};
+	assert((image_id >= NS_BL1U_IMAGE_ID) && (image_id <= FWU_FIP_IMAGE_ID));
+	return image_names[image_id - NS_BL1U_IMAGE_ID];
+}
+
+#endif /* __IO_FIP_H__ */
diff --git a/include/drivers/io/io_memmap.h b/include/drivers/io/io_memmap.h
new file mode 100644
index 000000000..2094b9396
--- /dev/null
+++ b/include/drivers/io/io_memmap.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_MEMMAP_H__
+#define __IO_MEMMAP_H__
+
+struct io_dev_connector;
+
+int register_io_dev_memmap(const struct io_dev_connector **dev_con);
+
+#endif /* __IO_MEMMAP_H__ */
diff --git a/include/drivers/io/io_nor_flash.h b/include/drivers/io/io_nor_flash.h
new file mode 100644
index 000000000..03058e9c9
--- /dev/null
+++ b/include/drivers/io/io_nor_flash.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_NOR_FLASH_H__
+#define __IO_NOR_FLASH_H__
+
+#include <platform_def.h>
+
+#ifndef NOR_FLASH_BLOCK_SIZE
+ #error NOR_FLASH_BLOCK_SIZE must be defined as the block \
+ size of the NOR Flash seen by the software
+#endif
+
+/* IO NOR Flash specification - used to refer to data on a memory map device
+ * supporting block-like entities */
+typedef struct io_nor_spec {
+ /* Base Address of the NOR Flash device - it is required to program
+ * the flash */
+ uintptr_t device_address;
+ uintptr_t region_address;
+ uint32_t block_size;
+ uint32_t block_count;
+} io_nor_flash_spec_t;
+
+struct io_dev_connector;
+
+int register_io_dev_nor_flash(const struct io_dev_connector **dev_con);
+
+#endif /* __IO_NOR_FLASH_H__ */
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
new file mode 100644
index 000000000..a537dcfee
--- /dev/null
+++ b/include/lib/aarch32/arch.h
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_H__
+#define __ARCH_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(24)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (U(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK U(0xff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK U(0x00ffffff)
+#define MPIDR_AFFLVL0 U(0)
+#define MPIDR_AFFLVL1 U(1)
+#define MPIDR_AFFLVL2 U(2)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+#define MPIDR_CLUSTER_ID(mpid) MPIDR_AFF_ID(mpid, 1)
+#define MPIDR_CPU_ID(mpid) MPIDR_AFF_ID(mpid, 0)
+
+/* Constant to highlight the assumption that MPIDR allocation starts from 0 */
+#define FIRST_MPIDR U(0)
+
+#define MPID_MASK (MPIDR_MT_MASK |\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
+
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+/* Data Cache set/way op type defines */
+#define DC_OP_ISW U(0x0)
+#define DC_OP_CISW U(0x1)
+#define DC_OP_CSW U(0x2)
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* ID_PFR0 definitions */
+#define ID_PFR0_AMU_SHIFT U(20)
+#define ID_PFR0_AMU_LENGTH U(4)
+#define ID_PFR0_AMU_MASK U(0xf)
+
+/* ID_PFR1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+ & ID_PFR1_VIRTEXT_MASK)
+#define ID_PFR1_GIC_SHIFT U(28)
+#define ID_PFR1_GIC_MASK U(0xf)
+
+/* SCTLR definitions */
+#define SCTLR_RES1 ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+ (U(1) << 4) | (U(1) << 3) | SCTLR_CP15BEN_BIT \
+ | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT)
+#define SCTLR_M_BIT (U(1) << 0)
+#define SCTLR_A_BIT (U(1) << 1)
+#define SCTLR_C_BIT (U(1) << 2)
+#define SCTLR_CP15BEN_BIT (U(1) << 5)
+#define SCTLR_ITD_BIT (U(1) << 7)
+#define SCTLR_Z_BIT (U(1) << 11)
+#define SCTLR_I_BIT (U(1) << 12)
+#define SCTLR_V_BIT (U(1) << 13)
+#define SCTLR_RR_BIT (U(1) << 14)
+#define SCTLR_NTWI_BIT (U(1) << 16)
+#define SCTLR_NTWE_BIT (U(1) << 18)
+#define SCTLR_WXN_BIT (U(1) << 19)
+#define SCTLR_UWXN_BIT (U(1) << 20)
+#define SCTLR_EE_BIT (U(1) << 25)
+#define SCTLR_TRE_BIT (U(1) << 28)
+#define SCTLR_AFE_BIT (U(1) << 29)
+#define SCTLR_TE_BIT (U(1) << 30)
+
+/* HSCTLR definitions */
+#define HSCTLR_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 4) | (U(1) << 3) | \
+ HSCTLR_CP15BEN_BIT)
+#define HSCTLR_M_BIT (U(1) << 0)
+#define HSCTLR_A_BIT (U(1) << 1)
+#define HSCTLR_C_BIT (U(1) << 2)
+#define HSCTLR_CP15BEN_BIT (U(1) << 5)
+#define HSCTLR_ITD_BIT (U(1) << 7)
+#define HSCTLR_SED_BIT (U(1) << 8)
+#define HSCTLR_I_BIT (U(1) << 12)
+#define HSCTLR_WXN_BIT (U(1) << 19)
+#define HSCTLR_EE_BIT (U(1) << 25)
+#define HSCTLR_TE_BIT (U(1) << 30)
+
+/* CPACR definitions */
+#define CPACR_FPEN(x) ((x) << 20)
+#define CPACR_FP_TRAP_PL0 U(0x1)
+#define CPACR_FP_TRAP_ALL U(0x2)
+#define CPACR_FP_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_TWE_BIT (U(1) << 13)
+#define SCR_TWI_BIT (U(1) << 12)
+#define SCR_SIF_BIT (U(1) << 9)
+#define SCR_HCE_BIT (U(1) << 8)
+#define SCR_SCD_BIT (U(1) << 7)
+#define SCR_NET_BIT (U(1) << 6)
+#define SCR_AW_BIT (U(1) << 5)
+#define SCR_FW_BIT (U(1) << 4)
+#define SCR_EA_BIT (U(1) << 3)
+#define SCR_FIQ_BIT (U(1) << 2)
+#define SCR_IRQ_BIT (U(1) << 1)
+#define SCR_NS_BIT (U(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x33ff)
+#define SCR_RESET_VAL U(0x0)
+
+#define GET_NS_BIT(scr) ((scr) & SCR_NS_BIT)
+
+/* HCR definitions */
+#define HCR_TGE_BIT (U(1) << 27)
+#define HCR_AMO_BIT (U(1) << 5)
+#define HCR_IMO_BIT (U(1) << 4)
+#define HCR_FMO_BIT (U(1) << 3)
+#define HCR_RESET_VAL U(0x0)
+
+/* CNTHCTL definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define EVNTEN_BIT (U(1) << 2)
+#define PL1PCEN_BIT (U(1) << 1)
+#define PL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL definitions */
+#define PL0PTEN_BIT (U(1) << 9)
+#define PL0VTEN_BIT (U(1) << 8)
+#define PL0PCTEN_BIT (U(1) << 0)
+#define PL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* HCPTR definitions */
+#define HCPTR_RES1 ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_BIT (U(1) << 30)
+#define TTA_BIT (U(1) << 20)
+#define TCP11_BIT (U(1) << 11)
+#define TCP10_BIT (U(1) << 10)
+#define HCPTR_RESET_VAL HCPTR_RES1
+
+/* NASCR definitions */
+#define NSASEDIS_BIT (U(1) << 15)
+#define NSTRCDIS_BIT (U(1) << 20)
+#define NSACR_CP11_BIT (U(1) << 11)
+#define NSACR_CP10_BIT (U(1) << 10)
+#define NSACR_IMP_DEF_MASK (U(0x7) << 16)
+#define NSACR_ENABLE_FP_ACCESS (NSACR_CP11_BIT | NSACR_CP10_BIT)
+#define NSACR_RESET_VAL U(0x0)
+
+/* CPACR definitions */
+#define ASEDIS_BIT (U(1) << 31)
+#define TRCDIS_BIT (U(1) << 28)
+#define CPACR_CP11_SHIFT U(22)
+#define CPACR_CP10_SHIFT U(20)
+#define CPACR_ENABLE_FP_ACCESS ((U(0x3) << CPACR_CP11_SHIFT) |\
+ (U(0x3) << CPACR_CP10_SHIFT))
+#define CPACR_RESET_VAL U(0x0)
+
+/* FPEXC definitions */
+#define FPEXC_RES1 ((U(1) << 10) | (U(1) << 9) | (U(1) << 8))
+#define FPEXC_EN_BIT (U(1) << 30)
+#define FPEXC_RESET_VAL FPEXC_RES1
+
+/* SPSR/CPSR definitions */
+#define SPSR_FIQ_BIT (U(1) << 0)
+#define SPSR_IRQ_BIT (U(1) << 1)
+#define SPSR_ABT_BIT (U(1) << 2)
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0)
+#define SPSR_E_BIG U(1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0)
+#define SPSR_T_THUMB U(1)
+
+#define SPSR_MODE_SHIFT U(0)
+#define SPSR_MODE_MASK U(0x7)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
+
+/*
+ * TTBCR definitions
+ */
+/* The translation tables library uses the long descriptor format */
+#define TTBCR_EAE_BIT (U(1) << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE (U(0x0) << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE (U(0x2) << 28)
+#define TTBCR_SH1_INNER_SHAREABLE (U(0x3) << 28)
+
+#define TTBCR_RGN1_OUTER_NC (U(0x0) << 26)
+#define TTBCR_RGN1_OUTER_WBA (U(0x1) << 26)
+#define TTBCR_RGN1_OUTER_WT (U(0x2) << 26)
+#define TTBCR_RGN1_OUTER_WBNA (U(0x3) << 26)
+
+#define TTBCR_RGN1_INNER_NC (U(0x0) << 24)
+#define TTBCR_RGN1_INNER_WBA (U(0x1) << 24)
+#define TTBCR_RGN1_INNER_WT (U(0x2) << 24)
+#define TTBCR_RGN1_INNER_WBNA (U(0x3) << 24)
+
+#define TTBCR_EPD1_BIT (U(1) << 23)
+#define TTBCR_A1_BIT (U(1) << 22)
+
+#define TTBCR_T1SZ_SHIFT U(16)
+#define TTBCR_T1SZ_MASK U(0x7)
+#define TTBCR_TxSZ_MIN U(0)
+#define TTBCR_TxSZ_MAX U(7)
+
+#define TTBCR_SH0_NON_SHAREABLE (U(0x0) << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE (U(0x2) << 12)
+#define TTBCR_SH0_INNER_SHAREABLE (U(0x3) << 12)
+
+#define TTBCR_RGN0_OUTER_NC (U(0x0) << 10)
+#define TTBCR_RGN0_OUTER_WBA (U(0x1) << 10)
+#define TTBCR_RGN0_OUTER_WT (U(0x2) << 10)
+#define TTBCR_RGN0_OUTER_WBNA (U(0x3) << 10)
+
+#define TTBCR_RGN0_INNER_NC (U(0x0) << 8)
+#define TTBCR_RGN0_INNER_WBA (U(0x1) << 8)
+#define TTBCR_RGN0_INNER_WT (U(0x2) << 8)
+#define TTBCR_RGN0_INNER_WBNA (U(0x3) << 8)
+
+#define TTBCR_EPD0_BIT (U(1) << 7)
+#define TTBCR_T0SZ_SHIFT U(0)
+#define TTBCR_T0SZ_MASK U(0x7)
+
+/*
+ * HTCR definitions
+ */
+#define HTCR_RES1 ((1 << 31) | (1 << 23))
+
+#define HTCR_SH0_NON_SHAREABLE (0x0 << 12)
+#define HTCR_SH0_OUTER_SHAREABLE (0x2 << 12)
+#define HTCR_SH0_INNER_SHAREABLE (0x3 << 12)
+
+#define HTCR_RGN0_OUTER_NC (0x0 << 10)
+#define HTCR_RGN0_OUTER_WBA (0x1 << 10)
+#define HTCR_RGN0_OUTER_WT (0x2 << 10)
+#define HTCR_RGN0_OUTER_WBNA (0x3 << 10)
+
+#define HTCR_RGN0_INNER_NC (0x0 << 8)
+#define HTCR_RGN0_INNER_WBA (0x1 << 8)
+#define HTCR_RGN0_INNER_WT (0x2 << 8)
+#define HTCR_RGN0_INNER_WBNA (0x3 << 8)
+
+#define HTCR_T0SZ_SHIFT 0
+#define HTCR_T0SZ_MASK (0x7)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_32 U(0x1)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0x1f)
+#define MODE32_usr U(0x10)
+#define MODE32_fiq U(0x11)
+#define MODE32_irq U(0x12)
+#define MODE32_svc U(0x13)
+#define MODE32_mon U(0x16)
+#define MODE32_abt U(0x17)
+#define MODE32_hyp U(0x1a)
+#define MODE32_und U(0x1b)
+#define MODE32_sys U(0x1f)
+
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ (MODE_RW_32 << MODE_RW_SHIFT | \
+ ((mode) & MODE32_MASK) << MODE32_SHIFT | \
+ ((isa) & SPSR_T_MASK) << SPSR_T_SHIFT | \
+ ((endian) & SPSR_E_MASK) << SPSR_E_SHIFT | \
+ ((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
+
+/*
+ * TTBR definitions
+ */
+#define TTBR_CNP_BIT U(0x1)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(0)
+#define TLBI_ADDR_MASK U(0xFFFFF000)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO 0x0
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ 0x10
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO 0x20
+/* Physical Timer Control register. */
+#define CNTP_CTL 0x2c
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT 0
+#define CNTP_CTL_IMASK_SHIFT 1
+#define CNTP_CTL_ISTATUS_SHIFT 2
+
+#define CNTP_CTL_ENABLE_MASK 1
+#define CNTP_CTL_IMASK_MASK 1
+#define CNTP_CTL_ISTATUS_MASK 1
+
+#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT)
+#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT)
+
+#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
+
+/* MAIR macros */
+#define MAIR0_ATTR_SET(attr, index) ((attr) << ((index) << U(3)))
+#define MAIR1_ATTR_SET(attr, index) ((attr) << (((index) - U(3)) << U(3)))
+
+/* System register defines The format is: coproc, opt1, CRn, CRm, opt2 */
+#define SCR p15, 0, c1, c1, 0
+#define SCTLR p15, 0, c1, c0, 0
+#define MPIDR p15, 0, c0, c0, 5
+#define MIDR p15, 0, c0, c0, 0
+#define HVBAR p15, 4, c12, c0, 0
+#define VBAR p15, 0, c12, c0, 0
+#define MVBAR p15, 0, c12, c0, 1
+#define NSACR p15, 0, c1, c1, 2
+#define CPACR p15, 0, c1, c0, 2
+#define DCCIMVAC p15, 0, c7, c14, 1
+#define DCCMVAC p15, 0, c7, c10, 1
+#define DCIMVAC p15, 0, c7, c6, 1
+#define DCCISW p15, 0, c7, c14, 2
+#define DCCSW p15, 0, c7, c10, 2
+#define DCISW p15, 0, c7, c6, 2
+#define CTR p15, 0, c0, c0, 1
+#define CNTFRQ p15, 0, c14, c0, 0
+#define ID_PFR0 p15, 0, c0, c1, 0
+#define ID_PFR1 p15, 0, c0, c1, 1
+#define MAIR0 p15, 0, c10, c2, 0
+#define MAIR1 p15, 0, c10, c2, 1
+#define TTBCR p15, 0, c2, c0, 2
+#define TTBR0 p15, 0, c2, c0, 0
+#define TTBR1 p15, 0, c2, c0, 1
+#define TLBIALL p15, 0, c8, c7, 0
+#define TLBIALLH p15, 4, c8, c7, 0
+#define TLBIALLIS p15, 0, c8, c3, 0
+#define TLBIMVA p15, 0, c8, c7, 1
+#define TLBIMVAA p15, 0, c8, c7, 3
+#define TLBIMVAAIS p15, 0, c8, c3, 3
+#define TLBIMVAHIS p15, 4, c8, c3, 1
+#define BPIALLIS p15, 0, c7, c1, 6
+#define BPIALL p15, 0, c7, c5, 6
+#define HSCTLR p15, 4, c1, c0, 0
+#define HCR p15, 4, c1, c1, 0
+#define HCPTR p15, 4, c1, c1, 2
+#define CNTHCTL p15, 4, c14, c1, 0
+#define VPIDR p15, 4, c0, c0, 0
+#define VMPIDR p15, 4, c0, c0, 5
+#define ISR p15, 0, c12, c1, 0
+#define CLIDR p15, 1, c0, c0, 1
+#define CSSELR p15, 2, c0, c0, 0
+#define CCSIDR p15, 1, c0, c0, 0
+#define HTCR p15, 4, c2, c0, 2
+#define HMAIR0 p15, 4, c10, c2, 0
+#define CNTHP_CTL p15, 4, c14, c2, 1
+#define CNTHP_TVAL p15, 4, c14, c2, 0
+
+/* GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define ICC_IAR1 p15, 0, c12, c12, 0
+#define ICC_IAR0 p15, 0, c12, c8, 0
+#define ICC_EOIR1 p15, 0, c12, c12, 1
+#define ICC_EOIR0 p15, 0, c12, c8, 1
+#define ICC_HPPIR1 p15, 0, c12, c12, 2
+#define ICC_HPPIR0 p15, 0, c12, c8, 2
+#define ICC_BPR1 p15, 0, c12, c12, 3
+#define ICC_BPR0 p15, 0, c12, c8, 3
+#define ICC_DIR p15, 0, c12, c11, 1
+#define ICC_PMR p15, 0, c4, c6, 0
+#define ICC_RPR p15, 0, c12, c11, 3
+#define ICC_CTLR p15, 0, c12, c12, 4
+#define ICC_MCTLR p15, 6, c12, c12, 4
+#define ICC_SRE p15, 0, c12, c12, 5
+#define ICC_HSRE p15, 4, c12, c9, 5
+#define ICC_MSRE p15, 6, c12, c12, 5
+#define ICC_IGRPEN0 p15, 0, c12, c12, 6
+#define ICC_IGRPEN1 p15, 0, c12, c12, 7
+#define ICC_MGRPEN1 p15, 6, c12, c12, 7
+
+/* 64 bit system register defines The format is: coproc, opt1, CRm */
+#define TTBR0_64 p15, 0, c2
+#define TTBR1_64 p15, 1, c2
+#define CNTVOFF_64 p15, 4, c14
+#define VTTBR_64 p15, 6, c2
+#define CNTPCT_64 p15, 0, c14
+#define HTTBR_64 p15, 4, c2
+#define CNTHP_CVAL_64 p15, 6, c14
+
+/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
+#define ICC_SGI1R_EL1_64 p15, 0, c12
+#define ICC_ASGI1R_EL1_64 p15, 1, c12
+#define ICC_SGI0R_EL1_64 p15, 2, c12
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE U(0x0)
+#define MAIR_DEV_nGnRE U(0x4)
+#define MAIR_DEV_nGRE U(0x8)
+#define MAIR_DEV_GRE U(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA U(0x1)
+#define MAIR_NORM_WT_TR_RA U(0x2)
+#define MAIR_NORM_WT_TR_RWA U(0x3)
+#define MAIR_NORM_NC U(0x4)
+#define MAIR_NORM_WB_TR_WA U(0x5)
+#define MAIR_NORM_WB_TR_RA U(0x6)
+#define MAIR_NORM_WB_TR_RWA U(0x7)
+#define MAIR_NORM_WT_NTR_NA U(0x8)
+#define MAIR_NORM_WT_NTR_WA U(0x9)
+#define MAIR_NORM_WT_NTR_RA U(0xa)
+#define MAIR_NORM_WT_NTR_RWA U(0xb)
+#define MAIR_NORM_WB_NTR_NA U(0xc)
+#define MAIR_NORM_WB_NTR_WA U(0xd)
+#define MAIR_NORM_WB_NTR_RA U(0xe)
+#define MAIR_NORM_WB_NTR_RWA U(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR p15, 0, c13, c2, 0
+#define AMCFGR p15, 0, c13, c2, 1
+#define AMCGCR p15, 0, c13, c2, 2
+#define AMUSERENR p15, 0, c13, c2, 3
+#define AMCNTENCLR0 p15, 0, c13, c2, 4
+#define AMCNTENSET0 p15, 0, c13, c2, 5
+#define AMCNTENCLR1 p15, 0, c13, c3, 0
+#define AMCNTENSET1 p15, 0, c13, c3, 1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00 p15, 0, c0
+#define AMEVCNTR01 p15, 1, c0
+#define AMEVCNTR02 p15, 2, c0
+#define AMEVCNTR03 p15, 3, c0
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00 p15, 0, c13, c6, 0
+#define AMEVTYPER01 p15, 0, c13, c6, 1
+#define AMEVTYPER02 p15, 0, c13, c6, 2
+#define AMEVTYPER03 p15, 0, c13, c6, 3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10 p15, 0, c4
+#define AMEVCNTR11 p15, 1, c4
+#define AMEVCNTR12 p15, 2, c4
+#define AMEVCNTR13 p15, 3, c4
+#define AMEVCNTR14 p15, 4, c4
+#define AMEVCNTR15 p15, 5, c4
+#define AMEVCNTR16 p15, 6, c4
+#define AMEVCNTR17 p15, 7, c4
+#define AMEVCNTR18 p15, 0, c5
+#define AMEVCNTR19 p15, 1, c5
+#define AMEVCNTR1A p15, 2, c5
+#define AMEVCNTR1B p15, 3, c5
+#define AMEVCNTR1C p15, 4, c5
+#define AMEVCNTR1D p15, 5, c5
+#define AMEVCNTR1E p15, 6, c5
+#define AMEVCNTR1F p15, 7, c5
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10 p15, 0, c13, c14, 0
+#define AMEVTYPER11 p15, 0, c13, c14, 1
+#define AMEVTYPER12 p15, 0, c13, c14, 2
+#define AMEVTYPER13 p15, 0, c13, c14, 3
+#define AMEVTYPER14 p15, 0, c13, c14, 4
+#define AMEVTYPER15 p15, 0, c13, c14, 5
+#define AMEVTYPER16 p15, 0, c13, c14, 6
+#define AMEVTYPER17 p15, 0, c13, c14, 7
+#define AMEVTYPER18 p15, 0, c13, c15, 0
+#define AMEVTYPER19 p15, 0, c13, c15, 1
+#define AMEVTYPER1A p15, 0, c13, c15, 2
+#define AMEVTYPER1B p15, 0, c13, c15, 3
+#define AMEVTYPER1C p15, 0, c13, c15, 4
+#define AMEVTYPER1D p15, 0, c13, c15, 5
+#define AMEVTYPER1E p15, 0, c13, c15, 6
+#define AMEVTYPER1F p15, 0, c13, c15, 7
+
+#endif /* __ARCH_H__ */
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
new file mode 100644
index 000000000..7991711ff
--- /dev/null
+++ b/include/lib/aarch32/arch_helpers.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_HELPERS_H__
+#define __ARCH_HELPERS_H__
+
+#include <arch.h> /* for additional register definitions */
+#include <misc_utils.h>
+#include <stdint.h>
+#include <types.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
+static inline void write_## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
+ return v; \
+}
+
+/*
+ * The undocumented %Q and %R extended asm operand modifiers are used to
+ * implement the 64-bit `mrrc` and `mcrr` accessors below.
+ */
+
+#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm) \
+static inline void write64_## _name(uint64_t v) \
+{ \
+ __asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm) \
+static inline uint64_t read64_## _name(void) \
+{ uint64_t v; \
+ __asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
+ return v; \
+}
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \
+ return v; \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(const u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v)); \
+}
+
+/* Define read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
+
+/* Define read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__) \
+ _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define 64 bit read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC_64(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit write function for coproc register */
+#define DEFINE_COPROCR_WRITE_FUNC_64(_name, ...) \
+ _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__) \
+ _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/**********************************************************************
+ * Macros to create inline functions for tlbi operations
+ *********************************************************************/
+
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void tlbi##_op(void) \
+{ \
+ u_register_t v = 0; \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void tlbi##_op(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void bpi##_op(void) \
+{ \
+ u_register_t v = 0; \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for simple TLBI operation */
+#define DEFINE_TLBIOP_FUNC(_op, ...) \
+ _DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
+
+/* Define function for TLBI operation with register parameter */
+#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...) \
+ _DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/* Define function for simple BPI operation */
+#define DEFINE_BPIOP_FUNC(_op, ...) \
+ _DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for DC operations
+ *********************************************************************/
+#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void dc##_op(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for DC operation with register parameter */
+#define DEFINE_DCOP_PARAM_FUNC(_op, ...) \
+ _DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+ /* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+static inline void _op(void) \
+{ \
+ __asm__ (#_op); \
+}
+
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+static inline void _op ## _type(void) \
+{ \
+ __asm__ (#_op " " #_type); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+static inline void _op ## _type(u_register_t v) \
+{ \
+ __asm__ (#_op " " #_type ", %0" : : "r" (v)); \
+}
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_FUNC(isb)
+
+DEFINE_SYSREG_RW_FUNCS(spsr)
+DEFINE_SYSREG_RW_FUNCS(cpsr)
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
+DEFINE_COPROCR_READ_FUNC(midr, MIDR)
+DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
+DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(isr, ISR)
+DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
+DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
+
+DEFINE_COPROCR_RW_FUNCS(scr, SCR)
+DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
+DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
+DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
+DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
+DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
+DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
+DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
+DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
+DEFINE_COPROCR_RW_FUNCS(hmair0, HMAIR0)
+DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
+DEFINE_COPROCR_RW_FUNCS(htcr, HTCR)
+DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
+DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
+DEFINE_COPROCR_RW_FUNCS_64(httbr, HTTBR_64)
+DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
+DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
+DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
+DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
+DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
+DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
+DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
+
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
+DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
+DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+DEFINE_COPROCR_WRITE_FUNC_64(icc_sgi1r, ICC_SGI1R_EL1_64)
+
+DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
+DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
+/*
+ * TLBI operation prototypes
+ */
+DEFINE_TLBIOP_FUNC(all, TLBIALL)
+DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
+DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
+
+/*
+ * BPI operation prototypes.
+ */
+DEFINE_BPIOP_FUNC(allis, BPIALLIS)
+
+/*
+ * DC operation prototypes
+ */
+DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+
+/* Helper functions to manipulate CPSR */
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+ * accesses to an area used by an interrupt handler, in the assumption
+ * that it is safe as the interrupts are disabled at the time it does
+ * that (according to program order). However, non-volatile accesses
+ * are not necessarily in program order relatively with volatile inline
+ * assembly statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie i");
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie a");
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie f");
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid i");
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid a");
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid f");
+ isb();
+}
+
+/* Previously defined accessor functions with incomplete register names */
+#define dsb() dsbsy()
+
+/*
+ * Helper macros to detect the current processor mode.
+ */
+#define IS_IN_HYP() (GET_M32(read_cpsr()) == MODE32_hyp)
+#define IS_IN_SVC() (GET_M32(read_cpsr()) == MODE32_svc)
+#define IS_IN_MON() (GET_M32(read_cpsr()) == MODE32_mon)
+#define IS_IN_EL2() IS_IN_HYP()
+
+/* Accessor functions defined for compatibility with AArch32 register names */
+
+#define read_mpidr_el1() read_mpidr()
+#define read_daif() read_cpsr()
+#define write_daif(flags) write_cpsr(flags)
+#define read_cntfrq_el0() read_cntfrq()
+#define read_cntpct_el0() read64_cntpct()
+#define read_cnthp_cval_el2() read64_cnthp_cval_el2()
+#define write_cnthp_cval_el2(v) write64_cnthp_cval_el2(v)
+#define read_amcntenset0_el0() read_amcntenset0()
+#define read_amcntenset1_el0() read_amcntenset1()
+
+void disable_mmu_icache(void);
+
+#endif /* __ARCH_HELPERS_H__ */
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
new file mode 100644
index 000000000..215730dfd
--- /dev/null
+++ b/include/lib/aarch64/arch.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_H__
+#define __ARCH_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(0x18)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_VAR_MASK U(0xf)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_REV_MASK U(0xf)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (ULL(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK ULL(0xff)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF3_SHIFT U(32)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFFLVL0 ULL(0x0)
+#define MPIDR_AFFLVL1 ULL(0x1)
+#define MPIDR_AFFLVL2 ULL(0x2)
+#define MPIDR_AFFLVL3 ULL(0x3)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ * TODO: Support only the first 3 affinity levels for now.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+/* Constant to highlight the assumption that MPIDR allocation starts from 0 */
+#define FIRST_MPIDR ULL(0)
+
+#define MPID_MASK (MPIDR_MT_MASK |\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+#define MPIDR_CLUSTER_ID(mpid) MPIDR_AFF_ID(mpid, 1)
+#define MPIDR_CPU_ID(mpid) MPIDR_AFF_ID(mpid, 0)
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID 0xFFFFFFFF
+
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_SGI1R S3_0_C12_C11_5
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_c12_c12_7
+#define ICC_IGRPEN0_EL1 S3_0_c12_c12_6
+#define ICC_HPPIR0_EL1 S3_0_c12_c8_2
+#define ICC_HPPIR1_EL1 S3_0_c12_c12_2
+#define ICC_IAR0_EL1 S3_0_c12_c8_0
+#define ICC_IAR1_EL1 S3_0_c12_c12_0
+#define ICC_EOIR0_EL1 S3_0_c12_c8_1
+#define ICC_EOIR1_EL1 S3_0_c12_c12_1
+#define ICC_SGI0R_EL1 S3_0_c12_c11_7
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW U(0x0)
+#define DCCISW U(0x1)
+#define DCCSW U(0x2)
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+
+#define EL_IMPL_NONE ULL(0)
+#define EL_IMPL_A64ONLY ULL(1)
+#define EL_IMPL_A64_A32 ULL(2)
+
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ((ULL(1) << ID_AA64PFR0_GIC_WIDTH) - ULL(1))
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK ULL(0xf)
+
+#define PARANGE_0000 U(32)
+#define PARANGE_0001 U(36)
+#define PARANGE_0010 U(40)
+#define PARANGE_0011 U(42)
+#define PARANGE_0100 U(44)
+#define PARANGE_0101 U(48)
+#define PARANGE_0110 U(52)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
+
+#define SCTLR_M_BIT (U(1) << 0)
+#define SCTLR_A_BIT (U(1) << 1)
+#define SCTLR_C_BIT (U(1) << 2)
+#define SCTLR_SA_BIT (U(1) << 3)
+#define SCTLR_SA0_BIT (U(1) << 4)
+#define SCTLR_CP15BEN_BIT (U(1) << 5)
+#define SCTLR_ITD_BIT (U(1) << 7)
+#define SCTLR_SED_BIT (U(1) << 8)
+#define SCTLR_UMA_BIT (U(1) << 9)
+#define SCTLR_I_BIT (U(1) << 12)
+#define SCTLR_V_BIT (U(1) << 13)
+#define SCTLR_DZE_BIT (U(1) << 14)
+#define SCTLR_UCT_BIT (U(1) << 15)
+#define SCTLR_NTWI_BIT (U(1) << 16)
+#define SCTLR_NTWE_BIT (U(1) << 18)
+#define SCTLR_WXN_BIT (U(1) << 19)
+#define SCTLR_UWXN_BIT (U(1) << 20)
+#define SCTLR_E0E_BIT (U(1) << 24)
+#define SCTLR_EE_BIT (U(1) << 25)
+#define SCTLR_UCI_BIT (U(1) << 26)
+#define SCTLR_TRE_BIT (U(1) << 28)
+#define SCTLR_AFE_BIT (U(1) << 29)
+#define SCTLR_TE_BIT (U(1) << 30)
+#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+
+/* CPACR_EL1 definitions */
+#define CPACR_EL1_FPEN(x) ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0 U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT (U(1) << 21)
+#define SCR_TWE_BIT (U(1) << 13)
+#define SCR_TWI_BIT (U(1) << 12)
+#define SCR_ST_BIT (U(1) << 11)
+#define SCR_RW_BIT (U(1) << 10)
+#define SCR_SIF_BIT (U(1) << 9)
+#define SCR_HCE_BIT (U(1) << 8)
+#define SCR_SMD_BIT (U(1) << 7)
+#define SCR_EA_BIT (U(1) << 3)
+#define SCR_FIQ_BIT (U(1) << 2)
+#define SCR_IRQ_BIT (U(1) << 1)
+#define SCR_NS_BIT (U(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_RESET_VAL SCR_RES1_BITS
+
+/* HCR definitions */
+#define HCR_RW_BIT (1ull << 31)
+#define HCR_TGE_BIT (1 << 27)
+#define HCR_AMO_BIT (1 << 5)
+#define HCR_IMO_BIT (1 << 4)
+#define HCR_FMO_BIT (1 << 3)
+
+/* CNTHCTL_EL2 definitions */
+#define EL1PCEN_BIT (1 << 1)
+#define EL1PCTEN_BIT (1 << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT (U(1) << 9)
+#define EL0VTEN_BIT (U(1) << 8)
+#define EL0PCTEN_BIT (U(1) << 0)
+#define EL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* CPTR_EL2 definitions */
+#define TCPAC_BIT (1 << 31)
+#define TTA_BIT (1 << 20)
+#define TFP_BIT (1 << 10)
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT (U(1) << 0)
+#define DAIF_IRQ_BIT (U(1) << 1)
+#define DAIF_ABT_BIT (U(1) << 2)
+#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_DAIF_SHIFT U(6)
+#define SPSR_DAIF_MASK U(0xf)
+
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0x0)
+#define SPSR_E_BIG U(0x1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0x0)
+#define SPSR_T_THUMB U(0x1)
+
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * TCR definitions
+ */
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL3_RES1 ((U(1) << 31) | (U(1) << 23))
+#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
+#define TCR_EL3_PS_SHIFT U(16)
+
+#define TCR_TxSZ_MIN ULL(16)
+#define TCR_TxSZ_MAX ULL(39)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB ULL(0x0)
+#define TCR_PS_BITS_64GB ULL(0x1)
+#define TCR_PS_BITS_1TB ULL(0x2)
+#define TCR_PS_BITS_4TB ULL(0x3)
+#define TCR_PS_BITS_16TB ULL(0x4)
+#define TCR_PS_BITS_256TB ULL(0x5)
+
+#define ADDR_MASK_48_TO_63 ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47 ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43 ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41 ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39 ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35 ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_TG0_SHIFT U(14)
+#define TCR_TG0_MASK ULL(3)
+#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_EPD0_BIT (ULL(1) << 7)
+#define TCR_EPD1_BIT (ULL(1) << 23)
+
+#define MODE_SP_SHIFT U(0x0)
+#define MODE_SP_MASK U(0x1)
+#define MODE_SP_EL0 U(0x0)
+#define MODE_SP_ELX U(0x1)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_64 U(0x0)
+#define MODE_RW_32 U(0x1)
+
+#define MODE_EL_SHIFT U(0x2)
+#define MODE_EL_MASK U(0x3)
+#define MODE_EL3 U(0x3)
+#define MODE_EL2 U(0x2)
+#define MODE_EL1 U(0x1)
+#define MODE_EL0 U(0x0)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0xf)
+#define MODE32_usr U(0x0)
+#define MODE32_fiq U(0x1)
+#define MODE32_irq U(0x2)
+#define MODE32_svc U(0x3)
+#define MODE32_mon U(0x6)
+#define MODE32_abt U(0x7)
+#define MODE32_hyp U(0xa)
+#define MODE32_und U(0xb)
+#define MODE32_sys U(0xf)
+
+#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif) \
+ ((MODE_RW_64 << MODE_RW_SHIFT) | \
+ (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) | \
+ (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) | \
+ (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ ((MODE_RW_32 << MODE_RW_SHIFT) | \
+ (((mode) & MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
+
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT 0
+#define CNTP_CTL_IMASK_SHIFT 1
+#define CNTP_CTL_ISTATUS_SHIFT 2
+
+#define CNTP_CTL_ENABLE_MASK 1
+#define CNTP_CTL_IMASK_MASK 1
+#define CNTP_CTL_ISTATUS_MASK 1
+
+#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT)
+#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT)
+
+#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
+
+/* Exception Syndrome register bits and bobs */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR U(0x5)
+#define EC_AARCH32_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_AARCH32_CP10_MRC U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_AARCH32_SVC U(0x11)
+#define EC_AARCH32_HVC U(0x12)
+#define EC_AARCH32_SMC U(0x13)
+#define EC_AARCH64_SVC U(0x15)
+#define EC_AARCH64_HVC U(0x16)
+#define EC_AARCH64_SMC U(0x17)
+#define EC_AARCH64_SYS U(0x18)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_AARCH32_FP U(0x28)
+#define EC_AARCH64_FP U(0x2c)
+#define EC_SERROR U(0x2f)
+
+#define EC_BITS(x) (x >> ESR_EC_SHIFT) & ESR_EC_MASK
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(12)
+#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO 0x0
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ 0x10
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO 0x20
+/* Physical Timer Control register. */
+#define CNTP_CTL 0x2c
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE ULL(0x0)
+#define MAIR_DEV_nGnRE ULL(0x4)
+#define MAIR_DEV_nGRE ULL(0x8)
+#define MAIR_DEV_GRE ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA ULL(0x1)
+#define MAIR_NORM_WT_TR_RA ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA ULL(0x3)
+#define MAIR_NORM_NC ULL(0x4)
+#define MAIR_NORM_WB_TR_WA ULL(0x5)
+#define MAIR_NORM_WB_TR_RA ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR_EL0 S3_3_C13_C2_0
+#define AMCFGR_EL0 S3_3_C13_C2_1
+#define AMCGCR_EL0 S3_3_C13_C2_2
+#define AMUSERENR_EL0 S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0 S3_3_C13_C2_4
+#define AMCNTENSET0_EL0 S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0 S3_3_C13_C3_0
+#define AMCNTENSET1_EL0 S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0 S3_3_C13_C4_0
+#define AMEVCNTR01_EL0 S3_3_C13_C4_1
+#define AMEVCNTR02_EL0 S3_3_C13_C4_2
+#define AMEVCNTR03_EL0 S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0 S3_3_C13_C6_0
+#define AMEVTYPER01_EL0 S3_3_C13_C6_1
+#define AMEVTYPER02_EL0 S3_3_C13_C6_2
+#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
+/*******************************************************************************
+ * RAS system registers
+ *******************************************************************************/
+#define DISR_EL1 S3_0_C12_C1_1
+#define DISR_A_BIT U(31)
+
+#define ERRIDR_EL1 S3_0_C5_C3_0
+#define ERRIDR_MASK U(0xffff)
+
+#define ERRSELR_EL1 S3_0_C5_C3_1
+
+/* Fault injection registers */
+#define ERXPFGF_EL1 S3_0_C5_C4_4
+#define ERXCTLR_EL1 S3_0_C5_C4_1
+#define ERXPFGCTL_EL1 S3_0_C5_C4_5
+#define ERXPFGCDN_EL1 S3_0_C5_C4_6
+
+#define ERXCTLR_ED_BIT (1 << 0)
+#define ERXCTLR_UE_BIT (1 << 4)
+
+#define ERXPFGCTL_UC_BIT (1 << 1)
+#define ERXPFGCTL_UEU_BIT (1 << 2)
+#define ERXPFGCTL_CDEN_BIT (1 << 31)
+
+#endif /* __ARCH_H__ */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
new file mode 100644
index 000000000..c313d6098
--- /dev/null
+++ b/include/lib/aarch64/arch_helpers.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_HELPERS_H__
+#define __ARCH_HELPERS_H__
+
+#include <arch.h> /* for additional register definitions */
+#include <cdefs.h> /* For __dead2 */
+#include <misc_utils.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \
+ return v; \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \
+}
+
+#define SYSREG_WRITE_CONST(reg_name, v) \
+ __asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
+
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+static inline void _op(void) \
+{ \
+ __asm__ (#_op); \
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+static inline void _op ## _type(void) \
+{ \
+ __asm__ (#_op " " #_type); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+static inline void _op ## _type(uint64_t v) \
+{ \
+ __asm__ (#_op " " #_type ", %0" : : "r" (v)); \
+}
+
+/*******************************************************************************
+ * TLB maintenance accessor prototypes
+ ******************************************************************************/
+
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+
+/*******************************************************************************
+ * Cache maintenance accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu(void);
+void disable_mmu_icache(void);
+
+/*******************************************************************************
+ * Misc. accessor prototypes
+ ******************************************************************************/
+
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_FUNC(isb)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
+
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+ * accesses to an area used by an interrupt handler, in the assumption
+ * that it is safe as the interrupts are disabled at the time it does
+ * that (according to program order). However, non-volatile accesses
+ * are not necessarily in program order relatively with volatile inline
+ * assembly statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_DBG_BIT);
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_DBG_BIT);
+ isb();
+}
+
+uint32_t get_afflvl_shift(uint32_t);
+uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
+
+void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
+/* GICv3 System Registers */
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
+
+#define IS_IN_EL(x) \
+ (GET_EL(read_CurrentEl()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL2() IS_IN_EL(2)
+
+#endif /* __ARCH_HELPERS_H__ */
diff --git a/include/lib/cassert.h b/include/lib/cassert.h
new file mode 100644
index 000000000..8844e8b02
--- /dev/null
+++ b/include/lib/cassert.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CASSERT_H__
+#define __CASSERT_H__
+
+/*******************************************************************************
+ * Macro to flag a compile time assertion. It uses the preprocessor to generate
+ * an invalid C construct if 'cond' evaluates to false.
+ * The following compilation error is triggered if the assertion fails:
+ * "error: size of array 'msg' is negative"
+ ******************************************************************************/
+#define CASSERT(cond, msg) typedef char msg[(cond) ? 1 : -1]
+
+#endif /* __CASSERT_H__ */
diff --git a/include/lib/events.h b/include/lib/events.h
new file mode 100644
index 000000000..34c5b2f76
--- /dev/null
+++ b/include/lib/events.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EVENTS_H__
+#define __EVENTS_H__
+
+#include <spinlock.h>
+
+typedef struct {
+ /*
+ * Counter that keeps track of the minimum number of recipients of the
+ * event. When the event is sent, this counter is incremented. When it
+ * is received, it is decremented. Therefore, a zero value means that
+ * the event hasn't been sent yet, or that all recipients have already
+ * received it.
+ *
+ * Volatile is needed as it will enforce ordering relatively to
+ * accesses to the lock.
+ */
+ volatile unsigned int cnt;
+
+ /* Lock used to avoid concurrent accesses to the counter */
+ spinlock_t lock;
+} event_t;
+
+/*
+ * Initialise an event.
+ * event: Address of the event to initialise
+ *
+ * This function can be used either to initialise a newly created event
+ * structure or to recycle one.
+ *
+ * Note: This function is not MP-safe. It can't use the event lock as it is
+ * responsible for initialising it. Care must be taken to ensure this function
+ * is called in the right circumstances.
+ */
+void tftf_init_event(event_t *event);
+
+/*
+ * Send an event to a CPU.
+ * event: Address of the variable that acts as a synchronisation object.
+ *
+ * Which CPU receives the event is determined on a first-come, first-served
+ * basis. If several CPUs are waiting for the same event then the first CPU
+ * which takes the event will reflect that in the event structure.
+ *
+ * Note: This is equivalent to calling:
+ * tftf_send_event_to(event, 1);
+ */
+void tftf_send_event(event_t *event);
+
+/*
+ * Send an event to all CPUs.
+ * event: Address of the variable that acts as a synchronisation object.
+ *
+ * Note: This is equivalent to calling:
+ * tftf_send_event_to(event, PLATFORM_CORE_COUNT);
+ */
+void tftf_send_event_to_all(event_t *event);
+
+/*
+ * Send an event to a given number of CPUs.
+ * event: Address of the variable that acts as a synchronisation object.
+ * cpus_count: Number of CPUs to send the event to.
+ *
+ * Which CPUs receive the event is determined on a first-come, first-served
+ * basis. If more than 'cpus_count' CPUs are waiting for the same event then the
+ * first 'cpus_count' CPUs which take the event will reflect that in the event
+ * structure.
+ */
+void tftf_send_event_to(event_t *event, unsigned int cpus_count);
+
+/*
+ * Wait for an event.
+ * event: Address of the variable that acts as a synchronisation object.
+ */
+void tftf_wait_for_event(event_t *event);
+
+#endif /* __EVENTS_H__ */
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
new file mode 100644
index 000000000..30714643b
--- /dev/null
+++ b/include/lib/extensions/amu.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AMU_H__
+#define __AMU_H__
+
+#include <platform_def.h>
+#include <stdint.h>
+
+#define AMU_GROUP0_NR_COUNTERS 4
+#define AMU_GROUP0_COUNTERS_MASK 0xf
+
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK 0
+#endif
+
+#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
+#define AMU_GROUP1_NR_COUNTERS PLAT_AMU_GROUP1_NR_COUNTERS
+#else
+#define AMU_GROUP1_NR_COUNTERS 0
+#endif
+
+#define AMU_GROUP0_MAX_NR_COUNTERS 4
+#define AMU_GROUP1_MAX_NR_COUNTERS 16
+
+int amu_supported(void);
+uint64_t amu_group0_cnt_read(int idx);
+uint64_t amu_group1_cnt_read(int idx);
+
+#endif /* __AMU_H__ */
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
new file mode 100644
index 000000000..e6cd2906e
--- /dev/null
+++ b/include/lib/extensions/amu_private.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AMU_PRIVATE_H__
+#define __AMU_PRIVATE_H__
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(int idx);
+uint64_t amu_group1_cnt_read_internal(int idx);
+
+#endif /* __AMU_PRIVATE_H__ */
diff --git a/include/lib/io_storage.h b/include/lib/io_storage.h
new file mode 100644
index 000000000..42cebd3fd
--- /dev/null
+++ b/include/lib/io_storage.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_H__
+#define __IO_H__
+
+#include <stdint.h>
+#include <stdio.h> /* For ssize_t */
+#include <uuid.h>
+
+/* Device type which can be used to enable policy decisions about which device
+ * to access */
+typedef enum {
+ IO_TYPE_INVALID,
+ IO_TYPE_FLASH,
+ IO_TYPE_MEMMAP,
+ IO_TYPE_FIRMWARE_IMAGE_PACKAGE,
+ IO_TYPE_MAX
+} io_type_t;
+
+
+/* Modes used when seeking data on a supported device */
+typedef enum {
+ IO_SEEK_INVALID,
+ IO_SEEK_SET,
+ IO_SEEK_END,
+ IO_SEEK_CUR,
+ IO_SEEK_MAX
+} io_seek_mode_t;
+
+
+/* Connector type, providing a means of identifying a device to open */
+struct io_dev_connector;
+
+
+/* File specification - used to refer to data on a device supporting file-like
+ * entities */
+typedef struct io_file_spec {
+ const char *path;
+ unsigned int mode;
+} io_file_spec_t;
+
+/* UUID specification - used to refer to data accessed using UUIDs (i.e. FIP
+ * images) */
+typedef struct io_uuid_spec {
+ const uuid_t uuid;
+} io_uuid_spec_t;
+
+
+/* Block specification - used to refer to data on a device supporting
+ * block-like entities */
+typedef struct io_block_spec {
+ size_t offset;
+ size_t length;
+} io_block_spec_t;
+
+
+/* Access modes used when accessing data on a device */
+#define IO_MODE_INVALID (0)
+#define IO_MODE_RO (1 << 0)
+#define IO_MODE_RW (1 << 1)
+
+
+/* Return codes reported by 'io_*' APIs */
+#define IO_SUCCESS (0)
+#define IO_FAIL (-1)
+#define IO_NOT_SUPPORTED (-2)
+#define IO_RESOURCES_EXHAUSTED (-3)
+
+
+/* Open a connection to a device */
+int io_dev_open(const struct io_dev_connector *dev_con,
+ const uintptr_t dev_spec,
+ uintptr_t *dev_handle);
+
+
+/* Initialise a device explicitly - to permit lazy initialisation or
+ * re-initialisation */
+int io_dev_init(uintptr_t dev_handle, const uintptr_t init_params);
+
+/* TODO: Consider whether an explicit "shutdown" API should be included */
+
+/* Close a connection to a device */
+int io_dev_close(uintptr_t dev_handle);
+
+
+/* Synchronous operations */
+int io_open(uintptr_t dev_handle, const uintptr_t spec, uintptr_t *handle);
+
+int io_seek(uintptr_t handle, io_seek_mode_t mode, ssize_t offset);
+
+int io_size(uintptr_t handle, size_t *length);
+
+int io_read(uintptr_t handle, uintptr_t buffer, size_t length,
+ size_t *length_read);
+
+int io_write(uintptr_t handle, const uintptr_t buffer, size_t length,
+ size_t *length_written);
+
+int io_close(uintptr_t handle);
+
+
+#endif /* __IO_H__ */
diff --git a/include/lib/irq.h b/include/lib/irq.h
new file mode 100644
index 000000000..1628221a0
--- /dev/null
+++ b/include/lib/irq.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IRQ_H__
+#define __IRQ_H__
+
+#include <platform_def.h> /* For CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * SGI sent by the timer management framework to notify CPUs when the system
+ * timer fires off
+ */
+#define IRQ_WAKE_SGI IRQ_NS_SGI_7
+
+#ifndef __ASSEMBLY__
+
+/* Prototype of a handler function for an IRQ */
+typedef int (*irq_handler_t)(void *data);
+
+/* Keep track of the IRQ handler registered for a given SPI */
+typedef struct {
+ irq_handler_t handler;
+} spi_desc;
+
+/* Keep track of the IRQ handler registered for a spurious interrupt */
+typedef irq_handler_t spurious_desc;
+
+/*
+ * PPIs and SGIs are interrupts that are private to a GIC CPU interface. These
+ * interrupts are banked in the GIC Distributor. Therefore, each CPU can
+ * set up a different IRQ handler for a given PPI/SGI.
+ *
+ * So we define a data structure representing an IRQ handler aligned on the
+ * size of a cache line. This guarantees that in an array of these, each element
+ * is loaded in a separate cache line. This allows efficient concurrent
+ * manipulation of these elements on different CPUs.
+ */
+typedef struct {
+ irq_handler_t handler;
+} __aligned(CACHE_WRITEBACK_GRANULE) irq_handler_banked_t;
+
+typedef irq_handler_banked_t ppi_desc;
+typedef irq_handler_banked_t sgi_desc;
+
+void tftf_irq_setup(void);
+
+/*
+ * Generic handler called upon reception of an IRQ.
+ *
+ * This function acknowledges the interrupt, calls the user-defined handler
+ * if one has been registered then marks the processing of the interrupt as
+ * complete.
+ */
+int tftf_irq_handler_dispatcher(void);
+
+/*
+ * Enable interrupt #irq_num for the calling core.
+ */
+void tftf_irq_enable(unsigned int irq_num, uint8_t irq_priority);
+
+/*
+ * Disable interrupt #irq_num for the calling core.
+ */
+void tftf_irq_disable(unsigned int irq_num);
+
+/*
+ * Register an interrupt handler for a given interrupt number.
+ * Will fail if there is already an interrupt handler registered for the same
+ * interrupt.
+ *
+ * Return 0 on success, a negative value otherwise.
+ */
+int tftf_irq_register_handler(unsigned int num, irq_handler_t irq_handler);
+
+/*
+ * Unregister an interrupt handler for a given interrupt number.
+ * Will fail if there is no interrupt handler registered for that interrupt.
+ *
+ * Return 0 on success, a negative value otherwise.
+ */
+int tftf_irq_unregister_handler(unsigned int irq_num);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __IRQ_H__ */
diff --git a/include/lib/mmio.h b/include/lib/mmio.h
new file mode 100644
index 000000000..e8a7df02b
--- /dev/null
+++ b/include/lib/mmio.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MMIO_H__
+#define __MMIO_H__
+
+#include <stdint.h>
+
+static inline void mmio_write_8(uintptr_t addr, uint8_t value)
+{
+ *(volatile uint8_t*)addr = value;
+}
+
+static inline uint8_t mmio_read_8(uintptr_t addr)
+{
+ return *(volatile uint8_t*)addr;
+}
+
+static inline void mmio_write_32(uintptr_t addr, uint32_t value)
+{
+ *(volatile uint32_t*)addr = value;
+}
+
+static inline uint32_t mmio_read_32(uintptr_t addr)
+{
+ return *(volatile uint32_t*)addr;
+}
+
+static inline void mmio_write_64(uintptr_t addr, uint64_t value)
+{
+ *(volatile uint64_t*)addr = value;
+}
+
+static inline uint64_t mmio_read_64(uintptr_t addr)
+{
+ return *(volatile uint64_t*)addr;
+}
+
+#endif /* __MMIO_H__ */
diff --git a/include/lib/power_management.h b/include/lib/power_management.h
new file mode 100644
index 000000000..d2d690144
--- /dev/null
+++ b/include/lib/power_management.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __POWER_MANAGEMENT_H__
+#define __POWER_MANAGEMENT_H__
+
+#include <platform_def.h>
+#include <psci.h>
+#include <spinlock.h>
+#include <stdint.h>
+#include <types.h>
+
+/* Set of states of an affinity node as seen by the Test Framework */
+typedef enum {
+ TFTF_AFFINITY_STATE_OFF = 0,
+ TFTF_AFFINITY_STATE_ON_PENDING,
+ TFTF_AFFINITY_STATE_ON,
+} tftf_affinity_info_t;
+
+/* Structure for keeping track of CPU state */
+typedef struct {
+ volatile tftf_affinity_info_t state;
+ spinlock_t lock;
+} __aligned(CACHE_WRITEBACK_GRANULE) tftf_cpu_state_t;
+
+/*
+ * Suspend information passed to the TFTF suspend helpers.
+ */
+typedef struct suspend_info {
+ /* The power state parameter to be passed to PSCI_CPU_SUSPEND */
+ unsigned int power_state;
+ /* SMC function ID of the PSCI suspend call */
+ unsigned int psci_api;
+ /* Whether the system context needs to be saved and restored */
+ unsigned int save_system_context;
+} suspend_info_t;
+
+/*
+ * Power up a core.
+ * This uses the PSCI CPU_ON API, which means it relies on the EL3 firmware's
+ * runtime services capabilities.
+ * The core will be bootstrapped by the framework before handing it over
+ * to the entry point specified as the 2nd argument.
+ *
+ * target_cpu: MPID of the CPU to power up
+ * entrypoint: Address where the CPU will jump once the framework has
+ *             initialised it
+ * context_id: Context identifier as defined by the PSCI specification
+ *
+ * Return: Return code of the PSCI CPU_ON call
+ * (refer to the PSCI specification for details)
+ */
+int32_t tftf_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id);
+
+/*
+ * Tries to power up a core.
+ * This API is similar to tftf_cpu_on API with the difference being it
+ * does a SMC call to EL3 firmware without checking the status of the
+ * core with respect to the framework.
+ *
+ * A caller is expected to handle the return code given by the EL3 firmware.
+ *
+ * target_cpu: MPID of the CPU to power up
+ * entrypoint: Address where the CPU will jump once the framework has
+ * initialised it
+ * context_id: Context identifier as defined by the PSCI specification
+ *
+ * Return: Return code of the PSCI CPU_ON call
+ * (refer to the PSCI specification for details)
+ */
+int32_t tftf_try_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id);
+
+/*
+ * Power down the calling core.
+ * This uses the PSCI CPU_OFF API, which means it relies on the EL3 firmware's
+ * runtime services capabilities.
+ *
+ * Return: This function does not return when successful.
+ * Otherwise, return the same error code as the PSCI CPU_OFF call
+ * (refer to the PSCI specification for details)
+ */
+int32_t tftf_cpu_off(void);
+
+/*
+ * API used to enter a suspend state. It does the following:
+ * - Allocates space for saving architectural and non-architectural CPU state on
+ *   the stack
+ * - Saves the architectural state of the CPU in the allocated space, consisting of:
+ *   a. Callee-saved registers
+ *   b. System control registers, e.g. MMU, SCTLR_EL1
+ * - Depending on the state of the `save_system_context` flag in suspend_info,
+ *   saves the context of system peripherals like the GIC, timer etc.
+ * - Sets the context ID to the base of the stack allocated for saving context
+ * - Calls the Secure Platform Firmware to enter suspend
+ * - If suspend fails, it restores the callee-saved registers
+ * power_state: PSCI power state to be sent via SMC
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ *
+ * Note: This API might not test all use cases, as the context ID and resume
+ * entrypoint are in the control of the framework.
+ */
+int tftf_suspend(const suspend_info_t *info);
+
+
+/* ----------------------------------------------------------------------------
+ * The above APIs might not be suitable in all test scenarios.
+ * A test case could want to bypass those APIs i.e. call the PSCI APIs
+ * directly. In this case, it is the responsibility of the test case to preserve
+ * the state of the framework. The below APIs are provided to this end.
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * The 3 following functions are used to manipulate the reference count tracking
+ * the number of CPUs participating in a test.
+ */
+
+/*
+ * Increment the reference count.
+ * Return the new, incremented value.
+ */
+unsigned int tftf_inc_ref_cnt(void);
+
+/*
+ * Decrement the reference count.
+ * Return the new, decremented value.
+ */
+unsigned int tftf_dec_ref_cnt(void);
+
+/* Return the current reference count value */
+unsigned int tftf_get_ref_cnt(void);
+
+/*
+ * Set the calling CPU online/offline. This only adjusts the view of the core
+ * from the framework's point of view, it doesn't actually power up/down the
+ * core.
+ */
+void tftf_set_cpu_online(void);
+void tftf_init_cpus_status_map(void);
+void tftf_set_cpu_offline(void);
+
+/*
+ * Query the state of a core.
+ * Return: 1 if the core is online, 0 otherwise.
+ */
+unsigned int tftf_is_cpu_online(unsigned int mpid);
+
+unsigned int tftf_is_core_pos_online(unsigned int core_pos);
+
+/* TFTF Suspend helpers */
+static inline int tftf_cpu_suspend(unsigned int pwr_state)
+{
+ suspend_info_t info = {
+ .power_state = pwr_state,
+ .save_system_context = 0,
+ .psci_api = SMC_PSCI_CPU_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+static inline int tftf_cpu_suspend_save_sys_ctx(unsigned int pwr_state)
+{
+ suspend_info_t info = {
+ .power_state = pwr_state,
+ .save_system_context = 1,
+ .psci_api = SMC_PSCI_CPU_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+
+static inline int tftf_system_suspend(void)
+{
+ suspend_info_t info = {
+ .power_state = 0,
+ .save_system_context = 1,
+ .psci_api = SMC_PSCI_SYSTEM_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+#endif /* __POWER_MANAGEMENT_H__ */
diff --git a/include/lib/semihosting.h b/include/lib/semihosting.h
new file mode 100644
index 000000000..9938a566c
--- /dev/null
+++ b/include/lib/semihosting.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SEMIHOSTING_H__
+#define __SEMIHOSTING_H__
+
+#include <stdint.h>
+#include <stdio.h> /* For ssize_t */
+
+
+#define SEMIHOSTING_SYS_OPEN 0x01
+#define SEMIHOSTING_SYS_CLOSE 0x02
+#define SEMIHOSTING_SYS_WRITE0 0x04
+#define SEMIHOSTING_SYS_WRITEC 0x03
+#define SEMIHOSTING_SYS_WRITE 0x05
+#define SEMIHOSTING_SYS_READ 0x06
+#define SEMIHOSTING_SYS_READC 0x07
+#define SEMIHOSTING_SYS_SEEK 0x0A
+#define SEMIHOSTING_SYS_FLEN 0x0C
+#define SEMIHOSTING_SYS_REMOVE 0x0E
+#define SEMIHOSTING_SYS_SYSTEM 0x12
+#define SEMIHOSTING_SYS_ERRNO 0x13
+
+#define FOPEN_MODE_R 0x0
+#define FOPEN_MODE_RB 0x1
+#define FOPEN_MODE_RPLUS 0x2
+#define FOPEN_MODE_RPLUSB 0x3
+#define FOPEN_MODE_W 0x4
+#define FOPEN_MODE_WB 0x5
+#define FOPEN_MODE_WPLUS 0x6
+#define FOPEN_MODE_WPLUSB 0x7
+#define FOPEN_MODE_A 0x8
+#define FOPEN_MODE_AB 0x9
+#define FOPEN_MODE_APLUS 0xa
+#define FOPEN_MODE_APLUSB 0xb
+
+long semihosting_connection_supported(void);
+long semihosting_file_open(const char *file_name, size_t mode);
+long semihosting_file_seek(long file_handle, ssize_t offset);
+long semihosting_file_read(long file_handle, size_t *length, uintptr_t buffer);
+long semihosting_file_write(long file_handle,
+ size_t *length,
+ const uintptr_t buffer);
+long semihosting_file_close(long file_handle);
+long semihosting_file_length(long file_handle);
+long semihosting_system(char *command_line);
+long semihosting_get_flen(const char *file_name);
+long semihosting_download_file(const char *file_name,
+ size_t buf_size,
+ uintptr_t buf);
+void semihosting_write_char(char character);
+void semihosting_write_string(char *string);
+char semihosting_read_char(void);
+
+#endif /* __SEMIHOSTING_H__ */
diff --git a/include/lib/sgi.h b/include/lib/sgi.h
new file mode 100644
index 000000000..d2f4b37f4
--- /dev/null
+++ b/include/lib/sgi.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SGI_H__
+#define __SGI_H__
+
+/* Data associated with the reception of an SGI */
+typedef struct {
+ /* Interrupt ID of the signaled interrupt */
+ unsigned int irq_id;
+} sgi_data_t;
+
+/*
+ * Send an SGI to a given core.
+ */
+void tftf_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+#endif /* __SGI_H__ */
diff --git a/include/lib/spinlock.h b/include/lib/spinlock.h
new file mode 100644
index 000000000..27ea7305e
--- /dev/null
+++ b/include/lib/spinlock.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPINLOCK_H__
+#define __SPINLOCK_H__
+
+typedef struct spinlock {
+ volatile unsigned int lock;
+} spinlock_t;
+
+void init_spinlock(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
+
+#endif /* __SPINLOCK_H__ */
diff --git a/include/lib/status.h b/include/lib/status.h
new file mode 100644
index 000000000..8cb61456d
--- /dev/null
+++ b/include/lib/status.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __STATUS_H__
+#define __STATUS_H__
+
+/* Status Code definitions */
+#define STATUS_SUCCESS 0x00
+#define STATUS_INVALID_PARAMETER 0x01
+#define STATUS_UNSUPPORTED 0x02
+#define STATUS_OUT_OF_RESOURCES 0x03
+#define STATUS_NOT_FOUND 0x04
+#define STATUS_ABORTED 0x05
+#define STATUS_LOAD_ERROR 0x06
+#define STATUS_NEVER_RETURN 0x07
+#define STATUS_BUSY 0x08
+#define STATUS_NOT_INIT 0x09
+#define STATUS_BUFFER_TOO_SMALL 0x0A
+#define STATUS_COMPROMISED_DATA 0x0B
+#define STATUS_ALREADY_LOADED 0x0C
+#define STATUS_FAIL 0x0D
+
+typedef unsigned int STATUS;
+
+#endif /* __STATUS_H__ */
diff --git a/include/lib/stdlib/assert.h b/include/lib/stdlib/assert.h
new file mode 100644
index 000000000..1bcd1ead5
--- /dev/null
+++ b/include/lib/stdlib/assert.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)assert.h 8.2 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _ASSERT_H_
+#define _ASSERT_H_
+
+#include <sys/cdefs.h>
+
+#if ENABLE_ASSERTIONS
+#define _assert(e) assert(e)
+#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \
+ __LINE__, #e))
+#else
+#define assert(e) ((void)0)
+#define _assert(e) ((void)0)
+#endif /* ENABLE_ASSERTIONS */
+
+__BEGIN_DECLS
+void __assert(const char *, const char *, int, const char *) __dead2;
+__END_DECLS
+
+#endif /* !_ASSERT_H_ */
diff --git a/include/lib/stdlib/machine/_limits.h b/include/lib/stdlib/machine/_limits.h
new file mode 100644
index 000000000..bf9488693
--- /dev/null
+++ b/include/lib/stdlib/machine/_limits.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifdef AARCH64
+# include <machine/aarch64/_limits.h>
+#else /* AARCH32 */
+# include <machine/aarch32/_limits.h>
+#endif
diff --git a/include/lib/stdlib/machine/_stdint.h b/include/lib/stdlib/machine/_stdint.h
new file mode 100644
index 000000000..d8f38ff88
--- /dev/null
+++ b/include/lib/stdlib/machine/_stdint.h
@@ -0,0 +1,192 @@
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _MACHINE__STDINT_H_
+#define _MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+#define INT8_C(c) (c)
+#define INT16_C(c) (c)
+#define INT32_C(c) (c)
+#define INT64_C(c) (c ## LL)
+
+#define UINT8_C(c) (c)
+#define UINT16_C(c) (c)
+#define UINT32_C(c) (c ## U)
+#define UINT64_C(c) (c ## ULL)
+
+#define INTMAX_C(c) INT64_C(c)
+#define UINTMAX_C(c) UINT64_C(c)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+#define INT8_MIN (-0x7f-1)
+#define INT16_MIN (-0x7fff-1)
+#define INT32_MIN (-0x7fffffff-1)
+#define INT64_MIN (-0x7fffffffffffffffLL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define INT8_MAX 0x7f
+#define INT16_MAX 0x7fff
+#define INT32_MAX 0x7fffffff
+#define INT64_MAX 0x7fffffffffffffffLL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define UINT8_MAX 0xff
+#define UINT16_MAX 0xffff
+#define UINT32_MAX 0xffffffffU
+#define UINT64_MAX 0xffffffffffffffffULL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2 Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST64_MIN INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MAX INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3 Limits of fastest minimum-width integer types
+ */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST64_MIN INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MAX INT32_MAX
+#define INT_FAST16_MAX INT32_MAX
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MAX INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define UINT_FAST8_MAX UINT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5 Limits of greatest-width integer types
+ */
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+#ifdef AARCH64
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4 Limits of integer types capable of holding object pointers
+ */
+#define INTPTR_MIN INT64_MIN
+#define INTPTR_MAX INT64_MAX
+#define UINTPTR_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3 Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define PTRDIFF_MIN INT64_MIN
+#define PTRDIFF_MAX INT64_MAX
+
+/* Limit of size_t. */
+#define SIZE_MAX UINT64_MAX
+
+#else /* AARCH32 */
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4 Limits of integer types capable of holding object pointers
+ */
+#define INTPTR_MIN INT32_MIN
+#define INTPTR_MAX INT32_MAX
+#define UINTPTR_MAX UINT32_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3 Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define PTRDIFF_MIN INT32_MIN
+#define PTRDIFF_MAX INT32_MAX
+
+/* Limit of size_t. */
+#define SIZE_MAX UINT32_MAX
+
+#endif /* AARCH64 */
+
+
+/* Limits of sig_atomic_t. */
+#define SIG_ATOMIC_MIN INT32_MIN
+#define SIG_ATOMIC_MAX INT32_MAX
+
+#ifndef WCHAR_MIN /* Also possibly defined in <wchar.h> */
+/* Limits of wchar_t. */
+#define WCHAR_MIN INT32_MIN
+#define WCHAR_MAX INT32_MAX
+#endif
+
+/* Limits of wint_t. */
+#define WINT_MIN INT32_MIN
+#define WINT_MAX INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */
diff --git a/include/lib/stdlib/machine/_types.h b/include/lib/stdlib/machine/_types.h
new file mode 100644
index 000000000..037fdf2f3
--- /dev/null
+++ b/include/lib/stdlib/machine/_types.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ * From: @(#)types.h 8.3 (Berkeley) 1/5/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _MACHINE__TYPES_H_
+#define _MACHINE__TYPES_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * Basic types upon which most other types are built.
+ */
+typedef __signed char __int8_t;
+typedef unsigned char __uint8_t;
+typedef short __int16_t;
+typedef unsigned short __uint16_t;
+typedef int __int32_t;
+typedef unsigned int __uint32_t;
+typedef long long __int64_t;
+typedef unsigned long long __uint64_t;
+
+/*
+ * Standard type definitions.
+ */
+typedef __int32_t __clock_t; /* clock()... */
+typedef long __critical_t;
+typedef double __double_t;
+typedef float __float_t;
+typedef long __intfptr_t;
+typedef __int64_t __intmax_t;
+typedef long __intptr_t;
+typedef __int32_t __int_fast8_t;
+typedef __int32_t __int_fast16_t;
+typedef __int32_t __int_fast32_t;
+typedef __int64_t __int_fast64_t;
+typedef __int8_t __int_least8_t;
+typedef __int16_t __int_least16_t;
+typedef __int32_t __int_least32_t;
+typedef __int64_t __int_least64_t;
+typedef long __ptrdiff_t; /* ptr1 - ptr2 */
+typedef long __register_t;
+typedef long __segsz_t; /* segment size (in pages) */
+#ifdef AARCH32
+typedef unsigned int __size_t; /* sizeof() */
+typedef int __ssize_t; /* byte count or error */
+#elif defined AARCH64
+typedef unsigned long __size_t; /* sizeof() */
+typedef long __ssize_t; /* byte count or error */
+#else
+#error "Only AArch32 or AArch64 supported"
+#endif /* AARCH32 */
+typedef __int64_t __time_t; /* time()... */
+typedef unsigned long __uintfptr_t;
+typedef __uint64_t __uintmax_t;
+typedef unsigned long __uintptr_t;
+typedef __uint32_t __uint_fast8_t;
+typedef __uint32_t __uint_fast16_t;
+typedef __uint32_t __uint_fast32_t;
+typedef __uint64_t __uint_fast64_t;
+typedef __uint8_t __uint_least8_t;
+typedef __uint16_t __uint_least16_t;
+typedef __uint32_t __uint_least32_t;
+typedef __uint64_t __uint_least64_t;
+typedef unsigned long __u_register_t;
+typedef unsigned long __vm_offset_t;
+typedef __int64_t __vm_ooffset_t;
+typedef unsigned long __vm_paddr_t;
+typedef __uint64_t __vm_pindex_t;
+typedef unsigned long __vm_size_t;
+
+/*
+ * Unusual type definitions.
+ */
+#ifdef __GNUCLIKE_BUILTIN_VARARGS
+typedef __builtin_va_list __va_list; /* internally known to gcc */
+#else
+typedef char * __va_list;
+#endif /* __GNUCLIKE_BUILTIN_VARARGS */
+#if defined(__GNUCLIKE_BUILTIN_VAALIST) && !defined(__GNUC_VA_LIST) \
+ && !defined(__NO_GNUC_VA_LIST)
+#define __GNUC_VA_LIST
+typedef __va_list __gnuc_va_list; /* compatibility w/GNU headers*/
+#endif
+
+#endif /* !_MACHINE__TYPES_H_ */
diff --git a/include/lib/stdlib/machine/aarch32/_limits.h b/include/lib/stdlib/machine/aarch32/_limits.h
new file mode 100644
index 000000000..fe816e50f
--- /dev/null
+++ b/include/lib/stdlib/machine/aarch32/_limits.h
@@ -0,0 +1,89 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xff /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffff /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffff /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+#define __ULONG_MAX 0xffffffffUL /* max value for an unsigned long */
+#define __LONG_MAX 0x7fffffffL /* max value for a long */
+#define __LONG_MIN (-0x7fffffffL - 1) /* min value for a long */
+
+ /* max value for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max value for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __INT_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __UINT_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LLONG_MAX /* max value for a off_t */
+#define __OFF_MIN __LLONG_MIN /* min value for a off_t */
+
+/* Quads and long longs are the same size. Ensure they stay in sync. */
+#define __UQUAD_MAX __ULLONG_MAX /* max value for a uquad_t */
+#define __QUAD_MAX __LLONG_MAX /* max value for a quad_t */
+#define __QUAD_MIN __LLONG_MIN /* min value for a quad_t */
+
+#define __LONG_BIT 32
+#define __WORD_BIT 32
+
+/* Minimum signal stack size. */
+#define __MINSIGSTKSZ (1024 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
diff --git a/include/lib/stdlib/machine/aarch64/_limits.h b/include/lib/stdlib/machine/aarch64/_limits.h
new file mode 100644
index 000000000..39f0bcf0f
--- /dev/null
+++ b/include/lib/stdlib/machine/aarch64/_limits.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xff /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffff /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffff /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+#define __ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define __LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define __LONG_MIN (-0x7fffffffffffffffL - 1) /* min for a long */
+
+/* Long longs have the same size but not the same type as longs. */
+ /* max for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __LONG_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __ULONG_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LONG_MAX /* max value for an off_t */
+#define __OFF_MIN __LONG_MIN /* min value for an off_t */
+
+/* Quads and longs are the same size. Ensure they stay in sync. */
+#define __UQUAD_MAX (__ULONG_MAX) /* max value for a uquad_t */
+#define __QUAD_MAX (__LONG_MAX) /* max value for a quad_t */
+#define __QUAD_MIN (__LONG_MIN) /* min value for a quad_t */
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
+
+/* Minimum signal stack size. */
+#define __MINSIGSTKSZ (1024 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
diff --git a/include/lib/stdlib/stdbool.h b/include/lib/stdlib/stdbool.h
new file mode 100644
index 000000000..e39aef7d3
--- /dev/null
+++ b/include/lib/stdlib/stdbool.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDBOOL_H
+#define STDBOOL_H
+
+#define bool _Bool
+
+#define true 1
+#define false 0
+
+#define __bool_true_false_are_defined 1
+
+#endif /* STDBOOL_H */
diff --git a/include/lib/stdlib/stddef.h b/include/lib/stdlib/stddef.h
new file mode 100644
index 000000000..ea88214f7
--- /dev/null
+++ b/include/lib/stdlib/stddef.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stddef.h 8.1 (Berkeley) 6/2/93
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _STDDEF_H_
+#define _STDDEF_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+typedef __ptrdiff_t ptrdiff_t;
+
+#if __BSD_VISIBLE
+#ifndef _RUNE_T_DECLARED
+typedef __rune_t rune_t;
+#define _RUNE_T_DECLARED
+#endif
+#endif
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef __cplusplus
+#ifndef _WCHAR_T_DECLARED
+typedef __wchar_t wchar_t;
+#define _WCHAR_T_DECLARED
+#endif
+#endif
+
+#define offsetof(type, member) __offsetof(type, member)
+
+#endif /* _STDDEF_H_ */
diff --git a/include/lib/stdlib/stdio.h b/include/lib/stdlib/stdio.h
new file mode 100644
index 000000000..80110a8b4
--- /dev/null
+++ b/include/lib/stdlib/stdio.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdio.h 8.5 (Berkeley) 4/29/95
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+
+#ifndef _STDIO_H_
+#define _STDIO_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+#define _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#endif
+
+#define EOF (-1)
+
+int printf(const char * __restrict, ...) __printflike(1, 2);
+int putchar(int);
+int puts(const char *);
+int sprintf(char * __restrict, const char * __restrict, ...)
+ __printflike(2, 3);
+int vsprintf(char * __restrict, const char * __restrict,
+ __va_list) __printflike(2, 0);
+
+#if __ISO_C_VISIBLE >= 1999
+int snprintf(char * __restrict, size_t, const char * __restrict,
+ ...) __printflike(3, 4);
+int vsnprintf(char * __restrict, size_t, const char * __restrict,
+ __va_list) __printflike(3, 0);
+#endif
+
+#endif /* !_STDIO_H_ */
diff --git a/include/lib/stdlib/stdlib.h b/include/lib/stdlib/stdlib.h
new file mode 100644
index 000000000..c02532f7a
--- /dev/null
+++ b/include/lib/stdlib/stdlib.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdlib.h 8.5 (Berkeley) 5/19/95
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _STDLIB_H_
+#define _STDLIB_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+#define RAND_MAX 0x7ffffffd
+
+__BEGIN_DECLS
+
+int rand(void);
+void srand(unsigned);
+
+__END_DECLS
+
+#endif /* !_STDLIB_H_ */
diff --git a/include/lib/stdlib/string.h b/include/lib/stdlib/string.h
new file mode 100644
index 000000000..5d3046721
--- /dev/null
+++ b/include/lib/stdlib/string.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)string.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _STRING_H_
+#define _STRING_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+__BEGIN_DECLS
+
+void *memchr(const void *, int, size_t) __pure;
+int memcmp(const void *, const void *, size_t) __pure;
+void *memcpy(void * __restrict, const void * __restrict, size_t);
+void *memmove(void *, const void *, size_t);
+void *memset(void *, int, size_t);
+
+char *strchr(const char *, int) __pure;
+int strcmp(const char *, const char *) __pure;
+size_t strlen(const char *) __pure;
+int strncmp(const char *, const char *, size_t) __pure;
+char *strncpy(char * __restrict, const char * __restrict, size_t);
+
+__END_DECLS
+
+#endif /* _STRING_H_ */
diff --git a/include/lib/stdlib/sys/_null.h b/include/lib/stdlib/sys/_null.h
new file mode 100644
index 000000000..92706c6a0
--- /dev/null
+++ b/include/lib/stdlib/sys/_null.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef NULL
+
+#if !defined(__cplusplus)
+#define NULL ((void *)0)
+#else
+#if __cplusplus >= 201103L
+#define NULL nullptr
+#elif defined(__GNUG__) && defined(__GNUC__) && __GNUC__ >= 4
+#define NULL __null
+#else
+#if defined(__LP64__)
+#define NULL (0L)
+#else
+#define NULL 0
+#endif /* __LP64__ */
+#endif /* __GNUG__ */
+#endif /* !__cplusplus */
+
+#endif
diff --git a/include/lib/stdlib/sys/_stdint.h b/include/lib/stdlib/sys/_stdint.h
new file mode 100644
index 000000000..d0f92493b
--- /dev/null
+++ b/include/lib/stdlib/sys/_stdint.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2011 David E. O'Brien <obrien@FreeBSD.org>
+ * Copyright (c) 2001 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__STDINT_H_
+#define _SYS__STDINT_H_
+
+#ifndef _INT8_T_DECLARED
+typedef __int8_t int8_t;
+#define _INT8_T_DECLARED
+#endif
+
+#ifndef _INT16_T_DECLARED
+typedef __int16_t int16_t;
+#define _INT16_T_DECLARED
+#endif
+
+#ifndef _INT32_T_DECLARED
+typedef __int32_t int32_t;
+#define _INT32_T_DECLARED
+#endif
+
+#ifndef _INT64_T_DECLARED
+typedef __int64_t int64_t;
+#define _INT64_T_DECLARED
+#endif
+
+#ifndef _UINT8_T_DECLARED
+typedef __uint8_t uint8_t;
+#define _UINT8_T_DECLARED
+#endif
+
+#ifndef _UINT16_T_DECLARED
+typedef __uint16_t uint16_t;
+#define _UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef __uint32_t uint32_t;
+#define _UINT32_T_DECLARED
+#endif
+
+#ifndef _UINT64_T_DECLARED
+typedef __uint64_t uint64_t;
+#define _UINT64_T_DECLARED
+#endif
+
+#ifndef _INTPTR_T_DECLARED
+typedef __intptr_t intptr_t;
+#define _INTPTR_T_DECLARED
+#endif
+#ifndef _UINTPTR_T_DECLARED
+typedef __uintptr_t uintptr_t;
+#define _UINTPTR_T_DECLARED
+#endif
+
+#endif /* !_SYS__STDINT_H_ */
diff --git a/include/lib/stdlib/sys/_types.h b/include/lib/stdlib/sys/_types.h
new file mode 100644
index 000000000..c59afd31c
--- /dev/null
+++ b/include/lib/stdlib/sys/_types.h
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__TYPES_H_
+#define _SYS__TYPES_H_
+
+#include <sys/cdefs.h>
+#include <machine/_types.h>
+
+/*
+ * Standard type definitions.
+ */
+typedef __uint32_t __blksize_t; /* file block size */
+typedef __int64_t __blkcnt_t; /* file block count */
+typedef __int32_t __clockid_t; /* clock_gettime()... */
+typedef __uint64_t __cap_rights_t; /* capability rights */
+typedef __uint32_t __fflags_t; /* file flags */
+typedef __uint64_t __fsblkcnt_t;
+typedef __uint64_t __fsfilcnt_t;
+typedef __uint32_t __gid_t;
+typedef __int64_t __id_t; /* can hold a gid_t, pid_t, or uid_t */
+typedef __uint32_t __ino_t; /* inode number */
+typedef long __key_t; /* IPC key (for Sys V IPC) */
+typedef __int32_t __lwpid_t; /* Thread ID (a.k.a. LWP) */
+typedef __uint16_t __mode_t; /* permissions */
+typedef int __accmode_t; /* access permissions */
+typedef int __nl_item;
+typedef __uint16_t __nlink_t; /* link count */
+typedef __int64_t __off_t; /* file offset */
+typedef __int32_t __pid_t; /* process [group] */
+typedef __int64_t __rlim_t; /* resource limit - intentionally */
+ /* signed, because of legacy code */
+ /* that uses -1 for RLIM_INFINITY */
+typedef __uint8_t __sa_family_t;
+typedef __uint32_t __socklen_t;
+typedef long __suseconds_t; /* microseconds (signed) */
+typedef struct __timer *__timer_t; /* timer_gettime()... */
+typedef struct __mq *__mqd_t; /* mq_open()... */
+typedef __uint32_t __uid_t;
+typedef unsigned int __useconds_t; /* microseconds (unsigned) */
+typedef int __cpuwhich_t; /* which parameter for cpuset. */
+typedef int __cpulevel_t; /* level parameter for cpuset. */
+typedef int __cpusetid_t; /* cpuset identifier. */
+
+/*
+ * Unusual type definitions.
+ */
+/*
+ * rune_t is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use __ct_rune_t instead of int.
+ *
+ * NOTE: rune_t is not covered by ANSI nor other standards, and should not
+ * be instantiated outside of lib/libc/locale. Use wchar_t. wchar_t and
+ * rune_t must be the same type. Also, wint_t must be no narrower than
+ * wchar_t, and should be able to hold all members of the largest
+ * character set plus one extra value (WEOF), and must be at least 16 bits.
+ */
+typedef int __ct_rune_t; /* arg type for ctype funcs */
+typedef __ct_rune_t __rune_t; /* rune_t (see above) */
+typedef __ct_rune_t __wchar_t; /* wchar_t (see above) */
+typedef __ct_rune_t __wint_t; /* wint_t (see above) */
+
+typedef __uint32_t __dev_t; /* device number */
+
+typedef __uint32_t __fixpt_t; /* fixed point number */
+
+/*
+ * mbstate_t is an opaque object to keep conversion state during multibyte
+ * stream conversions.
+ */
+typedef union {
+ char __mbstate8[128];
+ __int64_t _mbstateL; /* for alignment */
+} __mbstate_t;
+
+#endif /* !_SYS__TYPES_H_ */
diff --git a/include/lib/stdlib/sys/cdefs.h b/include/lib/stdlib/sys/cdefs.h
new file mode 100644
index 000000000..16fb15190
--- /dev/null
+++ b/include/lib/stdlib/sys/cdefs.h
@@ -0,0 +1,686 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Berkeley Software Design, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)cdefs.h 8.8 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CDEFS_H_
+#define _SYS_CDEFS_H_
+
+#if defined(__cplusplus)
+#define __BEGIN_DECLS extern "C" {
+#define __END_DECLS }
+#else
+#define __BEGIN_DECLS
+#define __END_DECLS
+#endif
+
+/*
+ * This code has been put in place to help reduce the addition of
+ * compiler specific defines in FreeBSD code. It helps to aid in
+ * having a compiler-agnostic source tree.
+ */
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+#if __GNUC__ >= 3 || defined(__INTEL_COMPILER)
+#define __GNUCLIKE_ASM 3
+#define __GNUCLIKE_MATH_BUILTIN_CONSTANTS
+#else
+#define __GNUCLIKE_ASM 2
+#endif
+#define __GNUCLIKE___TYPEOF 1
+#define __GNUCLIKE___OFFSETOF 1
+#define __GNUCLIKE___SECTION 1
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_CTOR_SECTION_HANDLING 1
+#endif
+
+#define __GNUCLIKE_BUILTIN_CONSTANT_P 1
+# if defined(__INTEL_COMPILER) && defined(__cplusplus) \
+ && __INTEL_COMPILER < 800
+# undef __GNUCLIKE_BUILTIN_CONSTANT_P
+# endif
+
+#if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3) && !defined(__INTEL_COMPILER)
+# define __GNUCLIKE_BUILTIN_VARARGS 1
+# define __GNUCLIKE_BUILTIN_STDARG 1
+# define __GNUCLIKE_BUILTIN_VAALIST 1
+#endif
+
+#if defined(__GNUC__)
+# define __GNUC_VA_LIST_COMPATIBILITY 1
+#endif
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_BUILTIN_NEXT_ARG 1
+# define __GNUCLIKE_MATH_BUILTIN_RELOPS
+#endif
+
+#define __GNUCLIKE_BUILTIN_MEMCPY 1
+
+/* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */
+#define __CC_SUPPORTS_INLINE 1
+#define __CC_SUPPORTS___INLINE 1
+#define __CC_SUPPORTS___INLINE__ 1
+
+#define __CC_SUPPORTS___FUNC__ 1
+#define __CC_SUPPORTS_WARNING 1
+
+#define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */
+
+#define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1
+
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+/*
+ * Macro to test if we're using a specific version of gcc or later.
+ */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __GNUC_PREREQ__(ma, mi) \
+ (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define __GNUC_PREREQ__(ma, mi) 0
+#endif
+
+/*
+ * The __CONCAT macro is used to concatenate parts of symbol names, e.g.
+ * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo.
+ * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI
+ * mode -- there must be no spaces between its arguments, and for nested
+ * __CONCAT's, all the __CONCAT's must be at the left. __CONCAT can also
+ * concatenate double-quoted strings produced by the __STRING macro, but
+ * this only works with ANSI C.
+ *
+ * __XSTRING is like __STRING, but it expands any macros in its argument
+ * first. It is only available with ANSI C.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define __P(protos) protos /* full-blown ANSI C */
+#define __CONCAT1(x,y) x ## y
+#define __CONCAT(x,y) __CONCAT1(x,y)
+#define __STRING(x) #x /* stringify without expanding x */
+#define __XSTRING(x) __STRING(x) /* expand x, then stringify */
+
+#define __const const /* define reserved names to standard */
+#define __signed signed
+#define __volatile volatile
+#if defined(__cplusplus)
+#define __inline inline /* convert to C++ keyword */
+#else
+#if !(defined(__CC_SUPPORTS___INLINE))
+#define __inline /* delete GCC keyword */
+#endif /* ! __CC_SUPPORTS___INLINE */
+#endif /* !__cplusplus */
+
+#else /* !(__STDC__ || __cplusplus) */
+#define __P(protos) () /* traditional C preprocessor */
+#define __CONCAT(x,y) x/**/y
+#define __STRING(x) "x"
+
+#if !defined(__CC_SUPPORTS___INLINE)
+#define __const /* delete pseudo-ANSI C keywords */
+#define __inline
+#define __signed
+#define __volatile
+/*
+ * In non-ANSI C environments, new programs will want ANSI-only C keywords
+ * deleted from the program and old programs will want them left alone.
+ * When using a compiler other than gcc, programs using the ANSI C keywords
+ * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS.
+ * When using "gcc -traditional", we assume that this is the intent; if
+ * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone.
+ */
+#ifndef NO_ANSI_KEYWORDS
+#define const /* delete ANSI C keywords */
+#define inline
+#define signed
+#define volatile
+#endif /* !NO_ANSI_KEYWORDS */
+#endif /* !__CC_SUPPORTS___INLINE */
+#endif /* !(__STDC__ || __cplusplus) */
+
+/*
+ * Compiler-dependent macros to help declare dead (non-returning) and
+ * pure (no side effects) functions, and unused variables. They are
+ * null except for versions of gcc that are known to support the features
+ * properly (old versions of gcc-2 supported the dead and pure features
+ * in a different (wrong) way). If we do not provide an implementation
+ * for a given compiler, let the compile fail if it is told to use
+ * a feature that we cannot live without.
+ */
+#ifdef lint
+#define __dead2
+#define __pure2
+#define __unused
+#define __packed
+#define __aligned(x)
+#define __section(x)
+#else
+#if !__GNUC_PREREQ__(2, 5) && !defined(__INTEL_COMPILER)
+#define __dead2
+#define __pure2
+#define __unused
+#endif
+#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7 && !defined(__INTEL_COMPILER)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused
+/* XXX Find out what to do for __packed, __aligned and __section */
+#endif
+#if __GNUC_PREREQ__(2, 7)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused __attribute__((__unused__))
+#define __used __attribute__((__used__))
+#define __packed __attribute__((__packed__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#endif
+#if defined(__INTEL_COMPILER)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused __attribute__((__unused__))
+#define __used __attribute__((__used__))
+#define __packed __attribute__((__packed__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#endif
+#endif
+
+#if !__GNUC_PREREQ__(2, 95)
+#define __alignof(x) __offsetof(struct { char __a; x __b; }, __b)
+#endif
+
+/*
+ * Keywords added in C11.
+ */
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define _Alignas(e) alignas(e)
+#define _Alignof(e) alignof(e)
+#define _Noreturn [[noreturn]]
+#define _Static_assert(e, s) static_assert(e, s)
+/* FIXME: change this to thread_local when clang in base supports it */
+#define _Thread_local __thread
+#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* Do nothing. They are language keywords. */
+#else
+/* Not supported. Implement them using our versions. */
+#define _Alignas(x) __aligned(x)
+#define _Alignof(x) __alignof(x)
+#define _Noreturn __dead2
+#define _Thread_local __thread
+#ifdef __COUNTER__
+#define _Static_assert(x, y) __Static_assert(x, __COUNTER__)
+#define __Static_assert(x, y) ___Static_assert(x, y)
+#define ___Static_assert(x, y) typedef char __assert_ ## y[(x) ? 1 : -1]
+#else
+#define _Static_assert(x, y) struct __hack
+#endif
+#endif
+
+/*
+ * Emulation of C11 _Generic(). Unlike the previously defined C11
+ * keywords, it is not possible to implement this using exactly the same
+ * syntax. Therefore implement something similar under the name
+ * __generic(). Unlike _Generic(), this macro can only distinguish
+ * between a single type, so it requires nested invocations to
+ * distinguish multiple cases.
+ */
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#define __generic(expr, t, yes, no) \
+ _Generic(expr, t: yes, default: no)
+#elif __GNUC_PREREQ__(3, 1) && !defined(__cplusplus)
+#define __generic(expr, t, yes, no) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(__typeof(expr), t), yes, no)
+#endif
+
+#if __GNUC_PREREQ__(2, 96)
+#define __malloc_like __attribute__((__malloc__))
+#define __pure __attribute__((__pure__))
+#else
+#define __malloc_like
+#define __pure
+#endif
+
+#if __GNUC_PREREQ__(3, 1) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800)
+#define __always_inline __attribute__((__always_inline__))
+#else
+#define __always_inline
+#endif
+
+#if __GNUC_PREREQ__(3, 1)
+#define __noinline __attribute__ ((__noinline__))
+#else
+#define __noinline
+#endif
+
+#if __GNUC_PREREQ__(3, 3)
+#define __nonnull(x) __attribute__((__nonnull__(x)))
+#else
+#define __nonnull(x)
+#endif
+
+#if __GNUC_PREREQ__(3, 4)
+#define __fastcall __attribute__((__fastcall__))
+#else
+#define __fastcall
+#endif
+
+#if __GNUC_PREREQ__(4, 1)
+#define __returns_twice __attribute__((__returns_twice__))
+#else
+#define __returns_twice
+#endif
+
+/* XXX: should use `#if __STDC_VERSION__ < 199901'. */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define __func__ NULL
+#endif
+
+#if (defined(__INTEL_COMPILER) || (defined(__GNUC__) && __GNUC__ >= 2)) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901
+#define __LONG_LONG_SUPPORTED
+#endif
+
+/* C++11 exposes a load of C99 stuff */
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define __LONG_LONG_SUPPORTED
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+#endif
+
+/*
+ * GCC 2.95 provides `__restrict' as an extension to C90 to support the
+ * C99-specific `restrict' type qualifier. We happen to use `__restrict' as
+ * a way to define the `restrict' type qualifier without disturbing older
+ * software that is unaware of C99 keywords.
+ */
+#if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95)
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901 || defined(lint)
+#define __restrict
+#else
+#define __restrict restrict
+#endif
+#endif
+
+/*
+ * GNU C version 2.96 adds explicit branch prediction so that
+ * the CPU back-end can hint the processor and also so that
+ * code blocks can be reordered such that the predicted path
+ * sees a more linear flow, thus improving cache behavior, etc.
+ *
+ * The following two macros provide us with a way to utilize this
+ * compiler feature. Use __predict_true() if you expect the expression
+ * to evaluate to true, and __predict_false() if you expect the
+ * expression to evaluate to false.
+ *
+ * A few notes about usage:
+ *
+ * * Generally, __predict_false() error condition checks (unless
+ * you have some _strong_ reason to do otherwise, in which case
+ * document it), and/or __predict_true() `no-error' condition
+ * checks, assuming you want to optimize for the no-error case.
+ *
+ * * Other than that, if you don't know the likelihood of a test
+ * succeeding from empirical or other `hard' evidence, don't
+ * make predictions.
+ *
+ * * These are meant to be used in places that are run `a lot'.
+ * It is wasteful to make predictions in code that is run
+ * seldomly (e.g. at subsystem initialization time) as the
+ * basic block reordering that this affects can often generate
+ * larger code.
+ */
+#if __GNUC_PREREQ__(2, 96)
+#define __predict_true(exp) __builtin_expect((exp), 1)
+#define __predict_false(exp) __builtin_expect((exp), 0)
+#else
+#define __predict_true(exp) (exp)
+#define __predict_false(exp) (exp)
+#endif
+
+#if __GNUC_PREREQ__(4, 2)
+#define __hidden __attribute__((__visibility__("hidden")))
+#define __exported __attribute__((__visibility__("default")))
+#else
+#define __hidden
+#define __exported
+#endif
+
+/*
+ * We define this here since <stddef.h>, <sys/queue.h>, and <sys/types.h>
+ * require it.
+ */
+#if __GNUC_PREREQ__(4, 1)
+#define __offsetof(type, field) __builtin_offsetof(type, field)
+#else
+#ifndef __cplusplus
+#define __offsetof(type, field) \
+ ((__size_t)(__uintptr_t)((const volatile void *)&((type *)0)->field))
+#else
+#define __offsetof(type, field) \
+ (__offsetof__ (reinterpret_cast <__size_t> \
+ (&reinterpret_cast <const volatile char &> \
+ (static_cast<type *> (0)->field))))
+#endif
+#endif
+#define __rangeof(type, start, end) \
+ (__offsetof(type, end) - __offsetof(type, start))
+
+/*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure. When using GCC, we first
+ * assign pointer x to a local variable, to check that its type is
+ * compatible with member m.
+ */
+#if __GNUC_PREREQ__(3, 1)
+#define __containerof(x, s, m) ({ \
+ const volatile __typeof(((s *)0)->m) *__x = (x); \
+ __DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\
+})
+#else
+#define __containerof(x, s, m) \
+ __DEQUALIFY(s *, (const volatile char *)(x) - __offsetof(s, m))
+#endif
+
+/*
+ * Compiler-dependent macros to declare that functions take printf-like
+ * or scanf-like arguments. They are null except for versions of gcc
+ * that are known to support the features properly (old versions of gcc-2
+ * didn't permit keeping the keywords out of the application namespace).
+ */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define __printflike(fmtarg, firstvararg)
+#define __scanflike(fmtarg, firstvararg)
+#define __format_arg(fmtarg)
+#define __strfmonlike(fmtarg, firstvararg)
+#define __strftimelike(fmtarg, firstvararg)
+#else
+#define __printflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+#define __scanflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__scanf__, fmtarg, firstvararg)))
+#define __format_arg(fmtarg) __attribute__((__format_arg__ (fmtarg)))
+#define __strfmonlike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__strfmon__, fmtarg, firstvararg)))
+#define __strftimelike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__strftime__, fmtarg, firstvararg)))
+#endif
+
+/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
+#if __FreeBSD_cc_version >= 300001 && defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __printf0like(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf0__, fmtarg, firstvararg)))
+#else
+#define __printf0like(fmtarg, firstvararg)
+#endif
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#ifndef __INTEL_COMPILER
+#define __strong_reference(sym,aliassym) \
+ extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym)))
+#endif
+#ifdef __STDC__
+#define __weak_reference(sym,alias) \
+ __asm__(".weak " #alias); \
+ __asm__(".equ " #alias ", " #sym)
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning." #sym); \
+ __asm__(".asciz \"" msg "\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@" #verid)
+#define __sym_default(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@@" #verid)
+#else
+#define __weak_reference(sym,alias) \
+ __asm__(".weak alias"); \
+ __asm__(".equ alias, sym")
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning.sym"); \
+ __asm__(".asciz \"msg\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver impl, sym@verid")
+#define __sym_default(impl,sym,verid) \
+ __asm__(".symver impl, sym@@verid")
+#endif /* __STDC__ */
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+#define __GLOBL1(sym) __asm__(".globl " #sym)
+#define __GLOBL(sym) __GLOBL1(sym)
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define __IDSTRING(name,string) __asm__(".ident\t\"" string "\"")
+#else
+/*
+ * The following definition might not work well if used in header files,
+ * but it should be better than nothing. If you want a "do nothing"
+ * version, then it should generate some harmless declaration, such as:
+ * #define __IDSTRING(name,string) struct __hack
+ */
+#define __IDSTRING(name,string) static const char name[] __unused = string
+#endif
+
+/*
+ * Embed the rcs id of a source file in the resulting library. Note that in
+ * more recent ELF binutils, we use .ident allowing the ID to be stripped.
+ * Usage:
+ * __FBSDID("$FreeBSD$");
+ */
+#ifndef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __FBSDID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID
+#ifndef NO__RCSID
+#define __RCSID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __RCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID_SOURCE
+#ifndef NO__RCSID_SOURCE
+#define __RCSID_SOURCE(s) __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
+#else
+#define __RCSID_SOURCE(s) struct __hack
+#endif
+#endif
+
+#ifndef __SCCSID
+#ifndef NO__SCCSID
+#define __SCCSID(s) __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
+#else
+#define __SCCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __COPYRIGHT
+#ifndef NO__COPYRIGHT
+#define __COPYRIGHT(s) __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
+#else
+#define __COPYRIGHT(s) struct __hack
+#endif
+#endif
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
+#endif
+
+#ifndef __DEVOLATILE
+#define __DEVOLATILE(type, var) ((type)(__uintptr_t)(volatile void *)(var))
+#endif
+
+#ifndef __DEQUALIFY
+#define __DEQUALIFY(type, var) ((type)(__uintptr_t)(const volatile void *)(var))
+#endif
+
+/*-
+ * The following definitions are an extension of the behavior originally
+ * implemented in <sys/_posix.h>, but with a different level of granularity.
+ * POSIX.1 requires that the macros we test be defined before any standard
+ * header file is included.
+ *
+ * Here's a quick run-down of the versions:
+ * defined(_POSIX_SOURCE) 1003.1-1988
+ * _POSIX_C_SOURCE == 1 1003.1-1990
+ * _POSIX_C_SOURCE == 2 1003.2-1992 C Language Binding Option
+ * _POSIX_C_SOURCE == 199309 1003.1b-1993
+ * _POSIX_C_SOURCE == 199506 1003.1c-1995, 1003.1i-1995,
+ * and the omnibus ISO/IEC 9945-1: 1996
+ * _POSIX_C_SOURCE == 200112 1003.1-2001
+ * _POSIX_C_SOURCE == 200809 1003.1-2008
+ *
+ * In addition, the X/Open Portability Guide, which is now the Single UNIX
+ * Specification, defines a feature-test macro which indicates the version of
+ * that specification, and which subsumes _POSIX_C_SOURCE.
+ *
+ * Our macros begin with two underscores to avoid namespace screwage.
+ */
+
+/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
+#undef _POSIX_C_SOURCE /* Probably illegal, but beyond caring now. */
+#define _POSIX_C_SOURCE 199009
+#endif
+
+/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199209
+#endif
+
+/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
+#ifdef _XOPEN_SOURCE
+#if _XOPEN_SOURCE - 0 >= 700
+#define __XSI_VISIBLE 700
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809
+#elif _XOPEN_SOURCE - 0 >= 600
+#define __XSI_VISIBLE 600
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112
+#elif _XOPEN_SOURCE - 0 >= 500
+#define __XSI_VISIBLE 500
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199506
+#endif
+#endif
+
+/*
+ * Deal with all versions of POSIX. The ordering relative to the tests above is
+ * important.
+ */
+#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
+#define _POSIX_C_SOURCE 198808
+#endif
+#ifdef _POSIX_C_SOURCE
+#if _POSIX_C_SOURCE >= 200809
+#define __POSIX_VISIBLE 200809
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 200112
+#define __POSIX_VISIBLE 200112
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 199506
+#define __POSIX_VISIBLE 199506
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199309
+#define __POSIX_VISIBLE 199309
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199209
+#define __POSIX_VISIBLE 199209
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199009
+#define __POSIX_VISIBLE 199009
+#define __ISO_C_VISIBLE 1990
+#else
+#define __POSIX_VISIBLE 198808
+#define __ISO_C_VISIBLE 0
+#endif /* _POSIX_C_SOURCE */
+#else
+/*-
+ * Deal with _ANSI_SOURCE:
+ * If it is defined, and no other compilation environment is explicitly
+ * requested, then define our internal feature-test macros to zero. This
+ * makes no difference to the preprocessor (undefined symbols in preprocessing
+ * expressions are defined to have value zero), but makes it more convenient for
+ * a test program to print out the values.
+ *
+ * If a program mistakenly defines _ANSI_SOURCE and some other macro such as
+ * _POSIX_C_SOURCE, we will assume that it wants the broader compilation
+ * environment (and in fact we will never get here).
+ */
+#if defined(_ANSI_SOURCE) /* Hide almost everything. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1990
+#elif defined(_C99_SOURCE) /* Localism to specify strict C99 env. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1999
+#else /* Default environment: show everything. */
+#define __POSIX_VISIBLE 200809
+#define __XSI_VISIBLE 700
+#define __BSD_VISIBLE 1
+#define __ISO_C_VISIBLE 1999
+#endif
+#endif
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+#ifndef __has_include
+#define __has_include(x) 0
+#endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__mips) || defined(__powerpc64__) || defined(__arm__)
+#define __NO_TLS 1
+#endif
+
+#endif /* !_SYS_CDEFS_H_ */
diff --git a/include/lib/stdlib/sys/ctype.h b/include/lib/stdlib/sys/ctype.h
new file mode 100644
index 000000000..f2758b774
--- /dev/null
+++ b/include/lib/stdlib/sys/ctype.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 1982, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2009-2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_CTYPE_H_
+#define _SYS_CTYPE_H_
+
+#define isspace(c) ((c) == ' ' || ((c) >= '\t' && (c) <= '\r'))
+#define isascii(c) (((c) & ~0x7f) == 0)
+#define isupper(c) ((c) >= 'A' && (c) <= 'Z')
+#define islower(c) ((c) >= 'a' && (c) <= 'z')
+#define isalpha(c) (isupper(c) || islower(c))
+#define isdigit(c) ((c) >= '0' && (c) <= '9')
+#define isxdigit(c) (isdigit(c) \
+ || ((c) >= 'A' && (c) <= 'F') \
+ || ((c) >= 'a' && (c) <= 'f'))
+#define isprint(c) ((c) >= ' ' && (c) <= '~')
+
+#define toupper(c) ((c) - 0x20 * (((c) >= 'a') && ((c) <= 'z')))
+#define tolower(c) ((c) + 0x20 * (((c) >= 'A') && ((c) <= 'Z')))
+
+#endif /* !_SYS_CTYPE_H_ */
diff --git a/include/lib/stdlib/sys/errno.h b/include/lib/stdlib/sys/errno.h
new file mode 100644
index 000000000..f59551451
--- /dev/null
+++ b/include/lib/stdlib/sys/errno.h
@@ -0,0 +1,193 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)errno.h 8.5 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ERRNO_H_
+#define _SYS_ERRNO_H_
+
+#ifndef _KERNEL
+#include <sys/cdefs.h>
+__BEGIN_DECLS
+int * __error(void);
+__END_DECLS
+#define errno (* __error())
+#endif
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* Input/output error */
+#define ENXIO 6 /* Device not configured */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file descriptor */
+#define ECHILD 10 /* No child processes */
+#define EDEADLK 11 /* Resource deadlock avoided */
+ /* 11 was EAGAIN */
+#define ENOMEM 12 /* Cannot allocate memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#ifndef _POSIX_SOURCE
+#define ENOTBLK 15 /* Block device required */
+#endif
+#define EBUSY 16 /* Device busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* Operation not supported by device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* Too many open files in system */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Inappropriate ioctl for device */
+#ifndef _POSIX_SOURCE
+#define ETXTBSY 26 /* Text file busy */
+#endif
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only filesystem */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+
+/* math software */
+#define EDOM 33 /* Numerical argument out of domain */
+#define ERANGE 34 /* Result too large */
+
+/* non-blocking and interrupt i/o */
+#define EAGAIN 35 /* Resource temporarily unavailable */
+#ifndef _POSIX_SOURCE
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+
+/* ipc/network software -- argument errors */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported */
+#define ENOTSUP EOPNOTSUPP /* Operation not supported */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Can't assign requested address */
+
+/* ipc/network software -- operational errors */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection on reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Socket is already connected */
+#define ENOTCONN 57 /* Socket is not connected */
+#define ESHUTDOWN 58 /* Can't send after socket shutdown */
+#define ETOOMANYREFS 59 /* Too many references: can't splice */
+#define ETIMEDOUT 60 /* Operation timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+
+#define ELOOP 62 /* Too many levels of symbolic links */
+#endif /* _POSIX_SOURCE */
+#define ENAMETOOLONG 63 /* File name too long */
+
+/* should be rearranged */
+#ifndef _POSIX_SOURCE
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#endif /* _POSIX_SOURCE */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+/* quotas & mush */
+#ifndef _POSIX_SOURCE
+#define EPROCLIM 67 /* Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Disc quota exceeded */
+
+/* Network File System */
+#define ESTALE 70 /* Stale NFS file handle */
+#define EREMOTE 71 /* Too many levels of remote in path */
+#define EBADRPC 72 /* RPC struct is bad */
+#define ERPCMISMATCH 73 /* RPC version wrong */
+#define EPROGUNAVAIL 74 /* RPC prog. not avail */
+#define EPROGMISMATCH 75 /* Program version wrong */
+#define EPROCUNAVAIL 76 /* Bad procedure for program */
+#endif /* _POSIX_SOURCE */
+
+#define ENOLCK 77 /* No locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#ifndef _POSIX_SOURCE
+#define EFTYPE 79 /* Inappropriate file type or format */
+#define EAUTH 80 /* Authentication error */
+#define ENEEDAUTH 81 /* Need authenticator */
+#define EIDRM 82 /* Identifier removed */
+#define ENOMSG 83 /* No message of desired type */
+#define EOVERFLOW 84 /* Value too large to be stored in data type */
+#define ECANCELED 85 /* Operation canceled */
+#define EILSEQ 86 /* Illegal byte sequence */
+#define ENOATTR 87 /* Attribute not found */
+
+#define EDOOFUS 88 /* Programming error */
+#endif /* _POSIX_SOURCE */
+
+#define EBADMSG 89 /* Bad message */
+#define EMULTIHOP 90 /* Multihop attempted */
+#define ENOLINK 91 /* Link has been severed */
+#define EPROTO 92 /* Protocol error */
+
+#ifndef _POSIX_SOURCE
+#define ENOTCAPABLE 93 /* Capabilities insufficient */
+#define ECAPMODE 94 /* Not permitted in capability mode */
+#endif /* _POSIX_SOURCE */
+
+#ifndef _POSIX_SOURCE
+#define ELAST 94 /* Must be equal largest errno */
+#endif /* _POSIX_SOURCE */
+
+#ifdef _KERNEL
+/* pseudo-errors returned inside kernel to modify return to process */
+#define ERESTART (-1) /* restart syscall */
+#define EJUSTRETURN (-2) /* don't modify regs, just return */
+#define ENOIOCTL (-3) /* ioctl not handled by this layer */
+#define EDIRIOCTL (-4) /* do direct ioctl in GEOM */
+#endif
+
+#endif
diff --git a/include/lib/stdlib/sys/limits.h b/include/lib/stdlib/sys/limits.h
new file mode 100644
index 000000000..d4fb8b7e0
--- /dev/null
+++ b/include/lib/stdlib/sys/limits.h
@@ -0,0 +1,102 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LIMITS_H_
+#define _SYS_LIMITS_H_
+
+#include <sys/cdefs.h>
+#include <machine/_limits.h>
+
+#define CHAR_BIT __CHAR_BIT /* number of bits in a char */
+
+#define SCHAR_MAX __SCHAR_MAX /* max value for a signed char */
+#define SCHAR_MIN __SCHAR_MIN /* min value for a signed char */
+
+#define UCHAR_MAX __UCHAR_MAX /* max value for an unsigned char */
+
+#ifdef __CHAR_UNSIGNED__
+#define CHAR_MAX UCHAR_MAX /* max value for a char */
+#define CHAR_MIN 0 /* min value for a char */
+#else
+#define CHAR_MAX SCHAR_MAX
+#define CHAR_MIN SCHAR_MIN
+#endif
+
+#define USHRT_MAX __USHRT_MAX /* max value for an unsigned short */
+#define SHRT_MAX __SHRT_MAX /* max value for a short */
+#define SHRT_MIN __SHRT_MIN /* min value for a short */
+
+#define UINT_MAX __UINT_MAX /* max value for an unsigned int */
+#define INT_MAX __INT_MAX /* max value for an int */
+#define INT_MIN __INT_MIN /* min value for an int */
+
+#define ULONG_MAX __ULONG_MAX /* max for an unsigned long */
+#define LONG_MAX __LONG_MAX /* max for a long */
+#define LONG_MIN __LONG_MIN /* min for a long */
+
+#ifdef __LONG_LONG_SUPPORTED
+#define ULLONG_MAX __ULLONG_MAX /* max for an unsigned long long */
+#define LLONG_MAX __LLONG_MAX /* max for a long long */
+#define LLONG_MIN __LLONG_MIN /* min for a long long */
+#endif
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SSIZE_MAX __SSIZE_MAX /* max value for an ssize_t */
+#endif
+
+#if __POSIX_VISIBLE >= 200112 || __XSI_VISIBLE
+#define SIZE_T_MAX __SIZE_T_MAX /* max value for a size_t */
+
+#define OFF_MAX __OFF_MAX /* max value for an off_t */
+#define OFF_MIN __OFF_MIN /* min value for an off_t */
+#endif
+
+#if __BSD_VISIBLE
+#define GID_MAX UINT_MAX /* max value for a gid_t */
+#define UID_MAX UINT_MAX /* max value for a uid_t */
+
+#define UQUAD_MAX (__UQUAD_MAX) /* max value for a uquad_t */
+#define QUAD_MAX (__QUAD_MAX) /* max value for a quad_t */
+#define QUAD_MIN (__QUAD_MIN) /* min value for a quad_t */
+#endif
+
+#if __XSI_VISIBLE || __POSIX_VISIBLE >= 200809
+#define LONG_BIT __LONG_BIT
+#define WORD_BIT __WORD_BIT
+#endif
+
+#if __POSIX_VISIBLE
+#define MQ_PRIO_MAX 64
+#endif
+
+#endif /* !_SYS_LIMITS_H_ */
diff --git a/include/lib/stdlib/sys/stdarg.h b/include/lib/stdlib/sys/stdarg.h
new file mode 100644
index 000000000..c315dfcee
--- /dev/null
+++ b/include/lib/stdlib/sys/stdarg.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2002 David E. O'Brien. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+#ifndef _VA_LIST_DECLARED
+#define _VA_LIST_DECLARED
+typedef __va_list va_list;
+#endif
+
+#ifdef __GNUCLIKE_BUILTIN_STDARG
+
+#define va_start(ap, last) \
+ __builtin_va_start((ap), (last))
+
+#define va_arg(ap, type) \
+ __builtin_va_arg((ap), type)
+
+#define __va_copy(dest, src) \
+ __builtin_va_copy((dest), (src))
+
+#if __ISO_C_VISIBLE >= 1999
+#define va_copy(dest, src) \
+ __va_copy(dest, src)
+#endif
+
+#define va_end(ap) \
+ __builtin_va_end(ap)
+
+#elif defined(lint)
+/* Provide a fake implementation for lint's benefit */
+#define __va_size(type) \
+ (((sizeof(type) + sizeof(long) - 1) / sizeof(long)) * sizeof(long))
+#define va_start(ap, last) \
+ ((ap) = (va_list)&(last) + __va_size(last))
+#define va_arg(ap, type) \
+ (*(type *)((ap) += __va_size(type), (ap) - __va_size(type)))
+#define va_end(ap)
+
+#else
+#error this file needs to be ported to your compiler
+#endif
+
+#endif /* !_MACHINE_STDARG_H_ */
diff --git a/include/lib/stdlib/sys/stdint.h b/include/lib/stdlib/sys/stdint.h
new file mode 100644
index 000000000..aa5ac81d1
--- /dev/null
+++ b/include/lib/stdlib/sys/stdint.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (c) 2001 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_STDINT_H_
+#define _SYS_STDINT_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+#include <machine/_stdint.h>
+#include <sys/_stdint.h>
+
+typedef __int_least8_t int_least8_t;
+typedef __int_least16_t int_least16_t;
+typedef __int_least32_t int_least32_t;
+typedef __int_least64_t int_least64_t;
+
+typedef __uint_least8_t uint_least8_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __uint_least64_t uint_least64_t;
+
+typedef __int_fast8_t int_fast8_t;
+typedef __int_fast16_t int_fast16_t;
+typedef __int_fast32_t int_fast32_t;
+typedef __int_fast64_t int_fast64_t;
+
+typedef __uint_fast8_t uint_fast8_t;
+typedef __uint_fast16_t uint_fast16_t;
+typedef __uint_fast32_t uint_fast32_t;
+typedef __uint_fast64_t uint_fast64_t;
+
+#ifndef _INTMAX_T_DECLARED
+typedef __intmax_t intmax_t;
+#define _INTMAX_T_DECLARED
+#endif
+#ifndef _UINTMAX_T_DECLARED
+typedef __uintmax_t uintmax_t;
+#define _UINTMAX_T_DECLARED
+#endif
+
+/* GNU and Darwin define this and people seem to think it's portable */
+#if defined(UINTPTR_MAX) && defined(UINT64_MAX) && (UINTPTR_MAX == UINT64_MAX)
+#define __WORDSIZE 64
+#else
+#define __WORDSIZE 32
+#endif
+
+#endif /* !_SYS_STDINT_H_ */
diff --git a/include/lib/stdlib/sys/types.h b/include/lib/stdlib/sys/types.h
new file mode 100644
index 000000000..ae2ea33a5
--- /dev/null
+++ b/include/lib/stdlib/sys/types.h
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.6 (Berkeley) 2/19/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_TYPES_H_
+#define _SYS_TYPES_H_
+
+#include <sys/cdefs.h>
+
+/* Machine type dependent parameters. */
+#include <sys/_types.h>
+
+#if __BSD_VISIBLE
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#ifndef _KERNEL
+typedef unsigned short ushort; /* Sys V compatibility */
+typedef unsigned int uint; /* Sys V compatibility */
+#endif
+#endif
+
+/*
+ * XXX POSIX sized integrals that should appear only in <sys/stdint.h>.
+ */
+#include <sys/_stdint.h>
+
+typedef __uint8_t u_int8_t; /* unsigned integrals (deprecated) */
+typedef __uint16_t u_int16_t;
+typedef __uint32_t u_int32_t;
+typedef __uint64_t u_int64_t;
+
+typedef __uint64_t u_quad_t; /* quads (deprecated) */
+typedef __int64_t quad_t;
+typedef quad_t *qaddr_t;
+
+typedef char *caddr_t; /* core address */
+typedef const char *c_caddr_t; /* core address, pointer to const */
+
+#ifndef _BLKSIZE_T_DECLARED
+typedef __blksize_t blksize_t;
+#define _BLKSIZE_T_DECLARED
+#endif
+
+typedef __cpuwhich_t cpuwhich_t;
+typedef __cpulevel_t cpulevel_t;
+typedef __cpusetid_t cpusetid_t;
+
+#ifndef _BLKCNT_T_DECLARED
+typedef __blkcnt_t blkcnt_t;
+#define _BLKCNT_T_DECLARED
+#endif
+
+#ifndef _CLOCK_T_DECLARED
+typedef __clock_t clock_t;
+#define _CLOCK_T_DECLARED
+#endif
+
+#ifndef _CLOCKID_T_DECLARED
+typedef __clockid_t clockid_t;
+#define _CLOCKID_T_DECLARED
+#endif
+
+typedef __critical_t critical_t; /* Critical section value */
+typedef __int64_t daddr_t; /* disk address */
+
+#ifndef _DEV_T_DECLARED
+typedef __dev_t dev_t; /* device number or struct cdev */
+#define _DEV_T_DECLARED
+#endif
+
+#ifndef _FFLAGS_T_DECLARED
+typedef __fflags_t fflags_t; /* file flags */
+#define _FFLAGS_T_DECLARED
+#endif
+
+typedef __fixpt_t fixpt_t; /* fixed point number */
+
+#ifndef _FSBLKCNT_T_DECLARED /* for statvfs() */
+typedef __fsblkcnt_t fsblkcnt_t;
+typedef __fsfilcnt_t fsfilcnt_t;
+#define _FSBLKCNT_T_DECLARED
+#endif
+
+#ifndef _GID_T_DECLARED
+typedef __gid_t gid_t; /* group id */
+#define _GID_T_DECLARED
+#endif
+
+#ifndef _IN_ADDR_T_DECLARED
+typedef __uint32_t in_addr_t; /* base type for internet address */
+#define _IN_ADDR_T_DECLARED
+#endif
+
+#ifndef _IN_PORT_T_DECLARED
+typedef __uint16_t in_port_t;
+#define _IN_PORT_T_DECLARED
+#endif
+
+#ifndef _ID_T_DECLARED
+typedef __id_t id_t; /* can hold a uid_t or pid_t */
+#define _ID_T_DECLARED
+#endif
+
+#ifndef _INO_T_DECLARED
+typedef __ino_t ino_t; /* inode number */
+#define _INO_T_DECLARED
+#endif
+
+#ifndef _KEY_T_DECLARED
+typedef __key_t key_t; /* IPC key (for Sys V IPC) */
+#define _KEY_T_DECLARED
+#endif
+
+#ifndef _LWPID_T_DECLARED
+typedef __lwpid_t lwpid_t; /* Thread ID (a.k.a. LWP) */
+#define _LWPID_T_DECLARED
+#endif
+
+#ifndef _MODE_T_DECLARED
+typedef __mode_t mode_t; /* permissions */
+#define _MODE_T_DECLARED
+#endif
+
+#ifndef _ACCMODE_T_DECLARED
+typedef __accmode_t accmode_t; /* access permissions */
+#define _ACCMODE_T_DECLARED
+#endif
+
+#ifndef _NLINK_T_DECLARED
+typedef __nlink_t nlink_t; /* link count */
+#define _NLINK_T_DECLARED
+#endif
+
+#ifndef _OFF_T_DECLARED
+typedef __off_t off_t; /* file offset */
+#define _OFF_T_DECLARED
+#endif
+
+#ifndef _PID_T_DECLARED
+typedef __pid_t pid_t; /* process id */
+#define _PID_T_DECLARED
+#endif
+
+typedef __register_t register_t;
+
+#ifndef _RLIM_T_DECLARED
+typedef __rlim_t rlim_t; /* resource limit */
+#define _RLIM_T_DECLARED
+#endif
+
+typedef __int64_t sbintime_t;
+
+typedef __segsz_t segsz_t; /* segment size (in pages) */
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#define _SSIZE_T_DECLARED
+#endif
+
+#ifndef _SUSECONDS_T_DECLARED
+typedef __suseconds_t suseconds_t; /* microseconds (signed) */
+#define _SUSECONDS_T_DECLARED
+#endif
+
+#ifndef _TIME_T_DECLARED
+typedef __time_t time_t;
+#define _TIME_T_DECLARED
+#endif
+
+#ifndef _TIMER_T_DECLARED
+typedef __timer_t timer_t;
+#define _TIMER_T_DECLARED
+#endif
+
+#ifndef _MQD_T_DECLARED
+typedef __mqd_t mqd_t;
+#define _MQD_T_DECLARED
+#endif
+
+typedef __u_register_t u_register_t;
+
+#ifndef _UID_T_DECLARED
+typedef __uid_t uid_t; /* user id */
+#define _UID_T_DECLARED
+#endif
+
+#ifndef _USECONDS_T_DECLARED
+typedef __useconds_t useconds_t; /* microseconds (unsigned) */
+#define _USECONDS_T_DECLARED
+#endif
+
+#ifndef _CAP_RIGHTS_T_DECLARED
+#define _CAP_RIGHTS_T_DECLARED
+struct cap_rights;
+
+typedef struct cap_rights cap_rights_t;
+#endif
+
+typedef __vm_offset_t vm_offset_t;
+typedef __vm_ooffset_t vm_ooffset_t;
+typedef __vm_paddr_t vm_paddr_t;
+typedef __vm_pindex_t vm_pindex_t;
+typedef __vm_size_t vm_size_t;
+
+#endif /* !_SYS_TYPES_H_ */
diff --git a/include/lib/stdlib/sys/uuid.h b/include/lib/stdlib/sys/uuid.h
new file mode 100644
index 000000000..934fb4fdf
--- /dev/null
+++ b/include/lib/stdlib/sys/uuid.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_UUID_H_
+#define _SYS_UUID_H_
+
+#include <sys/cdefs.h>
+#include <sys/_stdint.h>
+
+/* Length of a node address (an IEEE 802 address). */
+#define _UUID_NODE_LEN 6
+
+/*
+ * See also:
+ * http://www.opengroup.org/dce/info/draft-leach-uuids-guids-01.txt
+ * http://www.opengroup.org/onlinepubs/009629399/apdxa.htm
+ *
+ * A DCE 1.1 compatible source representation of UUIDs.
+ */
+struct uuid {
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint8_t clock_seq_hi_and_reserved;
+ uint8_t clock_seq_low;
+ uint8_t node[_UUID_NODE_LEN];
+};
+
+typedef struct uuid uuid_t;
+
+#endif /* _SYS_UUID_H_ */
diff --git a/include/lib/tftf_lib.h b/include/lib/tftf_lib.h
new file mode 100644
index 000000000..e5900b8a1
--- /dev/null
+++ b/include/lib/tftf_lib.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TFTF_LIB_H__
+#define __TFTF_LIB_H__
+
+#ifndef __ASSEMBLY__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+/*
+ * Possible error codes for signaling the result of a test
+ * TEST_RESULT_MIN and TEST_RESULT_MAX are only used as bounds in the enum.
+ */
+typedef enum {
+ /*
+ * NA = Not applicable.
+ * Initial value for a test result.
+ * Used for CPUs that don't participate in the test.
+ */
+ TEST_RESULT_NA = -1,
+
+ TEST_RESULT_MIN = 0,
+ TEST_RESULT_SKIPPED = TEST_RESULT_MIN,
+ TEST_RESULT_SUCCESS,
+ TEST_RESULT_FAIL,
+ TEST_RESULT_CRASHED,
+
+ TEST_RESULT_MAX
+} test_result_t;
+
+const char *test_result_to_string(test_result_t result);
+
+#define TEST_RESULT_IS_VALID(result) \
+ ((result >= TEST_RESULT_MIN) && (result < TEST_RESULT_MAX))
+
+/*
+ * PSCI Function Wrappers
+ *
+ * SMC calls to PSCI functions
+ */
+int32_t tftf_psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entry_point_address,
+ u_register_t context_id);
+int32_t tftf_psci_cpu_off(void);
+int32_t tftf_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level);
+int32_t tftf_psci_node_hw_state(u_register_t target_cpu, uint32_t power_level);
+int32_t tftf_get_psci_feature_info(uint32_t psci_func_id);
+u_register_t tftf_psci_stat_count(u_register_t target_cpu,
+ uint32_t power_state);
+u_register_t tftf_psci_stat_residency(u_register_t target_cpu,
+ uint32_t power_state);
+
+/*
+ * PSCI Helper functions
+ */
+
+/*
+ * Gets the context ID used when calling tftf_psci_cpu_on().
+ */
+u_register_t tftf_get_cpu_on_ctx_id(unsigned int core_pos);
+
+/*
+ * Sets the context ID used when calling tftf_psci_cpu_on().
+ */
+void tftf_set_cpu_on_ctx_id(unsigned int core_pos, u_register_t context_id);
+
+/*
+ * Gets the PSCI version of Trusted Firmware-A. The version number returned
+ * is a 32-bit unsigned integer, with the upper 16 bits denoting the major
+ * revision, and the lower 16 bits denoting the minor revision.
+ */
+unsigned int tftf_get_psci_version(void);
+
+/*
+ * Returns 0 if version is not a valid PSCI version supported by TFTF.
+ * Otherwise it returns a value different of 0.
+ */
+int tftf_is_valid_psci_version(unsigned int version);
+
+
+/*
+ * The function constructs a composite state_id up-to the specified
+ * affinity level querying the relevant state property from the platform.
+ * It chooses the first matching state property from the array returned
+ * by platform. In case the requested affinity level is not supported by
+ * the platform, then this function uses DUMMY_STATE_ID as the local state
+ * for that level. This allows the tests to construct composite state-id
+ * for invalid affinity levels as well. It returns the expected return
+ * value from CPU SUSPEND call.
+ */
+int tftf_psci_make_composite_state_id(uint32_t affinity_level,
+ uint32_t state_type, uint32_t *state_id);
+
+/*
+ * This function composes the power state parameter in the right format
+ * needed by PSCI. The detection of the power state format is done during
+ * cold boot by tftf_detect_psci_pstate_format() function.
+ */
+uint32_t tftf_make_psci_pstate(uint32_t affinity_level,
+ uint32_t state_type,
+ uint32_t state_id);
+
+/*
+ * Returns 1, if the EL3 software supports PSCI's original format state ID as
+ * NULL else returns zero
+ */
+unsigned int tftf_is_psci_state_id_null(void);
+
+/*
+ * Returns 1, if the EL3 software supports PSCI's original state format else
+ * returns zero
+ */
+unsigned int tftf_is_psci_pstate_format_original(void);
+
+/* Functions to wait for a specified number of ms or us */
+void waitms(uint64_t ms);
+void waitus(uint64_t us);
+
+/*
+ * SMC calls take a function identifier and up to 6 arguments.
+ * Additionally, for SMC calls that originate from EL2, an optional seventh
+ * argument can be added. Given that TFTF runs in EL2, we need to be able to
+ * specify it.
+ */
+typedef struct {
+ u_register_t arg0;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+} smc_args;
+
+/* SMC calls can return up to 4 register values */
+typedef struct {
+ u_register_t ret0;
+ u_register_t ret1;
+ u_register_t ret2;
+ u_register_t ret3;
+} smc_ret_values;
+
+/*
+ * Trigger an SMC call.
+ */
+smc_ret_values tftf_smc(const smc_args *args);
+
+/*
+ * Write a formatted string in the test output buffer.
+ * Just like the standard libc's printf() function, the string produced is under
+ * the control of a format string that specifies how subsequent arguments are
+ * converted.
+ *
+ * The string will appear in the test report.
+ * Use mp_printf() instead for volatile debug messages that are not meant to be
+ * stored into the test report.
+ * Note: The test output buffer referred here is a temporary buffer stored in
+ * RAM. This function doesn't write anything into NVM.
+ *
+ * Upon successful return, return the number of characters printed (not
+ * including the final '\0' character). If an output error is encountered,
+ * a negative value is returned. If the function is not able to print any
+ * character at all, this is considered as an output error. Note that a partial
+ * write (i.e. when the string is truncated) is not considered as an output
+ * error.
+ */
+__attribute__((format(printf, 1, 2)))
+int tftf_testcase_printf(const char *format, ...);
+
+/*
+ * This function is meant to be used by tests.
+ * It tells the framework that the test is going to reset the platform.
+ *
+ * If the test omits to call this function before resetting, the framework will
+ * consider the test has crashed upon resumption.
+ */
+void tftf_notify_reboot(void);
+
+/*
+ * Returns 0 if the test function is executed for the first time,
+ * or 1 if the test rebooted the platform and the test function is being
+ * executed again.
+ * This function is used for tests that reboot the platform, so that they can
+ * execute different code paths on 1st execution and subsequent executions.
+ */
+unsigned int tftf_is_rebooted(void);
+
+static inline unsigned int make_mpid(unsigned int clusterid,
+ unsigned int coreid)
+{
+ /*
+ * If MT bit is set then need to shift the affinities and also set the
+ * MT bit.
+ */
+ if ((read_mpidr_el1() & MPIDR_MT_MASK) != 0)
+ return MPIDR_MT_MASK |
+ ((clusterid & MPIDR_AFFLVL_MASK) << MPIDR_AFF2_SHIFT) |
+ ((coreid & MPIDR_AFFLVL_MASK) << MPIDR_AFF1_SHIFT);
+ else
+ return ((clusterid & MPIDR_AFFLVL_MASK) << MPIDR_AFF1_SHIFT) |
+ ((coreid & MPIDR_AFFLVL_MASK) << MPIDR_AFF0_SHIFT);
+
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __TFTF_LIB_H__ */
diff --git a/include/lib/timer.h b/include/lib/timer.h
new file mode 100644
index 000000000..0bfff01e7
--- /dev/null
+++ b/include/lib/timer.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TIMER_H__
+#define __TIMER_H__
+
+#include <irq.h>
+
+typedef struct plat_timer {
+ int (*program)(unsigned long time_out_ms);
+ int (*cancel)(void);
+ int (*handler)(void);
+
+ /*
+ * Duration of the atomic time slice in milliseconds. All timer
+ * requests within the same time slice are merged into one. This value
+ * should be chosen such that it is greater than the time required to
+ * program the timer.
+ */
+ unsigned int timer_step_value;
+ unsigned int timer_irq;
+} plat_timer_t;
+
+/*
+ * Gets the platform specific timer implementation information and initialises
+ * the timer framework and peripheral.
+ * Returns 0 on success or return value of timer peripheral initialisation
+ * function.
+ */
+int tftf_initialise_timer(void);
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs.
+ * The interrupt is sent to the calling core of this api. The actual
+ * time the interrupt is received by the core can be greater than
+ * the requested time.
+ * Returns 0 on success and -1 on failure.
+ */
+int tftf_program_timer(unsigned long milli_secs);
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs and to
+ * suspend the CPU to the desired power state. The interrupt is sent to the
+ * calling core of this api. The actual time the interrupt is received by the
+ * core can be greater than the requested time.
+ *
+ * Return codes from tftf_program_timer calls and tftf_cpu_suspend are stored
+ * respectively in timer_rc and suspend_rc output parameters.
+ * If a function is not executed, the return value stored in the output
+ * parameters will be as if the corresponding call succeeded. NULL pointers are
+ * accepted to discard the return codes.
+ * Returns 0 on success and -1 on failure.
+ */
+
+int tftf_program_timer_and_suspend(unsigned long milli_secs,
+ unsigned int pwr_state,
+ int *timer_rc, int *suspend_rc);
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs and to
+ * suspend the system. The interrupt is sent to the calling core of this api.
+ * The actual time the interrupt is received by the core can be greater than
+ * the requested time. For the system suspend to succeed, all cores other than
+ * the calling core should be in the OFF state.
+ *
+ * Return codes from tftf_program_timer calls and tftf_cpu_system suspend
+ * are stored respectively in timer_rc and suspend_rc output parameters.
+ * If a function is not executed, the return value stored in the output
+ * parameters will be as if the corresponding call succeeded. NULL pointers are
+ * accepted to discard the return codes.
+ * Returns 0 on success and -1 on failure.
+ */
+int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
+ int *timer_rc, int *suspend_rc);
+
+/*
+ * Suspends the calling CPU for specified milliseconds.
+ *
+ * Returns 0 on success, and -1 otherwise.
+ */
+int tftf_timer_sleep(unsigned long milli_secs);
+
+/*
+ * Common handler for servicing all the timer interrupts. It in turn calls the
+ * peripheral specific handler. It also sends WAKE_SGI to all the cores which
+ * requested an interrupt within a time frame of timer_step_value.
+ * Also, if there are pending interrupt requests, reprograms the timer
+ * accordingly to fire an interrupt at the right time.
+ *
+ * Returns 0 on success.
+ */
+int tftf_timer_framework_handler(void *data);
+
+/*
+ * Cancels the previously programmed value by the called core.
+ * This api should be used only for cancelling the self interrupt request
+ * by a core.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_cancel_timer(void);
+
+/*
+ * It is used to register a handler which needs to be called when a timer
+ * interrupt is fired.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_timer_register_handler(irq_handler_t irq_handler);
+
+/*
+ * It is used to unregister a previously registered handler.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_timer_unregister_handler(void);
+
+/*
+ * Return the IRQ Number of the registered timer interrupt
+ */
+unsigned int tftf_get_timer_irq(void);
+
+/*
+ * Returns the timer step value in a platform and is used by test cases.
+ */
+unsigned int tftf_get_timer_step_value(void);
+
+/*
+ * Restore the GIC state after wake-up from system suspend
+ */
+void tftf_timer_gic_state_restore(void);
+
+#endif /* __TIMER_H__ */
diff --git a/include/lib/utils/math_utils.h b/include/lib/utils/math_utils.h
new file mode 100644
index 000000000..9d8e88d71
--- /dev/null
+++ b/include/lib/utils/math_utils.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MATH_UTILS_H__
+#define __MATH_UTILS_H__
+
+#include <stdint.h>
+
+/* Simple utility to calculate `power` of a `base` number. */
+static inline unsigned int pow(unsigned int base, unsigned int power)
+{
+ unsigned int result = 1;
+ while (power) {
+ result *= base;
+ power--;
+ }
+ return result;
+}
+
+#endif /* __MATH_UTILS_H__ */
diff --git a/include/lib/utils/misc_utils.h b/include/lib/utils/misc_utils.h
new file mode 100644
index 000000000..0949816e0
--- /dev/null
+++ b/include/lib/utils/misc_utils.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MISC_UTILS_H__
+#define __MISC_UTILS_H__
+
+#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
+
+#endif /* __MISC_UTILS_H__ */
diff --git a/include/lib/utils/uuid_utils.h b/include/lib/utils/uuid_utils.h
new file mode 100644
index 000000000..dda824160
--- /dev/null
+++ b/include/lib/utils/uuid_utils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UUID_UTILS_H__
+#define __UUID_UTILS_H__
+
+#include <stdint.h>
+#include <uuid.h>
+
+/* Size (in bytes) of a UUID string as formatted by uuid_to_str() */
+#define UUID_STR_SIZE 79
+
+/*
+ * Convert a UUID into a string.
+ *
+ * The caller is responsible for allocating the output string buffer
+ * pointed by 'str'. It must be at least UUID_STR_SIZE bytes long.
+ *
+ * Return the UUID string.
+ */
+char *uuid_to_str(const uuid_t *uuid, char *str);
+
+/*
+ * Return 1 if uuid == uuid_null, 0 otherwise.
+ */
+unsigned int is_uuid_null(const uuid_t *uuid);
+
+/*
+ * Return 1 if uuid1 == uuid2, 0 otherwise.
+ */
+unsigned int uuid_equal(const uuid_t *uuid1, const uuid_t *uuid2);
+
+/*
+ * Take four 32-bit words of data and combine them into a UUID.
+ *
+ * The caller is responsible for allocating the output UUID variable
+ * pointed by 'uuid'.
+ *
+ * Return the UUID.
+ */
+uuid_t *make_uuid_from_4words(uuid_t *uuid,
+ uint32_t uuid0,
+ uint32_t uuid1,
+ uint32_t uuid2,
+ uint32_t uuid3);
+
+#endif /* __UUID_UTILS_H__ */
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
new file mode 100644
index 000000000..5b4fd7808
--- /dev/null
+++ b/include/lib/utils_def.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTILS_DEF_H
+#define UTILS_DEF_H
+
+/* Compute the number of elements in the given array */
+#define ARRAY_SIZE(a) \
+ (sizeof(a) / sizeof((a)[0]))
+
+#define IS_POWER_OF_TWO(x) \
+ (((x) & ((x) - 1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n) (4 << (n))
+
+#define BIT_32(nr) (U(1) << (nr))
+#define BIT_64(nr) (ULL(1) << (nr))
+
+#ifdef AARCH32
+#define BIT BIT_32
+#else
+#define BIT BIT_64
+#endif
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK_32(h, l) \
+ (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+ (((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
+
+#ifdef AARCH32
+#define GENMASK GENMASK_32
+#else
+#define GENMASK GENMASK_64
+#endif
+
+/*
+ * This variant of div_round_up can be used in macro definition but should not
+ * be used in C code as the `div` parameter is evaluated twice.
+ */
+#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d) - 1) / (d))
+
+#define div_round_up(val, div) __extension__ ({ \
+ __typeof__(div) _div = (div); \
+ ((val) + _div - 1) / _div; \
+})
+
+#define MIN(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x < _y ? _x : _y; \
+})
+
+#define MAX(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x > _y ? _x : _y; \
+})
+
+/*
+ * The round_up() macro rounds up a value to the given boundary in a
+ * type-agnostic yet type-safe manner. The boundary must be a power of two.
+ * In other words, it computes the smallest multiple of boundary which is
+ * greater than or equal to value.
+ *
+ * round_down() is similar but rounds the value down instead.
+ */
+#define round_boundary(value, boundary) \
+ ((__typeof__(value))((boundary) - 1))
+
+#define round_up(value, boundary) \
+ ((((value) - 1) | round_boundary(value, boundary)) + 1)
+
+#define round_down(value, boundary) \
+ ((value) & ~round_boundary(value, boundary))
+
+/*
+ * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
+ * Both arguments must be unsigned pointer values (i.e. uintptr_t).
+ */
+#define check_uptr_overflow(_ptr, _inc) \
+ ((_ptr) > (UINTPTR_MAX - (_inc)))
+
+/*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(_u32, _inc) \
+ ((_u32) > (UINT32_MAX - (_inc)))
+
+/*
+ * For those constants to be shared between C and other sources, apply a 'U',
+ * 'UL', 'ULL', 'L' or 'LL' suffix to the argument only in C, to avoid
+ * undefined or unintended behaviour.
+ *
+ * The GNU assembler and linker do not support these suffixes (it causes the
+ * build process to fail) therefore the suffix is omitted when used in linker
+ * scripts and assembler files.
+*/
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+# define U(_x) (_x)
+# define UL(_x) (_x)
+# define ULL(_x) (_x)
+# define L(_x) (_x)
+# define LL(_x) (_x)
+#else
+# define U(_x) (_x##U)
+# define UL(_x) (_x##UL)
+# define ULL(_x) (_x##ULL)
+# define L(_x) (_x##L)
+# define LL(_x) (_x##LL)
+#endif
+
+/* Register size of the current architecture. */
+#ifdef AARCH32
+#define REGSZ U(4)
+#else
+#define REGSZ U(8)
+#endif
+
+/*
+ * Test for the current architecture version to be at least the version
+ * expected.
+ */
+#define ARM_ARCH_AT_LEAST(_maj, _min) \
+ ((ARM_ARCH_MAJOR > (_maj)) || \
+ ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
+
+/*
+ * Import an assembly or linker symbol as a C expression with the specified
+ * type
+ */
+#define IMPORT_SYM(type, sym, name) \
+ extern char sym[];\
+ static const __attribute__((unused)) type name = (type) sym;
+
+/*
+ * When the symbol is used to hold a pointer, its alignment can be asserted
+ * with this macro. For example, if there is a linker symbol that is going to
+ * be used as a 64-bit pointer, the value of the linker symbol must also be
+ * aligned to 64 bit. This macro makes sure this is the case.
+ */
+#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
+
+
+#endif /* UTILS_DEF_H */
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
new file mode 100644
index 000000000..37f3b53b7
--- /dev/null
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE != PAGE_SIZE_4KB
+#error "Invalid granule size. AArch32 supports 4KB pages only."
+#endif
+
+#define MIN_LVL_BLOCK_DESC U(1)
+
+#define XLAT_TABLE_LEVEL_MIN U(1)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch32
+ * state.
+ *
+ * TTBCR.TxSZ is calculated as 32 minus the width of said address space. The
+ * value of TTBCR.TxSZ must be in the range 0 to 7 [1], which means that the
+ * virtual address space width must be in the range 32 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 1 supports virtual address spaces of widths 32 to 31 bits;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as the initial lookup level with 4 KB granularity.
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ *
+ * For example, for a 31-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? \
+ U(1) : U(2))
+
+#endif /* XLAT_TABLES_AARCH32_H */
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
new file mode 100644
index 000000000..91ca8e477
--- /dev/null
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * Encode a Physical Address Space size for its use in TCR_ELx.
+ */
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4KB
+# define MIN_LVL_BLOCK_DESC U(1)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
+# define MIN_LVL_BLOCK_DESC U(2)
+#endif
+
+#define XLAT_TABLE_LEVEL_MIN U(0)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch64
+ * state.
+ *
+ * TCR.TxSZ is calculated as 64 minus the width of said address space.
+ * The value of TCR.TxSZ must be in the range 16 to 39 [1], which means that
+ * the virtual address space width must be in the range 48 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * Page 1730: 'Input address size', 'For all translation stages'.
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 0 supports virtual address spaces of widths 48 to 40 bits;
+ * - level 1 from 39 to 31;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as initial lookup level with 4 KB granularity. See section
+ * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ *
+ * For example, for a 35-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
+ ? 0U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
+ ? 1U : 2U))
+
+#endif /* XLAT_TABLES_AARCH64_H */
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 000000000..ffb5abe02
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H
+
+/*
+ * The following flags are passed to enable_mmu_xxx() to override the default
+ * values used to program system registers while enabling the MMU.
+ */
+
+/*
+ * When this flag is used, all data access to Normal memory from this EL and all
+ * Normal memory accesses to the translation tables of this EL are non-cacheable
+ * for all levels of data and unified cache until the caches are enabled by
+ * setting the bit SCTLR_ELx.C.
+ */
+#define DISABLE_DCACHE (U(1) << 0)
+
+/*
+ * Mark the translation tables as non-cacheable for the MMU table walker, which
+ * is a different observer from the PE/CPU. If the flag is not specified, the
+ * tables are cacheable for the MMU table walker.
+ *
+ * Note that, as far as the PE/CPU observer is concerned, the attributes used
+ * are the ones specified in the translation tables themselves. The MAIR
+ * register specifies the cacheability through the field AttrIndx of the lower
+ * attributes of the translation tables. The shareability is specified in the SH
+ * field of the lower attributes.
+ *
+ * The MMU table walker uses the attributes specified in the fields ORGNn, IRGNn
+ * and SHn of the TCR register to access the translation tables.
+ *
+ * The attributes specified in the TCR register and the tables can be different
+ * as there are no checks to prevent that. Special care must be taken to ensure
+ * that there aren't mismatches. The behaviour in that case is described in the
+ * sections 'Mismatched memory attributes' in the ARMv8 ARM.
+ */
+#define XLAT_TABLE_NC (U(1) << 1)
+
+/*
+ * Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR 0
+#define MMU_CFG_TCR 1
+#define MMU_CFG_TTBR0 2
+#define MMU_CFG_PARAM_MAX 3
+
+#ifndef __ASSEMBLY__
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to array of size
+ * MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime);
+
+#ifdef AARCH32
+/* AArch32 specific translation table API */
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
+#else
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
+#endif /* AARCH32 */
+
+bool xlat_arch_is_granule_size_supported(size_t size);
+size_t xlat_arch_get_max_supported_granule_size(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* XLAT_MMU_HELPERS_H */
diff --git a/include/lib/xlat_tables/xlat_tables_arch.h b/include/lib/xlat_tables/xlat_tables_arch.h
new file mode 100644
index 000000000..251b0206a
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_arch.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H
+
+#ifdef AARCH32
+#include "aarch32/xlat_tables_aarch32.h"
+#else
+#include "aarch64/xlat_tables_aarch64.h"
+#endif
+
+/*
+ * Evaluates to 1 if the given virtual address space size is valid, or 0 if it's
+ * not.
+ *
+ * A valid size is one that is a power of 2 and is within the architectural
+ * limits. Note that these limits are different for AArch32 and AArch64.
+ */
+#define CHECK_VIRT_ADDR_SPACE_SIZE(size) \
+ (((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) && \
+ ((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) && \
+ IS_POWER_OF_TWO(size))
+
+/*
+ * Evaluates to 1 if the given physical address space size is a power of 2,
+ * or 0 if it's not.
+ */
+#define CHECK_PHY_ADDR_SPACE_SIZE(size) \
+ (IS_POWER_OF_TWO(size))
+
+/*
+ * Compute the number of entries required at the initial lookup level to address
+ * the whole virtual address space.
+ */
+#define GET_NUM_BASE_LEVEL_ENTRIES(addr_space_size) \
+ ((addr_space_size) >> \
+ XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
+
+#endif /* XLAT_TABLES_ARCH_H */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 000000000..d260c3ef0
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_mmu_helpers.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB (U(1) << 9)
+#define NUM_4K_IN_2MB (U(1) << 9)
+#define NUM_GB_IN_4GB (U(1) << 2)
+
+#define TWO_MB_SHIFT U(21)
+#define ONE_GB_SHIFT U(30)
+#define FOUR_KB_SHIFT U(12)
+
+#define ONE_GB_INDEX(x) ((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
+
+#define PAGE_SIZE_4KB U(4096)
+#define PAGE_SIZE_16KB U(16384)
+#define PAGE_SIZE_64KB U(65536)
+
+#define INVALID_DESC U(0x0)
+/*
+ * A block descriptor points to a region of memory bigger than the granule size
+ * (e.g. a 2MB region when the granule size is 4KB).
+ */
+#define BLOCK_DESC U(0x1) /* Table levels 0-2 */
+/* A table descriptor points to the next level of translation table. */
+#define TABLE_DESC U(0x3) /* Table levels 0-2 */
+/*
+ * A page descriptor points to a page, i.e. a memory region whose size is the
+ * translation granule size (e.g. 4KB).
+ */
+#define PAGE_DESC U(0x3) /* Table level 3 */
+
+#define DESC_MASK U(0x3)
+
+#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
+
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
+#define XN (ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN (ULL(1) << 2)
+#define PXN (ULL(1) << 1)
+#define CONT_HINT (ULL(1) << 0)
+#define UPPER_ATTRS(x) (((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL (U(1) << 9)
+#define ACCESS_FLAG (U(1) << 8)
+#define NSH (U(0x0) << 6)
+#define OSH (U(0x2) << 6)
+#define ISH (U(0x3) << 6)
+
+#define TABLE_ADDR_MASK ULL(0x0000FFFFFFFFF000)
+
+/*
+ * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
+ * 64KB. However, only the 4KB granule is supported at the moment.
+ */
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - U(1))
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == U(0))
+
+#define XLAT_ENTRY_SIZE_SHIFT U(3) /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE (U(1) << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_LEVEL_MAX U(3)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - U(1))
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT (L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
+ ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level) (UL(1) << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - UL(1))
+/* Mask to get the address bits common to a block of a certain table level */
+#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
+/*
+ * Extract from the given virtual address the index into the given lookup level.
+ * This macro assumes the system is using the 4KB translation granule.
+ */
+#define XLAT_TABLE_IDX(virtual_addr, level) \
+ (((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
+
+/*
+ * The ARMv8 translation table descriptor format defines AP[2:1] as the Access
+ * Permissions bits, and does not define an AP[0] bit.
+ *
+ * AP[1] is valid only for a stage 1 translation that supports two VA ranges
+ * (i.e. in the Armv8.0 architecture, that is the S-EL1&0 regime). It is RES1
+ * when stage 1 translations can only support one VA range.
+ */
+#define AP2_SHIFT U(0x7)
+#define AP2_RO ULL(0x1)
+#define AP2_RW ULL(0x0)
+
+#define AP1_SHIFT U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED ULL(0x0)
+#define AP1_RES1 ULL(0x1)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO (AP2_RO << 5)
+#define AP_RW (AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED (AP1_ACCESS_UNPRIVILEGED << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
+#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
+#define NS (U(0x1) << 3)
+#define ATTR_NON_CACHEABLE_INDEX ULL(0x2)
+#define ATTR_DEVICE_INDEX ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX ULL(0x0)
+#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
+
+/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_NC, MAIR_NORM_NC)
+/* Device-nGnRE */
+#define ATTR_DEVICE MAIR_DEV_nGnRE
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+#define MAIR_ATTR_SET(attr, index) ((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK U(0x3)
+#define ATTR_INDEX_GET(attr) (((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Shift values for the attributes fields in a block or page descriptor.
+ * See section D4.3.3 in the ARMv8-A ARM (issue B.a).
+ */
+
+/* Memory attributes index field, AttrIndx[2:0]. */
+#define ATTR_INDEX_SHIFT 2
+/* Non-secure bit, NS. */
+#define NS_SHIFT 5
+/* Shareability field, SH[1:0] */
+#define SHAREABILITY_SHIFT 8
+/* The Access Flag, AF. */
+#define ACCESS_FLAG_SHIFT 10
+/* The not global bit, nG. */
+#define NOT_GLOBAL_SHIFT 11
+/* Contiguous hint bit. */
+#define CONT_HINT_SHIFT 52
+/* Execute-never bits, XN. */
+#define PXN_SHIFT 53
+#define XN_SHIFT 54
+#define UXN_SHIFT XN_SHIFT
+
+#endif /* XLAT_TABLES_DEFS_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 000000000..4299c3143
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/*
+ * Default granularity size for an mmap_region_t.
+ * Useful when no specific granularity is required.
+ *
+ * By default, choose the biggest possible block size allowed by the
+ * architectural state and granule size in order to minimize the number of page
+ * tables required for the mapping.
+ */
+#define REGION_DEFAULT_GRANULARITY XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)
+
+/* Helper macro to define an mmap_region_t. */
+#define MAP_REGION(_pa, _va, _sz, _attr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+
+/* Helper macro to define an mmap_region_t with an identity mapping. */
+#define MAP_REGION_FLAT(_adr, _sz, _attr) \
+ MAP_REGION(_adr, _adr, _sz, _attr)
+
+/*
+ * Helper macro to define entries for mmap_region_t. It allows to define 'pa'
+ * and sets 'va' to 0 for each region. To be used with mmap_add_alloc_va().
+ */
+#define MAP_REGION_ALLOC_VA(pa, sz, attr) MAP_REGION(pa, 0, sz, attr)
+
+/*
+ * Helper macro to define an mmap_region_t to map with the desired granularity
+ * of translation tables.
+ *
+ * The granularity value passed to this macro must be a valid block or page
+ * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
+ * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
+ * is free to choose the granularity for this region. In this case, it is
+ * equivalent to the MAP_REGION() macro.
+ */
+#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+
+/*
+ * Shifts and masks to access fields of an mmap attribute
+ */
+#define MT_TYPE_MASK U(0x7)
+#define MT_TYPE(_attr) ((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT U(5)
+/* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
+#define MT_USER_SHIFT U(6)
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE U(0)
+#define MT_NON_CACHEABLE U(1)
+#define MT_MEMORY U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
+
+#define MT_RO (U(0) << MT_PERM_SHIFT)
+#define MT_RW (U(1) << MT_PERM_SHIFT)
+
+#define MT_SECURE (U(0) << MT_SEC_SHIFT)
+#define MT_NS (U(1) << MT_SEC_SHIFT)
+
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ * - Device memory is always marked as execute-never.
+ * - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE (U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER (U(1) << MT_EXECUTE_SHIFT)
+
+/*
+ * When mapping a region at EL0 or EL1, this attribute will be used to determine
+ * if a User mapping (EL0) will be created or a Privileged mapping (EL1).
+ */
+#define MT_USER (U(1) << MT_USER_SHIFT)
+#define MT_PRIVILEGED (U(0) << MT_USER_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+#if !ERROR_DEPRECATED
+typedef unsigned int mmap_attr_t;
+#endif
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+ unsigned long long base_pa;
+ uintptr_t base_va;
+ size_t size;
+ unsigned int attr;
+ /* Desired granularity. See the MAP_REGION2() macro for more details. */
+ size_t granularity;
+} mmap_region_t;
+
+/*
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
+ */
+#define EL1_EL0_REGIME 1
+#define EL2_REGIME 2
+#define EL3_REGIME 3
+#define EL_REGIME_INVALID -1
+
+/*
+ * Declare the translation context type.
+ * Its definition is private.
+ */
+typedef struct xlat_ctx xlat_ctx_t;
+
+/*
+ * Statically allocate a translation context and associated structures. Also
+ * initialize them.
+ *
+ * _ctx_name:
+ * Prefix for the translation context variable.
+ * E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
+ * Useful to distinguish multiple contexts from one another.
+ *
+ * _mmap_count:
+ * Number of mmap_region_t to allocate.
+ * Would typically be MAX_MMAP_REGIONS for the translation context describing
+ * the software image currently executing.
+ *
+ * _xlat_tables_count:
+ * Number of sub-translation tables to allocate.
+ * Would typically be MAX_XLAT_TABLES for the translation context describing
+ * the software image currently executing.
+ * Note that this is only for sub-tables; at the initial lookup level, there
+ * is always a single table.
+ *
+ * _virt_addr_space_size, _phy_addr_space_size:
+ * Size (in bytes) of the virtual (resp. physical) address space.
+ * Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
+ * (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
+ * software image currently executing.
+ */
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ EL_REGIME_INVALID, "xlat_table")
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
+ *
+ * _xlat_regime:
+ * Specify the translation regime managed by this xlat_ctx_t instance. The
+ * values are the one from the EL*_REGIME definitions.
+ *
+ * _section_name:
+ * Specify the name of the section where the translation tables have to be
+ * placed by the linker.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size, \
+ _xlat_regime, _section_name) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ (_xlat_regime), (_section_name))
+
+/******************************************************************************
+ * Generic translation table APIs.
+ * Each API comes in 2 variants:
+ * - one that acts on the current translation context for this software image
+ * - another that acts on the given translation context instead. This variant
+ * is named after the 1st version, with an additional '_ctx' suffix.
+ *****************************************************************************/
+
+/*
+ * Initialize translation tables from the current list of mmap regions. Calling
+ * this function marks the transition point after which static regions can no
+ * longer be added.
+ */
+void init_xlat_tables(void);
+void init_xlat_tables_ctx(xlat_ctx_t *ctx);
+
+/*
+ * Add a static region with defined base PA and base VA. This function can only
+ * be used before initializing the translation tables. The region cannot be
+ * removed afterwards.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This
+ * function can only be used before initializing the translation tables. The
+ * regions cannot be removed afterwards.
+ */
+void mmap_add(const mmap_region_t *mm);
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add a region with defined base PA. Returns base VA calculated using the
+ * highest existing region in the mmap array even if it fails to allocate the
+ * region.
+ */
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA, and fill the base VA
+ * field on the array of structs. This function can only be used before
+ * initializing the translation tables. The regions cannot be removed afterwards.
+ */
+void mmap_add_alloc_va(mmap_region_t *mm);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Add a dynamic region with defined base PA and base VA. This type of region
+ * can be added and removed even after the translation tables are initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: Memory limits were surpassed.
+ * ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ * EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add a dynamic region with defined base PA. Returns base VA calculated using
+ * the highest existing region in the mmap array even if it fails to allocate
+ * the region.
+ *
+ * mmap_add_dynamic_region_alloc_va() returns the allocated VA in 'base_va'.
+ * mmap_add_dynamic_region_alloc_va_ctx() returns it in 'mm->base_va'.
+ *
+ * It returns the same error values as mmap_add_dynamic_region().
+ */
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the translation tables are
+ * initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: The specified region wasn't found.
+ * EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size);
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Change the memory attributes of the memory region starting from a given
+ * virtual address in a set of translation tables.
+ *
+ * This function can only be used after the translation tables have been
+ * initialized.
+ *
+ * The base address of the memory region must be aligned on a page boundary.
+ * The size of this memory region must be a multiple of a page size.
+ * The memory region must be already mapped by the given translation tables
+ * and it must be mapped at the granularity of a page.
+ *
+ * Return 0 on success, a negative value on error.
+ *
+ * In case of error, the memory attributes remain unchanged and this function
+ * has no effect.
+ *
+ * ctx
+ * Translation context to work on.
+ * base_va:
+ * Virtual address of the 1st page to change the attributes of.
+ * size:
+ * Size in bytes of the memory region.
+ * attr:
+ * New attributes of the page tables. The attributes that can be changed are
+ * data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
+ * and user/privileged access (MT_USER/MT_PRIVILEGED) in the case of contexts
+ * that are used in the EL1&0 translation regime. Also, note that this
+ * function doesn't allow to remap a region as RW and executable, or to remap
+ * device memory as executable.
+ *
+ * NOTE: The caller of this function must be able to write to the translation
+ * tables, i.e. the memory where they are stored must be mapped with read-write
+ * access permissions. This function assumes it is the case. If this is not
+ * the case then this function might trigger a data abort exception.
+ *
+ * NOTE2: The caller is responsible for making sure that the targeted
+ * translation tables are not modified by any other code while this function is
+ * executing.
+ */
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);
+
+/*
+ * Query the memory attributes of a memory page in a set of translation tables.
+ *
+ * Return 0 on success, a negative error code on error.
+ * On success, the attributes are stored into *attr.
+ *
+ * ctx
+ * Translation context to work on.
+ * base_va
+ * Virtual address of the page to get the attributes of.
+ * There are no alignment restrictions on this address. The attributes of the
+ * memory page it lies within are returned.
+ * attr
+ * Output parameter where to store the attributes of the targeted memory page.
+ */
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
+
+#endif /*__ASSEMBLY__*/
+#endif /* XLAT_TABLES_V2_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
new file mode 100644
index 000000000..fa8995886
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains internal definitions that are not supposed to be
+ * used outside of this library code.
+ */
+
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H
+
+#ifndef XLAT_TABLES_V2_H
+#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables_defs.h>
+
+/* Forward declaration */
+struct mmap_region;
+
+/*
+ * Helper macro to define an mmap_region_t. This macro allows to specify all
+ * the fields of the structure but its parameter list is not guaranteed to
+ * remain stable as we add members to mmap_region_t.
+ */
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
+ { \
+ .base_pa = (_pa), \
+ .base_va = (_va), \
+ .size = (_sz), \
+ .attr = (_attr), \
+ .granularity = (_gr), \
+ }
+
+/* Struct that holds all information about the translation tables. */
+struct xlat_ctx {
+ /*
+ * Max allowed Virtual and Physical Addresses.
+ */
+ unsigned long long pa_max_address;
+ uintptr_t va_max_address;
+
+ /*
+ * Array of all memory regions stored in order of ascending end address
+ * and ascending size to simplify the code that allows overlapping
+ * regions. The list is terminated by the first entry with size == 0.
+ * The max size of the list is stored in `mmap_num`. `mmap` points to an
+ * array of mmap_num + 1 elements, so that there is space for the final
+ * null entry.
+ */
+ struct mmap_region *mmap;
+ int mmap_num;
+
+ /*
+ * Array of finer-grain translation tables.
+ * For example, if the initial lookup level is 1 then this array would
+ * contain both level-2 and level-3 entries.
+ */
+ uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+ int tables_num;
+ /*
+ * Keep track of how many regions are mapped in each table. The base
+ * table can't be unmapped so it isn't needed to keep track of it.
+ */
+#if PLAT_XLAT_TABLES_DYNAMIC
+ int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+ int next_table;
+
+ /*
+ * Base translation table. It doesn't need to have the same amount of
+ * entries as the ones used for other levels.
+ */
+ uint64_t *base_table;
+ unsigned int base_table_entries;
+
+ /*
+ * Max Physical and Virtual addresses currently in use by the
+ * translation tables. These might get updated as we map/unmap memory
+ * regions but they will never go beyond pa/va_max_address.
+ */
+ unsigned long long max_pa;
+ uintptr_t max_va;
+
+ /* Level of the base translation table. */
+ unsigned int base_level;
+
+ /* Set to true when the translation tables are initialized. */
+ bool initialized;
+
+ /*
+ * Translation regime managed by this xlat_ctx_t. It should be one of
+ * the EL*_REGIME defines.
+ */
+ int xlat_regime;
+};
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ static int _ctx_name##_mapped_regions[_xlat_tables_count];
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .tables_mapped_regions = _ctx_name##_mapped_regions,
+#else
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ /* do nothing */
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ /* do nothing */
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, _virt_addr_space_size, \
+ _phy_addr_space_size, _xlat_regime, _section_name)\
+ CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size), \
+ assert_invalid_virtual_addr_space_size_for_##_ctx_name);\
+ \
+ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
+ assert_invalid_physical_addr_space_sizefor_##_ctx_name);\
+ \
+ static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
+ \
+ static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
+ [XLAT_TABLE_ENTRIES] \
+ __aligned(XLAT_TABLE_SIZE) __section(_section_name); \
+ \
+ static uint64_t _ctx_name##_base_xlat_table \
+ [GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
+ __aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
+ * sizeof(uint64_t)); \
+ \
+ XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ \
+ static xlat_ctx_t _ctx_name##_xlat_ctx = { \
+ .va_max_address = (_virt_addr_space_size) - 1UL, \
+ .pa_max_address = (_phy_addr_space_size) - 1ULL, \
+ .mmap = _ctx_name##_mmap, \
+ .mmap_num = (_mmap_count), \
+ .base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
+ .base_table = _ctx_name##_base_xlat_table, \
+ .base_table_entries = \
+ GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
+ .tables = _ctx_name##_xlat_tables, \
+ .tables_num = _xlat_tables_count, \
+ XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .xlat_regime = (_xlat_regime), \
+ .max_pa = 0U, \
+ .max_va = 0U, \
+ .next_table = 0, \
+ .initialized = false, \
+ }
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* XLAT_TABLES_V2_HELPERS_H */
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
new file mode 100644
index 000000000..a23fc506f
--- /dev/null
+++ b/include/plat/arm/common/arm_def.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_DEF_H__
+#define __ARM_DEF_H__
+
+/******************************************************************************
+ * Definitions common to all ARM standard platforms
+ *****************************************************************************/
+
+/*******************************************************************************
+ * Location of the memory buffer shared between Normal World (i.e. TFTF) and the
+ * Secure Partition (e.g. Cactus) to pass data associated to secure service
+ * requests.
+ * Note: This address has to match the one used in TF (see ARM_SP_IMAGE_NS_BUF_*
+ * macros).
+ ******************************************************************************/
+#define ARM_SECURE_SERVICE_BUFFER_BASE 0xff600000ull
+#define ARM_SECURE_SERVICE_BUFFER_SIZE 0x10000ull
+
+#endif /* __ARM_DEF_H__ */
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
new file mode 100644
index 000000000..3b0b5a613
--- /dev/null
+++ b/include/plat/arm/common/plat_arm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_ARM_H__
+#define __PLAT_ARM_H__
+
+/*
+ * Initialises the IO
+ * Returns: IO_SUCCESS
+ * IO_FAIL
+ * IO_NOT_SUPPORTED
+ * IO_RESOURCES_EXHAUSTED
+ */
+int arm_io_setup(void);
+
+/* Initialises the IO and the GIC. */
+void arm_platform_setup(void);
+
+/*******************************************************************************
+ * ARM platforms porting interfaces are located below.
+ ******************************************************************************/
+
+/* Initialises the Generic Interrupt Controller (GIC). */
+void plat_arm_gic_init(void);
+
+#endif /* __PLAT_ARM_H__ */
diff --git a/include/plat/common/plat_topology.h b/include/plat/common/plat_topology.h
new file mode 100644
index 000000000..1846b9173
--- /dev/null
+++ b/include/plat/common/plat_topology.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_TOPOLOGY_H__
+#define __PLAT_TOPOLOGY_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*
+ * This is the initial value of the power domain index when used
+ * as argument to the tftf topology helpers. They are also used
+ * to indicate the end of iterative topology navigation when returned
+ * by the tftf topology helpers.
+ */
+#define PWR_DOMAIN_INIT ((unsigned int) -1)
+
+/*
+ * Return the total number of clusters in the system.
+ * Currently there is correspondence between power level and affinity
+ * level and hence cluster power level corresponds to affinity level 1.
+ */
+#define tftf_get_total_clusters_count() tftf_get_total_aff_count(1)
+
+/*
+ * Return the total number of CPUs in the system (across all clusters).
+ * Currently there is correspondence between power level and affinity
+ * level and hence CPU power level correspond to affinity level 0.
+ */
+#define tftf_get_total_cpus_count() tftf_get_total_aff_count(0)
+
+/*
+ * Converts a 'core_pos' into an MPIDR. The 'core_pos' is a unique number
+ * corresponding to a CPU as returned by platform_get_core_pos() API
+ */
+#define tftf_core_pos_to_mpidr(core_pos) \
+ tftf_get_mpidr_from_node(core_pos + tftf_pwr_domain_start_idx[0])
+
+/*
+ * The following array stores the start index of each level in the power
+ * domain topology tree.
+ */
+extern unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];
+
+/*
+ * The following data structure represents a TFTF power domain node.
+ */
+typedef struct tftf_pwr_domain_node {
+ /*
+ * Array index of the first CPU in the topology array for which this
+ * power domain is the parent. If this power domain is a CPU, then
+ * `cpu_start_node` will be its own index in the power domain
+ * topology array.
+ */
+ unsigned int cpu_start_node;
+
+ /*
+ * Number of CPU power domains which belong to this power domain.
+ * i.e. all the CPUs in the range 'cpu_start_node
+ * -> cpu_start_node + ncpus - 1 ' will belong to this power domain.
+ * If this power domain is a CPU then 'ncpus' will be 1.
+ */
+ unsigned int ncpus;
+
+ /* Valid only for CPU power domains */
+ unsigned int mpidr;
+
+ /* Index of the parent power domain node */
+ unsigned int parent_node;
+
+ /*
+ * The level of this power domain node in the power domain topology
+ * tree. It could correspond to the affinity level but the platform
+ * could have power levels which do not match affinity levels.
+ */
+ unsigned int level;
+
+ /*
+ * The 'is_present' field is used to cater for power domains
+ * which are absent.
+ */
+ unsigned char is_present;
+} tftf_pwr_domain_node_t;
+
+/*
+ * Detect and store the platform topology so that test cases can query it later.
+ */
+void tftf_init_topology(void);
+
+/*
+ * Return the number of affinity instances implemented at the affinity level
+ * passed as an argument. This function returns 0 for any other affinity
+ * level higher than that supported by the platform.
+ */
+unsigned int tftf_get_total_aff_count(unsigned int aff_lvl);
+
+/*
+ * Returns the index of the next power domain after `pwr_domain_idx`
+ * in the topology tree at the same `aff_lvl`. The `pwr_domain_idx`
+ * has to be initialized to PWR_DOMAIN_INIT to get the first entry.
+ * It returns PWR_DOMAIN_INIT if none is found.
+ */
+unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
+ unsigned int pwr_lvl);
+
+/*
+ * Returns the index of the next CPU after the current CPU `cpu_node`
+ * which belongs to the power domain `pwr_domain_idx`. The `cpu_node`
+ * has to be initialized to PWR_DOMAIN_INIT to get the first entry.
+ * It returns PWR_DOMAIN_INIT if none is found.
+ */
+unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
+ unsigned int cpu_node);
+
+/*
+ * Return the node index of the next CPU after 'cpu_node' in the topology tree.
+ * Skip absent CPUs.
+ * cpu_node: Node index of the current CPU.
+ */
+unsigned int tftf_topology_next_cpu(unsigned int cpu_node);
+
+/*
+ * Iterate over every CPU. Skip absent CPUs.
+ * cpu: unsigned integer corresponding to the index of the cpu in
+ * the topology array. After the loop, cpu is equal to PWR_DOMAIN_INIT.
+ */
+#define for_each_cpu(cpu) \
+ for ((cpu) = PWR_DOMAIN_INIT; \
+ (cpu) = tftf_topology_next_cpu(cpu), \
+ (cpu) != PWR_DOMAIN_INIT;)
+
+/*
+ * Iterate over every power domain idx for a given level.
+ * - idx: unsigned integer corresponding to the power domain index. After the
+ * loop, idx is equal to PWR_DOMAIN_INIT.
+ * - lvl: level
+ */
+#define for_each_power_domain_idx(idx, lvl) \
+ for (idx = PWR_DOMAIN_INIT; \
+ idx = tftf_get_next_peer_domain(idx, lvl), \
+ idx != PWR_DOMAIN_INIT;)
+
+/*
+ * Iterate over every CPU in a power domain idx.
+ * - pwr_domain_idx: unsigned integer corresponding to the power domain index.
+ *   After the loop, cpu_idx is equal to PWR_DOMAIN_INIT.
+ * - cpu_idx: CPU index
+ */
+#define for_each_cpu_in_power_domain(pwr_domain_idx, cpu_idx) \
+ for (cpu_idx = PWR_DOMAIN_INIT; \
+ cpu_idx = tftf_get_next_cpu_in_pwr_domain(pwr_domain_idx, cpu_idx),\
+ cpu_idx != PWR_DOMAIN_INIT;)
+
+/*
+ * Returns the MPIDR of the CPU power domain node indexed by `cpu_node`
+ * or INVALID_MPID if it is absent.
+ */
+unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node);
+
+/*
+ * Query the platform topology to find another CPU than the one specified
+ * as an argument.
+ * Return the MPID of this other CPU, or INVALID_MPID if none could be found.
+ */
+unsigned int tftf_find_any_cpu_other_than(unsigned int exclude_mpid);
+
+/*
+ * Query the platform topology to find a random CPU other than the one specified
+ * as an argument.
+ * The difference between this function and tftf_find_any_cpu_other_than is
+ * the randomness in selecting a CPU.
+ * Return the MPID of this other CPU, or INVALID_MPID if none could be found.
+ */
+unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid);
+
+#endif /* __PLAT_TOPOLOGY_H__ */
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
new file mode 100644
index 000000000..d3a486c0a
--- /dev/null
+++ b/include/plat/common/platform.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_H__
+#define __PLATFORM_H__
+
+#include <stdint.h>
+#include <timer.h>
+#include <xlat_tables_v2.h>
+
+#define PLAT_PSCI_DUMMY_STATE_ID 0xF
+
+#define PWR_STATE_INIT_INDEX (-1)
+
+#define INIT_PWR_LEVEL_INDEX(array_name) \
+ do { \
+ unsigned int var; \
+ assert(ARRAY_SIZE(array_name) == (PLAT_MAX_PWR_LEVEL + 1)); \
+ for (var = 0; var <= PLAT_MAX_PWR_LEVEL; var++) \
+ array_name[var] = PWR_STATE_INIT_INDEX; \
+ } while (0)
+
+/*
+ * The platform structure to represent the valid local power state
+ * properties for a particular affinity level. The platform needs to
+ * export the array of valid local low power states for each affinity level
+ * it supports which can be queried by TFTF tests to construct the required
+ * composite power state.
+ *
+ * TODO: Currently the power levels are identity mapped to affinity level in
+ * TFTF which need to be decoupled.
+ */
+typedef struct plat_state_prop {
+ /*
+ * This field has a value in the increasing order of the suspend
+ * depth. Deeper the suspend state, higher the value.
+ */
+ unsigned int suspend_depth;
+ /* The local state ID for the idle state at this level. */
+ unsigned int state_ID;
+ /* Flag which indicates whether is a retention or power down state */
+ unsigned int is_pwrdown;
+} plat_state_prop_t;
+
+void tftf_plat_arch_setup(void);
+void tftf_early_platform_setup(void);
+void tftf_platform_setup(void);
+
+void tftf_plat_enable_mmu(void);
+void tftf_plat_configure_mmu(void);
+
+void tftf_platform_end(void);
+void tftf_platform_watchdog_set(void);
+void tftf_platform_watchdog_reset(void);
+
+/* Gets a handle for the initialised IO entity */
+void plat_get_nvm_handle(uintptr_t *handle);
+
+/*
+ * Returns the platform topology description array. The size of this
+ * array should be PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1.
+ */
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void);
+
+/*
+ * Function to query the MPIDR of a CPU identified by 'core_pos' which is
+ * the number returned by platform_get_core_pos() API.
+ * In case the CPU is absent, then this API returns INVALID_MPID. This
+ * function will be queried only during topology setup in TFTF and thereafter
+ * the internal node data will be used to get the MPIDR corresponding
+ * to the 'core_pos'.
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos);
+
+/*
+ * Get the state property array for all the valid states from platform for
+ * a specified 'level'. The array is expected to be NULL terminated after the
+ * last entry.
+ */
+const plat_state_prop_t *plat_get_state_prop(unsigned int level);
+
+/*
+ * Initialises state info data structures for generating various combinations
+ * of state ID's. It also calls tftf_detect_pstate_format() which detects the
+ * PSTATE format accepted by EL3 firmware.
+ * This function needs to be invoked once during cold boot prior to the
+ * invocation of any PSCI power state helper functions.
+ */
+void tftf_init_pstate_framework(void);
+
+/*
+ * This function is used to generate all possible combinations of composite
+ * state ID's possible for a given set of power states at each level.
+ * Ex: If a system implements 4 levels and each level has 3 local power states.
+ * Then, the total combinations of composite power down states possible are:
+ * 3 * 3 * 3 * 3 = 81
+ *
+ * A single call to set_next_state_id_pointers(), sets pointer to pstate_id_idx
+ * at all levels for a possible combination out of 81.
+ *
+ * A caller can confirm when all combinations are completed by checking if
+ * pwr_level_state_indexes for power_level 0 is PWR_STATE_INIT_INDEX
+ */
+void tftf_set_next_state_id_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * This function sets the index for the next state ID of the given power level
+ */
+void tftf_set_next_local_state_id_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * This function sets the index corresponding to the deepest power state at
+ * a given power level.
+ */
+void tftf_set_deepest_pstate_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * Helper function to get the state ID, state type, power level in power_state
+ * parameter of CPU_SUSPEND. The generated values are based on the
+ * pstate_id_idx values of a core.
+ *
+ * This helper expects a valid pstate_id_idx till the max valid levels
+ * and it detects the max valid level to be terminated by PWR_STATE_INIT value
+ *
+ * It returns the expected PSCI return value of a suspend request
+ */
+int tftf_get_pstate_vars(unsigned int *test_power_level,
+ unsigned int *test_suspend_type,
+ unsigned int *suspend_state_id,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * This function gets the platform specific timer driver information and
+ * initialises platform specific drivers.
+ * Returns 0 on success.
+ */
+int plat_initialise_timer_ops(const plat_timer_t **timer_ops);
+
+struct mem_region {
+ uintptr_t addr;
+ size_t size;
+};
+
+typedef struct mem_region mem_region_t;
+
+/*******************************************************************************
+ * Optional functions. A default, weak implementation of those functions is
+ * provided, it may be overridden by platform code.
+ ******************************************************************************/
+unsigned int platform_get_core_pos(unsigned long mpid);
+unsigned long platform_get_stack(unsigned long mpidr);
+int plat_crash_console_init(void);
+int plat_crash_console_putc(int c);
+int plat_crash_console_flush(void);
+/*
+ * plat_get_prot_regions: It returns a pointer to a
+ * set of regions used to test mem_protect_check.
+ * The number of elements are stored in the variable
+ * pointed by nelem.
+ */
+const mem_region_t *plat_get_prot_regions(int *nelem);
+
+void tftf_plat_reset(void);
+
+const mmap_region_t *tftf_platform_get_mmap(void);
+
+/*
+ * Return an IO device handle and specification which can be used
+ * to access an image. Use this to enforce platform load policy.
+ */
+int plat_get_image_source(unsigned int image_id,
+ uintptr_t *dev_handle,
+ uintptr_t *image_spec);
+
+void plat_fwu_io_setup(void);
+
+#endif /* __PLATFORM_H__ */
diff --git a/include/runtime_services/arm_arch_svc.h b/include/runtime_services/arm_arch_svc.h
new file mode 100644
index 000000000..12358f8a6
--- /dev/null
+++ b/include/runtime_services/arm_arch_svc.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_ARCH_SVC_H__
+#define __ARM_ARCH_SVC_H__
+
+#define SMCCC_VERSION 0x80000000
+#define SMCCC_ARCH_FEATURES 0x80000001
+#define SMCCC_ARCH_WORKAROUND_1 0x80008000
+#define SMCCC_ARCH_WORKAROUND_2 0x80007FFF
+
+#endif /* __ARM_ARCH_SVC_H__ */
diff --git a/include/runtime_services/pmf.h b/include/runtime_services/pmf.h
new file mode 100644
index 000000000..04f79c14f
--- /dev/null
+++ b/include/runtime_services/pmf.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMF_H__
+#define __PMF_H__
+
+#ifndef __ASSEMBLY__
+#include <assert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+#endif
+
+/*
+ * Constants used for/by PMF services.
+ */
+#define PMF_ARM_TIF_IMPL_ID 0x41
+#define PMF_TID_SHIFT 0
+#define PMF_TID_MASK (0xFF << PMF_TID_SHIFT)
+#define PMF_SVC_ID_SHIFT 10
+#define PMF_SVC_ID_MASK (0x3F << PMF_SVC_ID_SHIFT)
+#define PMF_IMPL_ID_SHIFT 24
+#define PMF_IMPL_ID_MASK (0xFFU << PMF_IMPL_ID_SHIFT)
+
+/*
+ * Flags passed to PMF_GET_TIMESTAMP_XXX and PMF_CAPTURE_TIMESTAMP
+ */
+#define PMF_CACHE_MAINT (1 << 0)
+#define PMF_NO_CACHE_MAINT 0
+
+/*
+ * Defines for PMF SMC function ids.
+ */
+#ifdef AARCH32
+#define PMF_SMC_GET_TIMESTAMP 0x82000010
+#else
+#define PMF_SMC_GET_TIMESTAMP 0xC2000010
+#endif
+
+#define PMF_NUM_SMC_CALLS 2
+
+/* Following are the supported PMF service IDs */
+#define PMF_PSCI_STAT_SVC_ID 0
+#define PMF_RT_INSTR_SVC_ID 1
+
+#endif /* __PMF_H__ */
diff --git a/include/runtime_services/psci.h b/include/runtime_services/psci.h
new file mode 100644
index 000000000..1ecfc63d6
--- /dev/null
+++ b/include/runtime_services/psci.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Definitions related to the Power State Coordination Interface (PSCI)
+ * as per the SMC Calling Convention.
+ *
+ * PSCI calls are a subset of the Standard Service Calls.
+ */
+
+#ifndef __PSCI_H__
+#define __PSCI_H__
+
+#ifndef __ASSEMBLY__
+#include <assert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+#endif
+
+/*******************************************************************************
+ * Macro to create the array entry for psci_functions[]
+ ******************************************************************************/
+#define DEFINE_PSCI_FUNC(_func_id, _mandatory) \
+ { SMC_##_func_id, _mandatory, "SMC_" # _func_id }
+
+/*******************************************************************************
+ * Defines for runtime services function ids
+ ******************************************************************************/
+#define SMC_PSCI_VERSION 0x84000000
+#define SMC_PSCI_CPU_SUSPEND_AARCH32 0x84000001
+#define SMC_PSCI_CPU_SUSPEND_AARCH64 0xc4000001
+#define SMC_PSCI_CPU_OFF 0x84000002
+#define SMC_PSCI_CPU_ON_AARCH32 0x84000003
+#define SMC_PSCI_CPU_ON_AARCH64 0xc4000003
+#define SMC_PSCI_AFFINITY_INFO_AARCH32 0x84000004
+#define SMC_PSCI_AFFINITY_INFO_AARCH64 0xc4000004
+#define SMC_PSCI_MIG_AARCH32 0x84000005
+#define SMC_PSCI_MIG_AARCH64 0xc4000005
+#define SMC_PSCI_MIG_INFO_TYPE 0x84000006
+#define SMC_PSCI_MIG_INFO_UP_CPU_AARCH32 0x84000007
+#define SMC_PSCI_MIG_INFO_UP_CPU_AARCH64 0xc4000007
+#define SMC_PSCI_SYSTEM_OFF 0x84000008
+#define SMC_PSCI_SYSTEM_RESET 0x84000009
+#define SMC_PSCI_FEATURES 0x8400000a
+#define SMC_PSCI_CPU_FREEZE 0x8400000b
+#define SMC_PSCI_CPU_DEFAULT_SUSPEND32 0x8400000c
+#define SMC_PSCI_CPU_DEFAULT_SUSPEND64 0xc400000c
+#define SMC_PSCI_CPU_HW_STATE32 0x8400000d
+#define SMC_PSCI_CPU_HW_STATE64 0xc400000d
+#define SMC_PSCI_SYSTEM_SUSPEND32 0x8400000e
+#define SMC_PSCI_SYSTEM_SUSPEND64 0xc400000e
+#define SMC_PSCI_SET_SUSPEND_MODE 0x8400000f
+#define SMC_PSCI_STAT_RESIDENCY32 0x84000010
+#define SMC_PSCI_STAT_RESIDENCY64 0xc4000010
+#define SMC_PSCI_STAT_COUNT32 0x84000011
+#define SMC_PSCI_STAT_COUNT64 0xc4000011
+#define SMC_PSCI_RESET2_AARCH32 0x84000012
+#define SMC_PSCI_RESET2_AARCH64 0xc4000012
+#define SMC_PSCI_MEM_PROTECT 0x84000013
+#define SMC_PSCI_MEM_PROTECT_CHECK_RANGE32 0x84000014
+#define SMC_PSCI_MEM_PROTECT_CHECK_RANGE64 0xc4000014
+
+/*
+ * Architecture-specific SMC function IDs
+ */
+#ifndef AARCH32
+#define SMC_PSCI_CPU_SUSPEND SMC_PSCI_CPU_SUSPEND_AARCH64
+#define SMC_PSCI_CPU_ON SMC_PSCI_CPU_ON_AARCH64
+#define SMC_PSCI_AFFINITY_INFO SMC_PSCI_AFFINITY_INFO_AARCH64
+#define SMC_PSCI_MIG SMC_PSCI_MIG_AARCH64
+#define SMC_PSCI_MIG_INFO_UP_CPU SMC_PSCI_MIG_INFO_UP_CPU_AARCH64
+#define SMC_PSCI_CPU_DEFAULT_SUSPEND SMC_PSCI_CPU_DEFAULT_SUSPEND64
+#define SMC_PSCI_CPU_HW_STATE SMC_PSCI_CPU_HW_STATE64
+#define SMC_PSCI_SYSTEM_SUSPEND SMC_PSCI_SYSTEM_SUSPEND64
+#define SMC_PSCI_STAT_RESIDENCY SMC_PSCI_STAT_RESIDENCY64
+#define SMC_PSCI_STAT_COUNT SMC_PSCI_STAT_COUNT64
+#define SMC_PSCI_RESET2 SMC_PSCI_RESET2_AARCH64
+#define SMC_PSCI_MEM_PROTECT_CHECK SMC_PSCI_MEM_PROTECT_CHECK_RANGE64
+#else
+#define SMC_PSCI_CPU_SUSPEND SMC_PSCI_CPU_SUSPEND_AARCH32
+#define SMC_PSCI_CPU_ON SMC_PSCI_CPU_ON_AARCH32
+#define SMC_PSCI_AFFINITY_INFO SMC_PSCI_AFFINITY_INFO_AARCH32
+#define SMC_PSCI_MIG SMC_PSCI_MIG_AARCH32
+#define SMC_PSCI_MIG_INFO_UP_CPU SMC_PSCI_MIG_INFO_UP_CPU_AARCH32
+#define SMC_PSCI_CPU_DEFAULT_SUSPEND SMC_PSCI_CPU_DEFAULT_SUSPEND32
+#define SMC_PSCI_CPU_HW_STATE SMC_PSCI_CPU_HW_STATE32
+#define SMC_PSCI_SYSTEM_SUSPEND SMC_PSCI_SYSTEM_SUSPEND32
+#define SMC_PSCI_STAT_RESIDENCY SMC_PSCI_STAT_RESIDENCY32
+#define SMC_PSCI_STAT_COUNT SMC_PSCI_STAT_COUNT32
+#define SMC_PSCI_RESET2 SMC_PSCI_RESET2_AARCH32
+#define SMC_PSCI_MEM_PROTECT_CHECK SMC_PSCI_MEM_PROTECT_CHECK_RANGE32
+#endif
+
+/*
+ * Number of PSCI calls defined in the PSCI specification.
+ */
+#define PSCI_NUM_CALLS 33
+
+#ifndef __ASSEMBLY__
+typedef struct {
+ uint32_t id;
+ bool mandatory;
+ const char *str;
+} psci_function_t;
+
+extern const psci_function_t psci_functions[PSCI_NUM_CALLS];
+#endif /* __ASSEMBLY__ */
+
+/*******************************************************************************
+ * PSCI Migrate specific defines
+ ******************************************************************************/
+#define PSCI_TOS_UP_MIG_CAP 0
+#define PSCI_TOS_NOT_UP_MIG_CAP 1
+#define PSCI_TOS_NOT_PRESENT_MP 2
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND 'power_state' parameter specific defines
+ ******************************************************************************/
+/* Original format */
+#define PSTATE_ID_SHIFT 0
+#define PSTATE_TYPE_SHIFT 16
+#define PSTATE_AFF_LVL_SHIFT 24
+#define PSTATE_ID_MASK 0xFFFF
+#define PSTATE_AFF_LVL_MASK 0x3
+
+#define psci_state_id_valid(state_id) \
+ (((state_id) & ~PSTATE_ID_MASK) == 0)
+
+#define psci_affinity_level_valid(aff_level) \
+ (((aff_level) & ~PSTATE_AFF_LVL_MASK) == 0)
+
+/* Extended format */
+#define PSTATE_ID_SHIFT_EXT 0
+#define PSTATE_TYPE_SHIFT_EXT 30
+#define PSTATE_ID_MASK_EXT 0xFFFFFFF
+
+#define psci_state_id_ext_valid(state_id) \
+ (((state_id) & ~PSTATE_ID_MASK_EXT) == 0)
+
+/* Definitions common to original and extended StateID formats */
+#define PSTATE_TYPE_MASK 0x1
+
+#define PSTATE_TYPE_STANDBY 0x0
+#define PSTATE_TYPE_POWERDOWN 0x1
+
+#define PSTATE_AFF_LVL_0 0
+#define PSTATE_AFF_LVL_1 1
+#define PSTATE_AFF_LVL_2 2
+#define PSTATE_AFF_LVL_3 3
+
+#define psci_state_type_valid(state_type) \
+ (((state_type) & ~PSTATE_TYPE_MASK) == 0)
+
+/*******************************************************************************
+ * PSCI 'Feature Flags' defines for CPU_SUSPEND
+ ******************************************************************************/
+#define CPU_SUSPEND_FEAT_OS_INIT_MODE_SHIFT 0
+#define CPU_SUSPEND_FEAT_PSTATE_FORMAT_SHIFT 1
+#define CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL 0
+#define CPU_SUSPEND_FEAT_PSTATE_FORMAT_EXTENDED 1
+
+#define CPU_SUSPEND_FEAT_VALID_MASK \
+ ((1 << CPU_SUSPEND_FEAT_OS_INIT_MODE_SHIFT) | \
+ (1 << CPU_SUSPEND_FEAT_PSTATE_FORMAT_SHIFT))
+
+/*******************************************************************************
+ * PSCI 'Feature Flags' defines for functions other than CPU_SUSPEND
+ ******************************************************************************/
+#define PSCI_FEATURE_FLAGS_ZERO 0
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Construct the local State-ID for a particular level according to
+ * the platform specific local state-ID width.
+ */
+#define psci_make_local_state_id(level, local_state) \
+ (((local_state) & ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1)) \
+ << (PLAT_LOCAL_PSTATE_WIDTH * (level)))
+#endif
+
+/*******************************************************************************
+ * PSCI version
+ ******************************************************************************/
+#define PSCI_MAJOR_VER 1
+#define PSCI_MINOR_VER 1
+#define PSCI_MAJOR_VER_SHIFT 16
+#define PSCI_MAJOR_VER_MASK 0xFFFF0000
+#define PSCI_VERSION(major, minor) ((major << PSCI_MAJOR_VER_SHIFT) \
+ | minor)
+
+/*******************************************************************************
+ * PSCI error codes
+ ******************************************************************************/
+#define PSCI_E_SUCCESS 0
+#define PSCI_E_NOT_SUPPORTED -1
+#define PSCI_E_INVALID_PARAMS -2
+#define PSCI_E_DENIED -3
+#define PSCI_E_ALREADY_ON -4
+#define PSCI_E_ON_PENDING -5
+#define PSCI_E_INTERN_FAIL -6
+#define PSCI_E_NOT_PRESENT -7
+#define PSCI_E_DISABLED -8
+#define PSCI_E_INVALID_ADDRESS -9
+
+/*******************************************************************************
+ * PSCI affinity state related constants.
+ ******************************************************************************/
+#define PSCI_STATE_ON 0x0
+#define PSCI_STATE_OFF 0x1
+#define PSCI_STATE_ON_PENDING 0x2
+
+/*******************************************************************************
+ * PSCI node hardware state related constants.
+ ******************************************************************************/
+#define PSCI_HW_STATE_ON 0x0
+#define PSCI_HW_STATE_OFF 0x1
+#define PSCI_HW_STATE_STANDBY 0x2
+
+#endif /* __PSCI_H__ */
diff --git a/include/runtime_services/sdei.h b/include/runtime_services/sdei.h
new file mode 100644
index 000000000..0e0a24990
--- /dev/null
+++ b/include/runtime_services/sdei.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDEI_H__
+#define __SDEI_H__
+
+#define SDEI_VERSION 0xC4000020
+#define SDEI_EVENT_REGISTER 0xC4000021
+#define SDEI_EVENT_ENABLE 0xC4000022
+#define SDEI_EVENT_DISABLE 0xC4000023
+#define SDEI_EVENT_CONTEXT 0xC4000024
+#define SDEI_EVENT_COMPLETE 0xC4000025
+#define SDEI_EVENT_COMPLETE_AND_RESUME 0xC4000026
+
+#define SDEI_EVENT_UNREGISTER 0xC4000027
+#define SDEI_EVENT_STATUS 0xC4000028
+#define SDEI_EVENT_GET_INFO 0xC4000029
+#define SDEI_EVENT_ROUTING_SET 0xC400002A
+#define SDEI_PE_MASK 0xC400002B
+#define SDEI_PE_UNMASK 0xC400002C
+
+#define SDEI_INTERRUPT_BIND 0xC400002D
+#define SDEI_INTERRUPT_RELEASE 0xC400002E
+#define SDEI_EVENT_SIGNAL 0xC400002F
+#define SDEI_FEATURES 0xC4000030
+#define SDEI_PRIVATE_RESET 0xC4000031
+#define SDEI_SHARED_RESET 0xC4000032
+
+/* For debug */
+#define SDEI_SHOW_DEBUG 0xC400003F
+
+/* SDEI_EVENT_REGISTER flags */
+#define SDEI_REGF_RM_ANY 0
+#define SDEI_REGF_RM_PE 1
+
+/* SDEI_EVENT_COMPLETE status flags */
+#define SDEI_EV_HANDLED 0
+#define SDEI_EV_FAILED 1
+
+/* SDEI event status values in bit position */
+#define SDEI_STATF_REGISTERED 0
+#define SDEI_STATF_ENABLED 1
+#define SDEI_STATF_RUNNING 2
+
+#define SDEI_INFOF_TYPE 0
+#define SDEI_INFOF_SIGNALABLE 1
+#define SDEI_INFOF_ROUTING_MODE 2
+#define SDEI_INFOF_ROUTING_AFF 3
+
+#define SMC_EINVAL 2
+#define SMC_EDENY 3
+#define SMC_EPEND 5
+#define SMC_ENOMEM 10
+
+#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
+ (((uint64_t)(_major)) << 48 | \
+ ((uint64_t)(_minor)) << 32 | \
+ (_vendor))
+
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+struct sdei_intr_ctx {
+ unsigned int priority;
+ unsigned int num;
+ unsigned int enabled;
+};
+
+typedef int sdei_handler_t(int ev, uint64_t arg);
+
+void sdei_trigger_event(void);
+void sdei_handler_done(void);
+
+int64_t sdei_version(void);
+int64_t sdei_interrupt_bind(int intr, struct sdei_intr_ctx *intr_ctx);
+int64_t sdei_interrupt_release(int intr, const struct sdei_intr_ctx *intr_ctx);
+int64_t sdei_event_register(int ev, sdei_handler_t *ep,
+ uint64_t ep_arg, int flags, uint64_t mpidr);
+int64_t sdei_event_unregister(int ev);
+int64_t sdei_event_enable(int ev);
+int64_t sdei_event_disable(int ev);
+int64_t sdei_pe_mask(void);
+int64_t sdei_pe_unmask(void);
+int64_t sdei_private_reset(void);
+int64_t sdei_shared_reset(void);
+int64_t sdei_event_signal(uint64_t mpidr);
+int64_t sdei_event_status(int32_t ev);
+int64_t sdei_event_routing_set(int32_t ev, uint64_t flags);
+int64_t sdei_event_context(uint32_t param);
+int64_t sdei_event_complete(uint32_t flags);
+int64_t sdei_event_complete_and_resume(uint64_t addr);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SDEI_H__ */
diff --git a/include/runtime_services/secure_el0_payloads/mm_svc.h b/include/runtime_services/secure_el0_payloads/mm_svc.h
new file mode 100644
index 000000000..290fd7d1b
--- /dev/null
+++ b/include/runtime_services/secure_el0_payloads/mm_svc.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MM_SVC_H__
+#define __MM_SVC_H__
+
+#include <utils_def.h>
+
+#define MM_VERSION_MAJOR U(1)
+#define MM_VERSION_MAJOR_SHIFT 16
+#define MM_VERSION_MAJOR_MASK U(0x7FFF)
+#define MM_VERSION_MINOR U(0)
+#define MM_VERSION_MINOR_SHIFT 0
+#define MM_VERSION_MINOR_MASK U(0xFFFF)
+#define MM_VERSION_FORM(major, minor) ((major << MM_VERSION_MAJOR_SHIFT) | (minor))
+#define MM_VERSION_COMPILED MM_VERSION_FORM(MM_VERSION_MAJOR, MM_VERSION_MINOR)
+
+/*
+ * SMC IDs defined in [1] for accessing MM services from the Non-secure world.
+ * These FIDs occupy the range 0x40 - 0x5f.
+ * [1] DEN0060A_ARM_MM_Interface_Specification.pdf
+ */
+#define MM_VERSION_AARCH32 U(0x84000040)
+
+#define MM_COMMUNICATE_AARCH64 U(0xC4000041)
+#define MM_COMMUNICATE_AARCH32 U(0x84000041)
+
+#endif /* __MM_SVC_H__ */
diff --git a/include/runtime_services/secure_el0_payloads/secure_partition.h b/include/runtime_services/secure_el0_payloads/secure_partition.h
new file mode 100644
index 000000000..4ee466c80
--- /dev/null
+++ b/include/runtime_services/secure_el0_payloads/secure_partition.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SECURE_PARTITION_H__
+#define __SECURE_PARTITION_H__
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <param_header.h>
+#include <types.h>
+#endif
+#include <utils_def.h>
+
+/*
+ * Definitions used to access the members of secure_partition_boot_info from
+ * assembly code.
+ */
+#define SP_BOOT_INFO_STACK_BASE_OFFSET U(32)
+#define SP_BOOT_INFO_IMAGE_SIZE_OFFSET U(64)
+#define SP_BOOT_INFO_PCPU_STACK_SIZE_OFFSET U(72)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Flags used by the secure_partition_mp_info structure to describe the
+ * characteristics of a cpu. Only a single flag is defined at the moment to
+ * indicate the primary cpu.
+ */
+#define MP_INFO_FLAG_PRIMARY_CPU U(0x00000001)
+
+/*
+ * This structure is used to provide information required to initialise a S-EL0
+ * partition.
+ */
+typedef struct secure_partition_mp_info {
+ uint64_t mpidr;
+ uint32_t linear_id;
+ uint32_t flags;
+} secure_partition_mp_info_t;
+
+typedef struct secure_partition_boot_info {
+ param_header_t h;
+ uint64_t sp_mem_base;
+ uint64_t sp_mem_limit;
+ uint64_t sp_image_base;
+ uint64_t sp_stack_base;
+ uint64_t sp_heap_base;
+ uint64_t sp_ns_comm_buf_base;
+ uint64_t sp_shared_buf_base;
+ uint64_t sp_image_size;
+ uint64_t sp_pcpu_stack_size;
+ uint64_t sp_heap_size;
+ uint64_t sp_ns_comm_buf_size;
+ uint64_t sp_shared_buf_size;
+ uint32_t num_sp_mem_regions;
+ uint32_t num_cpus;
+ secure_partition_mp_info_t *mp_info;
+} secure_partition_boot_info_t;
+
+/*
+ * This structure is used to pass data associated to secure service requests.
+ */
+#define SPS_MAX_PAYLOAD_SIZE 32
+typedef struct secure_partition_request_info {
+ uint32_t id;
+ uint64_t data_size;
+ uint8_t data[SPS_MAX_PAYLOAD_SIZE];
+} secure_partition_request_info_t;
+
+/* Define some fast secure partition requests (SPS) IDs. */
+#define SPS_TIMER_SLEEP 1
+#define SPS_CHECK_ALIVE 2
+
+#define CACTUS_FAST_REQUEST_SUCCESS 0xCACF900D
+
+secure_partition_request_info_t *create_sps_request(uint32_t id,
+ const void *data,
+ uint64_t data_size);
+
+/*
+ * Compile time assertions related to the 'secure_partition_boot_info' structure
+ * to ensure that the assembler and the compiler view of the offsets of the
+ * structure members is the same.
+ */
+CASSERT(SP_BOOT_INFO_STACK_BASE_OFFSET ==
+ __builtin_offsetof(secure_partition_boot_info_t, sp_stack_base), \
+ assert_secure_partition_boot_info_sp_stack_base_offset_mismatch);
+
+CASSERT(SP_BOOT_INFO_IMAGE_SIZE_OFFSET ==
+ __builtin_offsetof(secure_partition_boot_info_t, sp_image_size), \
+ assert_secure_partition_boot_info_sp_image_size_offset_mismatch);
+
+CASSERT(SP_BOOT_INFO_PCPU_STACK_SIZE_OFFSET ==
+ __builtin_offsetof(secure_partition_boot_info_t, sp_pcpu_stack_size), \
+ assert_secure_partition_boot_info_sp_pcpu_stack_size_offset_mismatch);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SECURE_PARTITION_H__ */
diff --git a/include/runtime_services/secure_el0_payloads/spm_svc.h b/include/runtime_services/secure_el0_payloads/spm_svc.h
new file mode 100644
index 000000000..e3b6cc517
--- /dev/null
+++ b/include/runtime_services/secure_el0_payloads/spm_svc.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPM_SVC_H__
+#define __SPM_SVC_H__
+
+#include <utils_def.h>
+
+#define SPM_VERSION_MAJOR U(0)
+#define SPM_VERSION_MAJOR_SHIFT 16
+#define SPM_VERSION_MAJOR_MASK U(0x7FFF)
+#define SPM_VERSION_MINOR U(1)
+#define SPM_VERSION_MINOR_SHIFT 0
+#define SPM_VERSION_MINOR_MASK U(0xFFFF)
+#define SPM_VERSION_FORM(major, minor) ((major << SPM_VERSION_MAJOR_SHIFT) | (minor))
+#define SPM_VERSION_COMPILED SPM_VERSION_FORM(SPM_VERSION_MAJOR, SPM_VERSION_MINOR)
+
+/* The macros below are used to identify SPM calls from the SMC function ID */
+#define SPM_FID_MASK U(0xffff)
+#define SPM_FID_MIN_VALUE U(0x40)
+#define SPM_FID_MAX_VALUE U(0xbf)
+#define is_spm_fid(_fid) \
+ ((((_fid) & SPM_FID_MASK) >= SPM_FID_MIN_VALUE) && \
+ (((_fid) & SPM_FID_MASK) <= SPM_FID_MAX_VALUE))
+
+/*
+ * SMC IDs defined for accessing services implemented by the Secure Partition
+ * Manager from the Secure Partition(s). These services enable a partition to
+ * handle delegated events and request privileged operations from the manager.
+ * They occupy the range 0x60-0x7f.
+ */
+#define SPM_VERSION_AARCH32 U(0x84000060)
+#define SP_EVENT_COMPLETE_AARCH64 U(0xC4000061)
+#define SP_MEMORY_ATTRIBUTES_GET_AARCH64 U(0xC4000064)
+#define SP_MEMORY_ATTRIBUTES_SET_AARCH64 U(0xC4000065)
+
+/*
+ * Macros used by SP_MEMORY_ATTRIBUTES_SET_AARCH64.
+ */
+
+#define SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS U(0)
+#define SP_MEMORY_ATTRIBUTES_ACCESS_RW U(1)
+/* Value U(2) is reserved. */
+#define SP_MEMORY_ATTRIBUTES_ACCESS_RO U(3)
+#define SP_MEMORY_ATTRIBUTES_ACCESS_MASK U(3)
+#define SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT 0
+
+#define SP_MEMORY_ATTRIBUTES_EXEC (U(0) << 2)
+#define SP_MEMORY_ATTRIBUTES_NON_EXEC (U(1) << 2)
+
+/* SPM error codes. */
+#define SPM_SUCCESS 0
+#define SPM_NOT_SUPPORTED -1
+#define SPM_INVALID_PARAMETER -2
+#define SPM_DENIED -3
+#define SPM_NO_MEMORY -5
+#define SPM_NOT_PRESENT -7
+
+
+#endif /* __SPM_SVC_H__ */
diff --git a/include/runtime_services/secure_el1_payloads/tsp.h b/include/runtime_services/secure_el1_payloads/tsp.h
new file mode 100644
index 000000000..6a4a3d6de
--- /dev/null
+++ b/include/runtime_services/secure_el1_payloads/tsp.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains definitions related to the Trusted Firmware-A
+ * Test Secure-EL1 Payload (TSP).
+ */
+
+#ifndef __TSP_H__
+#define __TSP_H__
+
+#include <psci.h>
+#include <uuid.h>
+
+static const uuid_t tsp_uuid = {
+ 0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
+ { 0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa }
+};
+
+/*
+ * Identifiers for various TSP services. Corresponding function IDs (whether
+ * fast or standard) are generated by macros defined below
+ */
+#define TSP_ADD 0x2000
+#define TSP_SUB 0x2001
+#define TSP_MUL 0x2002
+#define TSP_DIV 0x2003
+#define TSP_HANDLE_SEL1_INTR_AND_RETURN 0x2004
+
+/*
+ * Identify a TSP service from function ID filtering the last 16 bits from the
+ * SMC function ID
+ */
+#define TSP_BARE_FID(fid) ((fid) & 0xffff)
+/*
+ * Generate function IDs for TSP services to be used in SMC calls, by
+ * appropriately setting bit 31 to differentiate standard and fast SMC calls
+ */
+#define TSP_STD_FID(fid) ((TSP_BARE_FID(fid) | 0x72000000))
+#define TSP_FAST_FID(fid) ((TSP_BARE_FID(fid) | 0x72000000) | (1u << 31))
+
+/* SMC function ID to request a previously preempted std smc */
+#define TSP_FID_RESUME TSP_STD_FID(0x3000)
+/*
+ * SMC function ID to request abortion of a previously preempted std smc. A
+ * fast SMC is used so that the TSP abort handler does not have to be
+ * reentrant.
+ */
+#define TSP_FID_ABORT TSP_FAST_FID(0x3001)
+
+#define TSP_SMC_PREEMPTED -2
+
+/*
+ * Total number of function IDs implemented for services offered to NS clients.
+ * The function IDs are defined above
+ */
+#define TSP_NUM_FID 0x5
+
+/* TSP implementation revision numbers */
+#define TSP_REVISION_MAJOR 0x0
+#define TSP_REVISION_MINOR 0x1
+
+/* TSP is multiprocessor capable so does not require migration */
+#define TSP_MIGRATE_INFO PSCI_TOS_NOT_PRESENT_MP
+
+#endif /* __TSP_H__ */
diff --git a/include/runtime_services/smccc.h b/include/runtime_services/smccc.h
new file mode 100644
index 000000000..c8bc1b93f
--- /dev/null
+++ b/include/runtime_services/smccc.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMCCC_H__
+#define __SMCCC_H__
+
+#include <utils_def.h>
+
+#define SMCCC_VERSION_MAJOR_SHIFT U(16)
+#define SMCCC_VERSION_MAJOR_MASK U(0x7FFF)
+#define SMCCC_VERSION_MINOR_SHIFT U(0)
+#define SMCCC_VERSION_MINOR_MASK U(0xFFFF)
+#define MAKE_SMCCC_VERSION(_major, _minor) \
+ ((((uint32_t)(_major) & SMCCC_VERSION_MAJOR_MASK) << \
+ SMCCC_VERSION_MAJOR_SHIFT) \
+ | (((uint32_t)(_minor) & SMCCC_VERSION_MINOR_MASK) << \
+ SMCCC_VERSION_MINOR_SHIFT))
+
+#define SMCCC_MAJOR_VERSION 1
+#define SMCCC_MINOR_VERSION 1
+
+#define SMC_UNKNOWN -1
+
+/* TODO: Import SMCCC 2.0 properly instead of having this */
+#define FUNCID_NAMESPACE_SHIFT U(28)
+#define FUNCID_NAMESPACE_MASK U(0x3)
+#define FUNCID_NAMESPACE_WIDTH U(2)
+#define FUNCID_NAMESPACE_SPRT U(2)
+#define FUNCID_NAMESPACE_SPCI U(3)
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
+
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+#endif /* __SMCCC_H__ */
diff --git a/include/runtime_services/sprt_svc.h b/include/runtime_services/sprt_svc.h
new file mode 100644
index 000000000..e789880b5
--- /dev/null
+++ b/include/runtime_services/sprt_svc.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPRT_SVC_H__
+#define __SPRT_SVC_H__
+
+#include <smccc.h>
+#include <utils_def.h>
+
+/* SPRT_VERSION helpers */
+#define SPRT_VERSION_MAJOR U(0)
+#define SPRT_VERSION_MAJOR_SHIFT 16
+#define SPRT_VERSION_MAJOR_MASK U(0x7FFF)
+/* TODO: Move up minor version to 1 when SPRT is properly supported. */
+#define SPRT_VERSION_MINOR U(0)
+#define SPRT_VERSION_MINOR_SHIFT 0
+#define SPRT_VERSION_MINOR_MASK U(0xFFFF)
+#define SPRT_VERSION_FORM(major, minor) ((((major) & SPRT_VERSION_MAJOR_MASK) << SPRT_VERSION_MAJOR_SHIFT) | \
+ ((minor) & SPRT_VERSION_MINOR_MASK))
+#define SPRT_VERSION_COMPILED SPRT_VERSION_FORM(SPRT_VERSION_MAJOR, SPRT_VERSION_MINOR)
+
+/* TODO: Check all values below are correct when they're specified in SPRT. */
+
+/* SPRT function IDs */
+#define SPRT_FID_VERSION U(0x0)
+#define SPRT_FID_RETURN_RESPONSE U(0x1)
+
+#define SPRT_FID_MASK U(0xFF)
+
+/* Definitions to build the complete SMC ID */
+#define SPRT_SMC_ID(sprt_fid) ((FUNCID_SERV_SPRT << FUNCID_SERV_SHIFT) | \
+ (U(1) << 31) | ((sprt_fid) & SPRT_FID_MASK))
+
+/* Complete SMC IDs */
+#define SPRT_VERSION SPRT_SMC_ID(SPRT_FID_VERSION)
+#define SPRT_RETURN_RESPONSE SPRT_SMC_ID(SPRT_FID_RETURN_RESPONSE)
+
+/* SPRT error codes. */
+#define SPRT_SUCCESS 0
+#define SPRT_NOT_SUPPORTED -1
+#define SPRT_INVALID_PARAMETER -2
+
+#endif /* __SPRT_SVC_H__ */
diff --git a/include/runtime_services/std_svc.h b/include/runtime_services/std_svc.h
new file mode 100644
index 000000000..75ca4e61c
--- /dev/null
+++ b/include/runtime_services/std_svc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Definitions related to the Standard Service as per the SMC Calling Convention
+ *
+ * Although PSCI calls are part of the Standard Service call range, PSCI-related
+ * definitions are not in this header file but in psci.h instead.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+#define SMC_STD_SVC_CALL_COUNT 0x8400ff00
+#define SMC_STD_SVC_UID 0x8400ff01
+/* 0x8400ff02 is reserved */
+#define SMC_STD_SVC_REVISION 0x8400ff03
+
+/* Standard Service Calls revision numbers */
+#define STD_SVC_REVISION_MAJOR 0x0
+#define STD_SVC_REVISION_MINOR 0x1
+
+#endif /* __STD_SVC_H__ */
diff --git a/include/runtime_services/trusted_os.h b/include/runtime_services/trusted_os.h
new file mode 100644
index 000000000..f480c6bcb
--- /dev/null
+++ b/include/runtime_services/trusted_os.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Definitions related to the Trusted OS Service as per the SMC Calling
+ * Convention.
+ */
+
+#ifndef __TRUSTED_OS_H__
+#define __TRUSTED_OS_H__
+
+#include <uuid.h>
+
+/* Trusted OS Function IDs that fall under Trusted OS call range */
+#define SMC_TOS_CALL_COUNT 0xbf00ff00
+#define SMC_TOS_UID 0xbf00ff01
+/* 0xbf00ff02 is reserved */
+#define SMC_TOS_REVISION 0xbf00ff03
+
+/*
+ * Detect whether a Trusted OS is present in the software stack.
+ * This is implemented using the Trusted OS UID SMC call.
+ *
+ * If a Trusted OS is detected then this function returns 1
+ * and populates 'tos_uuid' with the UUID of the detected Trusted OS.
+ * Otherwise, this function returns 0 and the value pointed to by 'tos_uuid'
+ * should be ignored.
+ */
+unsigned int is_trusted_os_present(uuid_t *tos_uuid);
+
+#endif /* __TRUSTED_OS_H__ */
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 000000000..810af0f0d
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
+ /* Exit early if size is zero */
+ cmp r1, #0
+ beq exit_loop_\op
+ dcache_line_size r2, r3
+ add r1, r0, r1
+ sub r3, r2, #1
+ bic r0, r0, r3
+loop_\op:
+ stcopr r0, \coproc, \opc1, \CRn, \CRm, \opc2
+ add r0, r0, r2
+ cmp r0, r1
+ blo loop_\op
+ dsb sy
+exit_loop_\op:
+ bx lr
+.endm
+
+ /* ------------------------------------------
+ * Clean+Invalidate from base address till
+ * size. 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva cimvac, DCCIMVAC
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from base address till size.
+ * 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cmvac, DCCMVAC
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from base address till
+ * size. 'r0' = addr, 'r1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva imvac, DCIMVAC
+endfunc inv_dcache_range
+
+ /* ----------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * r1: The cache level to begin operation from
+ * r2: clidr_el1
+ * r3: The last cache level to operate on
+ * and will carry out the operation on each data cache from level 0
+ * to the level in r3 in sequence
+ *
+ * The dcsw_op macro sets up the r2 and r3 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ----------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ ldcopr r2, CLIDR
+ ubfx r3, r2, \shift, \fw
+ lsl r3, r3, \ls
+ mov r1, #0
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ push {r4-r12,lr}
+ adr r11, dcsw_loop_table // compute cache op based on the operation type
+ add r6, r11, r0, lsl #3 // cache op is 2x32-bit instructions
+loop1:
+ add r10, r1, r1, LSR #1 // Work out 3x current cache level
+ mov r12, r2, LSR r10 // extract cache type bits from clidr
+ and r12, r12, #7 // mask the bits for current cache only
+ cmp r12, #2 // see what cache we have at this level
+ blo level_done // no cache or only instruction cache at this level
+
+ stcopr r1, CSSELR // select current cache level in csselr
+	isb			// isb to sync the new csselr & ccsidr
+ ldcopr r12, CCSIDR // read the new ccsidr
+ and r10, r12, #7 // extract the length of the cache lines
+ add r10, r10, #4 // add 4 (r10 = line length offset)
+ ubfx r4, r12, #3, #10 // r4 = maximum way number (right aligned)
+ clz r5, r4 // r5 = the bit position of the way size increment
+ mov r9, r4 // r9 working copy of the aligned max way number
+
+loop2:
+ ubfx r7, r12, #13, #15 // r7 = max set number (right aligned)
+
+loop3:
+ orr r0, r1, r9, LSL r5 // factor in the way number and cache level into r0
+ orr r0, r0, r7, LSL r10 // factor in the set number
+
+ blx r6
+ subs r7, r7, #1 // decrement the set number
+ bhs loop3
+ subs r9, r9, #1 // decrement the way number
+ bhs loop2
+level_done:
+ add r1, r1, #2 // increment the cache number
+ cmp r3, r1
+ dsb sy // ensure completion of previous cache maintenance instruction
+ bhi loop1
+
+ mov r6, #0
+	stcopr	r6, CSSELR	// select cache level 0 in csselr
+ dsb sy
+ isb
+ pop {r4-r12,pc}
+
+dcsw_loop_table:
+ stcopr r0, DCISW
+ bx lr
+ stcopr r0, DCCISW
+ bx lr
+ stcopr r0, DCCSW
+ bx lr
+
+endfunc do_dcsw_op
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way till PoU.
+ *
+ * The function requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way till PoC.
+ *
+ * The function requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ ldcopr r2, CLIDR
+ mov r3, \level
+ sub r1, r3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+ * as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/lib/aarch32/exception_stubs.S b/lib/aarch32/exception_stubs.S
new file mode 100644
index 000000000..ed48da2d0
--- /dev/null
+++ b/lib/aarch32/exception_stubs.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * Simplistic exceptions vector table.
+ * All entries spin, which means all types of exceptions are unrecoverable.
+ */
+ .global exception_stubs
+vector_base exception_stubs
+ b . /* Not used */
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Hyp trap */
+ b . /* IRQ */
+ b . /* FIQ */
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
new file mode 100644
index 000000000..ab37be9c3
--- /dev/null
+++ b/lib/aarch32/misc_helpers.S
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .globl zeromem
+ .globl memcpy4
+ .globl disable_mmu_icache
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address and length must be 4-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+#if ENABLE_ASSERTIONS
+ tst r0, #0x3
+ ASM_ASSERT(eq)
+ tst r1, #0x3
+ ASM_ASSERT(eq)
+#endif
+ add r2, r0, r1
+ mov r1, #0
+z_loop:
+ cmp r2, r0
+ beq z_end
+ str r1, [r0], #4
+ b z_loop
+z_end:
+ bx lr
+endfunc zeromem
+
+/* --------------------------------------------------------------------------
+ * void memcpy4(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 4-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy4
+#if ASM_ASSERTION
+ orr r3, r0, r1
+ tst r3, #0x3
+ ASM_ASSERT(eq)
+#endif
+/* copy 4 bytes at a time */
+m_loop4:
+ cmp r2, #4
+ blt m_loop1
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ sub r2, r2, #4
+ b m_loop4
+/* copy byte per byte */
+m_loop1:
+ cmp r2,#0
+ beq m_end
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ subs r2, r2, #1
+ bne m_loop1
+m_end:
+ bx lr
+endfunc memcpy4
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU in Secure State
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu
+ mov r1, #(HSCTLR_M_BIT | HSCTLR_C_BIT)
+do_disable_mmu:
+ ldcopr r0, HSCTLR
+ bic r0, r0, r1
+ stcopr r0, HSCTLR
+ isb // ensure MMU is off
+ dsb sy
+ bx lr
+endfunc disable_mmu
+
+
+func disable_mmu_icache
+ ldr r1, =(HSCTLR_M_BIT | HSCTLR_C_BIT | HSCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
new file mode 100644
index 000000000..9c40b9db8
--- /dev/null
+++ b/lib/aarch64/cache_helpers.S
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+loop_\op:
+ dc \op, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb sy
+exit_loop_\op:
+ ret
+.endm
+ /* ------------------------------------------
+ * Clean+Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva civac
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from base address till size.
+ * 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva ivac
+endfunc inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * x10: The cache level to begin operation from
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ mov x10, xzr
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lo level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.hs loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.hs loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.hi loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+endfunc do_dcsw_op
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ mrs x9, clidr_el1
+ mov x3, \level
+ sub x10, x3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/lib/aarch64/exception_stubs.S b/lib/aarch64/exception_stubs.S
new file mode 100644
index 000000000..0508fe573
--- /dev/null
+++ b/lib/aarch64/exception_stubs.S
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * Simplistic exceptions vector table.
+ * All entries spin, which means all types of exceptions are unrecoverable.
+ */
+ .global exception_stubs
+vector_base exception_stubs
+vector_entry SynchronousExceptionSP0
+ b .
+vector_entry IrqSP0
+ b .
+vector_entry FiqSP0
+ b .
+vector_entry SErrorSP0
+ b .
+vector_entry SynchronousExceptionSPx
+ b .
+vector_entry IrqSPx
+ b .
+vector_entry FiqSPx
+ b .
+vector_entry SErrorSPx
+ b .
+vector_entry SynchronousExceptionA64
+ b .
+vector_entry IrqA64
+ b .
+vector_entry FiqA64
+ b .
+vector_entry SErrorA64
+ b .
+vector_entry SynchronousExceptionA32
+ b .
+vector_entry IrqA32
+ b .
+vector_entry FiqA32
+ b .
+vector_entry SErrorA32
+ b .
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
new file mode 100644
index 000000000..6acaa8689
--- /dev/null
+++ b/lib/aarch64/misc_helpers.S
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .globl get_afflvl_shift
+ .globl mpidr_mask_lower_afflvls
+ .globl eret
+ .globl smc
+
+ .globl zeromem16
+ .globl memcpy16
+
+ .globl disable_mmu
+ .globl disable_mmu_icache
+
+func get_afflvl_shift
+ cmp x0, #3
+ cinc x0, x0, eq
+ mov x1, #MPIDR_AFFLVL_SHIFT
+ lsl x0, x0, x1
+ ret
+endfunc get_afflvl_shift
+
+func mpidr_mask_lower_afflvls
+ cmp x1, #3
+ cinc x1, x1, eq
+ mov x2, #MPIDR_AFFLVL_SHIFT
+ lsl x2, x1, x2
+ lsr x0, x0, x2
+ lsl x0, x0, x2
+ ret
+endfunc mpidr_mask_lower_afflvls
+
+
+func eret
+ eret
+endfunc eret
+
+func smc
+ smc #0
+endfunc smc
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem16
+#if ENABLE_ASSERTIONS
+ tst x0, #0xf
+ ASM_ASSERT(eq)
+#endif
+ add x2, x0, x1
+/* zero 16 bytes at a time */
+z_loop16:
+ sub x3, x2, x0
+ cmp x3, #16
+ b.lt z_loop1
+ stp xzr, xzr, [x0], #16
+ b z_loop16
+/* zero byte per byte */
+z_loop1:
+ cmp x0, x2
+ b.eq z_end
+ strb wzr, [x0], #1
+ b z_loop1
+z_end:
+ ret
+endfunc zeromem16
+
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy16
+#if ENABLE_ASSERTIONS
+ orr x3, x0, x1
+ tst x3, #0xf
+ ASM_ASSERT(eq)
+#endif
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy byte per byte */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end:
+ ret
+endfunc memcpy16
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at the current exception level (NS-EL1 or EL2)
+ * This is implemented in assembler to ensure that the data cache is cleaned
+ * and invalidated after the MMU is disabled without any intervening cacheable
+ * data accesses
+ * ---------------------------------------------------------------------------
+ */
+func disable_mmu
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ asm_read_sctlr_el1_or_el2
+ bic x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb /* ensure MMU is off */
+ mov x0, #DCCISW /* DCache clean and invalidate */
+ b dcsw_op_all
+endfunc disable_mmu
+
+func disable_mmu_icache
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache
+
+/* Need this label for asm_read/write_sctlr_el1_or_el2 */
+dead:
+ b dead
diff --git a/lib/compiler-rt/LICENSE.TXT b/lib/compiler-rt/LICENSE.TXT
new file mode 100644
index 000000000..0134694e4
--- /dev/null
+++ b/lib/compiler-rt/LICENSE.TXT
@@ -0,0 +1,91 @@
+==============================================================================
+compiler_rt License
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2018 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
diff --git a/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
new file mode 100644
index 000000000..be343b6bc
--- /dev/null
+++ b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
@@ -0,0 +1,46 @@
+//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { uint64_t quot; uint64_t rem; }
+// __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
+// uint64_t rem, quot;
+// quot = __udivmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+ push {r6, lr}
+ sub sp, sp, #16
+ add r6, sp, #8
+ str r6, [sp]
+#if defined(__MINGW32__)
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
+#endif
+ bl SYMBOL_NAME(__udivmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r6, pc}
+END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
diff --git a/lib/compiler-rt/builtins/assembly.h b/lib/compiler-rt/builtins/assembly.h
new file mode 100644
index 000000000..3f5e59b25
--- /dev/null
+++ b/lib/compiler-rt/builtins/assembly.h
@@ -0,0 +1,204 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN(name) .private_extern name
+#define LOCAL_LABEL(name) L_##name
+// tell linker it can break up file at label boundaries
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
+#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#elif defined(__ELF__)
+
+#define HIDDEN(name) .hidden name
+#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
+#if defined(__arm__)
+#define SYMBOL_IS_FUNC(name) .type name,%function
+#else
+#define SYMBOL_IS_FUNC(name) .type name,@function
+#endif
+#define CONST_SECTION .section .rodata
+
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+ defined(__linux__)
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
+
+#else // !__APPLE__ && !__ELF__
+
+#define HIDDEN(name)
+#define LOCAL_LABEL(name) .L ## name
+#define FILE_LEVEL_DIRECTIVE
+#define SYMBOL_IS_FUNC(name) \
+ .def name SEPARATOR \
+ .scl 2 SEPARATOR \
+ .type 32 SEPARATOR \
+ .endef
+#define CONST_SECTION .section .rdata,"rd"
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#endif
+
+#if defined(__arm__)
+
+/*
+ * Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
+ * - for '-mthumb -march=armv6' compiler defines '__thumb__'
+ * - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
+ */
+#if defined(__thumb2__) || defined(__thumb__)
+#define DEFINE_CODE_STATE .thumb SEPARATOR
+#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
+#if defined(__thumb2__)
+#define USE_THUMB_2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#define ITE(cond) ite cond
+#else
+#define USE_THUMB_1
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif // defined(__thumb2__)
+#else // !defined(__thumb2__) && !defined(__thumb__)
+#define DEFINE_CODE_STATE .arm SEPARATOR
+#define DECLARE_FUNC_ENCODING
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif
+
+#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
+#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
+#endif
+
+#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
+#define ARM_HAS_BX
+#endif
+#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
+ (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
+#define __ARM_FEATURE_CLZ
+#endif
+
+#ifdef ARM_HAS_BX
+#define JMP(r) bx r
+#define JMPc(r, c) bx##c r
+#else
+#define JMP(r) mov pc, r
+#define JMPc(r, c) mov##c pc, r
+#endif
+
+// pop {pc} can't switch Thumb mode on ARMv4T
+#if __ARM_ARCH >= 5
+#define POP_PC() pop {pc}
+#else
+#define POP_PC() \
+ pop {ip}; \
+ JMP(ip)
+#endif
+
+#if defined(USE_THUMB_2)
+#define WIDE(op) op.w
+#else
+#define WIDE(op) op
+#endif
+#else // !defined(__arm__)
+#define DECLARE_FUNC_ENCODING
+#define DEFINE_CODE_STATE
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ .thumb_func SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ HIDDEN(name) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#ifdef __ELF__
+#define END_COMPILERRT_FUNCTION(name) \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#else
+#define END_COMPILERRT_FUNCTION(name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
diff --git a/lib/compiler-rt/builtins/ctzdi2.c b/lib/compiler-rt/builtins/ctzdi2.c
new file mode 100644
index 000000000..eecde2971
--- /dev/null
+++ b/lib/compiler-rt/builtins/ctzdi2.c
@@ -0,0 +1,35 @@
+/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __ctzdi2 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Returns: the number of trailing 0-bits */
+
+#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__))
+/* gcc resolves __builtin_ctz -> __ctzdi2 leading to infinite recursion */
+#define __builtin_ctz(a) __ctzsi2(a)
+extern si_int __ctzsi2(si_int);
+#endif
+
+/* Precondition: a != 0 */
+
+COMPILER_RT_ABI si_int
+__ctzdi2(di_int a)
+{
+ dwords x;
+ x.all = a;
+ const si_int f = -(x.s.low == 0);
+ return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
+ (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
diff --git a/lib/compiler-rt/builtins/int_endianness.h b/lib/compiler-rt/builtins/int_endianness.h
new file mode 100644
index 000000000..e2586c56b
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_endianness.h
@@ -0,0 +1,116 @@
+/* ===-- int_endianness.h - configuration header for compiler-rt ------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_ENDIANNESS_H
+#define INT_ENDIANNESS_H
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ defined(__ORDER_LITTLE_ENDIAN__)
+
+/* Clang and GCC provide built-in endianness definitions. */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* __BYTE_ORDER__ */
+
+#else /* Compilers other than Clang or GCC. */
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/byteorder.h>
+
+#if defined(_BIG_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif defined(_LITTLE_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#else /* !_LITTLE_ENDIAN */
+#error "unknown endianness"
+#endif /* !_LITTLE_ENDIAN */
+
+#endif /* Solaris and AuroraUX. */
+
+/* .. */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \
+ defined(__minix)
+#include <sys/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* _BYTE_ORDER */
+
+#endif /* *BSD */
+
+#if defined(__OpenBSD__)
+#include <machine/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* _BYTE_ORDER */
+
+#endif /* OpenBSD */
+
+/* .. */
+
+/* Mac OSX has __BIG_ENDIAN__ or __LITTLE_ENDIAN__ automatically set by the
+ * compiler (at least with GCC) */
+#if defined(__APPLE__) || defined(__ellcc__ )
+
+#ifdef __BIG_ENDIAN__
+#if __BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#endif
+#endif /* __BIG_ENDIAN__ */
+
+#ifdef __LITTLE_ENDIAN__
+#if __LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif
+#endif /* __LITTLE_ENDIAN__ */
+
+#endif /* Mac OSX */
+
+/* .. */
+
+#if defined(_WIN32)
+
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+
+#endif /* Windows */
+
+#endif /* Clang or GCC. */
+
+/* . */
+
+#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
+#error Unable to determine endian
+#endif /* Check we found an endianness correctly. */
+
+#endif /* INT_ENDIANNESS_H */
diff --git a/lib/compiler-rt/builtins/int_lib.h b/lib/compiler-rt/builtins/int_lib.h
new file mode 100644
index 000000000..724d5a4fe
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_lib.h
@@ -0,0 +1,124 @@
+/* ===-- int_lib.h - configuration header for compiler-rt -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INT_LIB_H
+#define INT_LIB_H
+
+/* Assumption: Signed integral is 2's complement. */
+/* Assumption: Right shift of signed negative is arithmetic shift. */
+/* Assumption: Endianness is little or big (not mixed). */
+
+#if defined(__ELF__)
+#define FNALIAS(alias_name, original_name) \
+ void alias_name() __attribute__((__alias__(#original_name)))
+#define COMPILER_RT_ALIAS(aliasee) __attribute__((__alias__(#aliasee)))
+#else
+#define FNALIAS(alias, name) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#define COMPILER_RT_ALIAS(aliasee) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#endif
+
+/* ABI macro definitions */
+
+#if __ARM_EABI__
+# ifdef COMPILER_RT_ARMHF_TARGET
+# define COMPILER_RT_ABI
+# else
+# define COMPILER_RT_ABI __attribute__((__pcs__("aapcs")))
+# endif
+#else
+# define COMPILER_RT_ABI
+#endif
+
+#define AEABI_RTABI __attribute__((__pcs__("aapcs")))
+
+#ifdef _MSC_VER
+#define ALWAYS_INLINE __forceinline
+#define NOINLINE __declspec(noinline)
+#define NORETURN __declspec(noreturn)
+#define UNUSED
+#else
+#define ALWAYS_INLINE __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define UNUSED __attribute__((unused))
+#endif
+
+#include <sys/limits.h>
+#include <sys/stdint.h>
+#include <sys/types.h>
+
+/* Include the commonly used internal type definitions. */
+#include "int_types.h"
+
+COMPILER_RT_ABI si_int __paritysi2(si_int a);
+COMPILER_RT_ABI si_int __paritydi2(di_int a);
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b);
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b);
+COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d);
+
+COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int* rem);
+COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int* rem);
+#ifdef CRT_HAS_128BIT
+COMPILER_RT_ABI si_int __clzti2(ti_int a);
+COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
+#endif
+
+/* Definitions for builtins unavailable on MSVC */
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+
+uint32_t __inline __builtin_ctz(uint32_t value) {
+ unsigned long trailing_zero = 0;
+ if (_BitScanForward(&trailing_zero, value))
+ return trailing_zero;
+ return 32;
+}
+
+uint32_t __inline __builtin_clz(uint32_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse(&leading_zero, value))
+ return 31 - leading_zero;
+ return 32;
+}
+
+#if defined(_M_ARM) || defined(_M_X64)
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse64(&leading_zero, value))
+ return 63 - leading_zero;
+ return 64;
+}
+#else
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ if (value == 0)
+ return 64;
+ uint32_t msh = (uint32_t)(value >> 32);
+ uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+ if (msh != 0)
+ return __builtin_clz(msh);
+ return 32 + __builtin_clz(lsh);
+}
+#endif
+
+#define __builtin_clzl __builtin_clzll
+#endif /* defined(_MSC_VER) && !defined(__clang__) */
+
+#endif /* INT_LIB_H */
diff --git a/lib/compiler-rt/builtins/int_math.h b/lib/compiler-rt/builtins/int_math.h
new file mode 100644
index 000000000..fc81fb7f0
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_math.h
@@ -0,0 +1,114 @@
+/* ===-- int_math.h - internal math inlines ---------------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===-----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines substitutes for the libm functions used in some of the
+ * compiler-rt implementations, defined in such a way that there is not a direct
+ * dependency on libm or math.h. Instead, we use the compiler builtin versions
+ * where available. This reduces our dependencies on the system SDK by foisting
+ * the responsibility onto the compiler.
+ *
+ * ===-----------------------------------------------------------------------===
+ */
+
+#ifndef INT_MATH_H
+#define INT_MATH_H
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <math.h>
+#include <stdlib.h>
+#include <ymath.h>
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define CRT_INFINITY INFINITY
+#else
+#define CRT_INFINITY __builtin_huge_valf()
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_isfinite(x) _finite((x))
+#define crt_isinf(x) !_finite((x))
+#define crt_isnan(x) _isnan((x))
+#else
+/* Define crt_isfinite in terms of the builtin if available, otherwise provide
+ * an alternate version in terms of our other functions. This supports some
+ * versions of GCC which didn't have __builtin_isfinite.
+ */
+#if __has_builtin(__builtin_isfinite)
+# define crt_isfinite(x) __builtin_isfinite((x))
+#elif defined(__GNUC__)
+# define crt_isfinite(x) \
+ __extension__(({ \
+ __typeof((x)) x_ = (x); \
+ !crt_isinf(x_) && !crt_isnan(x_); \
+ }))
+#else
+# error "Do not know how to check for infinity"
+#endif /* __has_builtin(__builtin_isfinite) */
+#define crt_isinf(x) __builtin_isinf((x))
+#define crt_isnan(x) __builtin_isnan((x))
+#endif /* _MSC_VER */
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_copysign(x, y) copysign((x), (y))
+#define crt_copysignf(x, y) copysignf((x), (y))
+#define crt_copysignl(x, y) copysignl((x), (y))
+#else
+#define crt_copysign(x, y) __builtin_copysign((x), (y))
+#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
+#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fabs(x) fabs((x))
+#define crt_fabsf(x) fabsf((x))
+#define crt_fabsl(x) fabs((x))
+#else
+#define crt_fabs(x) __builtin_fabs((x))
+#define crt_fabsf(x) __builtin_fabsf((x))
+#define crt_fabsl(x) __builtin_fabsl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fmax(x, y) __max((x), (y))
+#define crt_fmaxf(x, y) __max((x), (y))
+#define crt_fmaxl(x, y) __max((x), (y))
+#else
+#define crt_fmax(x, y) __builtin_fmax((x), (y))
+#define crt_fmaxf(x, y) __builtin_fmaxf((x), (y))
+#define crt_fmaxl(x, y) __builtin_fmaxl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_logb(x) logb((x))
+#define crt_logbf(x) logbf((x))
+#define crt_logbl(x) logbl((x))
+#else
+#define crt_logb(x) __builtin_logb((x))
+#define crt_logbf(x) __builtin_logbf((x))
+#define crt_logbl(x) __builtin_logbl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_scalbn(x, y) scalbn((x), (y))
+#define crt_scalbnf(x, y) scalbnf((x), (y))
+#define crt_scalbnl(x, y) scalbnl((x), (y))
+#else
+#define crt_scalbn(x, y) __builtin_scalbn((x), (y))
+#define crt_scalbnf(x, y) __builtin_scalbnf((x), (y))
+#define crt_scalbnl(x, y) __builtin_scalbnl((x), (y))
+#endif
+
+#endif /* INT_MATH_H */
diff --git a/lib/compiler-rt/builtins/int_types.h b/lib/compiler-rt/builtins/int_types.h
new file mode 100644
index 000000000..f53f343d3
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_types.h
@@ -0,0 +1,164 @@
+/* ===-- int_types.h - configuration header for compiler-rt ---------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines various standard types, most importantly a number of unions
+ * used to access parts of larger types.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_TYPES_H
+#define INT_TYPES_H
+
+#include "int_endianness.h"
+
+/* si_int is defined in Linux sysroot's asm-generic/siginfo.h */
+#ifdef si_int
+#undef si_int
+#endif
+typedef int si_int;
+typedef unsigned su_int;
+
+typedef long long di_int;
+typedef unsigned long long du_int;
+
+typedef union
+{
+ di_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ si_int high;
+#else
+ si_int high;
+ su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} dwords;
+
+typedef union
+{
+ du_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ su_int high;
+#else
+ su_int high;
+ su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} udwords;
+
+#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64)) || defined(__riscv)
+#define CRT_HAS_128BIT
+#endif
+
+#ifdef CRT_HAS_128BIT
+typedef int ti_int __attribute__ ((mode (TI)));
+typedef unsigned tu_int __attribute__ ((mode (TI)));
+
+typedef union
+{
+ ti_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ di_int high;
+#else
+ di_int high;
+ du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} twords;
+
+typedef union
+{
+ tu_int all;
+ struct
+ {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ du_int high;
+#else
+ du_int high;
+ du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+ }s;
+} utwords;
+
+static __inline ti_int make_ti(di_int h, di_int l) {
+ twords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+static __inline tu_int make_tu(du_int h, du_int l) {
+ utwords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+#endif /* CRT_HAS_128BIT */
+
+typedef union
+{
+ su_int u;
+ float f;
+} float_bits;
+
+typedef union
+{
+ udwords u;
+ double f;
+} double_bits;
+
+typedef struct
+{
+#if _YUGA_LITTLE_ENDIAN
+ udwords low;
+ udwords high;
+#else
+ udwords high;
+ udwords low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+} uqwords;
+
+typedef union
+{
+ uqwords u;
+ long double f;
+} long_double_bits;
+
+#if __STDC_VERSION__ >= 199901L
+typedef float _Complex Fcomplex;
+typedef double _Complex Dcomplex;
+typedef long double _Complex Lcomplex;
+
+#define COMPLEX_REAL(x) __real__(x)
+#define COMPLEX_IMAGINARY(x) __imag__(x)
+#else
+typedef struct { float real, imaginary; } Fcomplex;
+
+typedef struct { double real, imaginary; } Dcomplex;
+
+typedef struct { long double real, imaginary; } Lcomplex;
+
+#define COMPLEX_REAL(x) (x).real
+#define COMPLEX_IMAGINARY(x) (x).imaginary
+#endif
+#endif /* INT_TYPES_H */
+
diff --git a/lib/compiler-rt/builtins/udivmoddi4.c b/lib/compiler-rt/builtins/udivmoddi4.c
new file mode 100644
index 000000000..0c8b4ff46
--- /dev/null
+++ b/lib/compiler-rt/builtins/udivmoddi4.c
@@ -0,0 +1,231 @@
+/* ===-- udivmoddi4.c - Implement __udivmoddi4 -----------------------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __udivmoddi4 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Effects: if rem != 0, *rem = a % b
+ * Returns: a / b
+ */
+
+/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */
+
+COMPILER_RT_ABI du_int
+__udivmoddi4(du_int a, du_int b, du_int* rem)
+{
+ const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
+ const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT;
+ udwords n;
+ n.all = a;
+ udwords d;
+ d.all = b;
+ udwords q;
+ udwords r;
+ unsigned sr;
+ /* special cases, X is unknown, K != 0 */
+ if (n.s.high == 0)
+ {
+ if (d.s.high == 0)
+ {
+ /* 0 X
+ * ---
+ * 0 X
+ */
+ if (rem)
+ *rem = n.s.low % d.s.low;
+ return n.s.low / d.s.low;
+ }
+ /* 0 X
+ * ---
+ * K X
+ */
+ if (rem)
+ *rem = n.s.low;
+ return 0;
+ }
+ /* n.s.high != 0 */
+ if (d.s.low == 0)
+ {
+ if (d.s.high == 0)
+ {
+ /* K X
+ * ---
+ * 0 0
+ */
+ if (rem)
+ *rem = n.s.high % d.s.low;
+ return n.s.high / d.s.low;
+ }
+ /* d.s.high != 0 */
+ if (n.s.low == 0)
+ {
+ /* K 0
+ * ---
+ * K 0
+ */
+ if (rem)
+ {
+ r.s.high = n.s.high % d.s.high;
+ r.s.low = 0;
+ *rem = r.all;
+ }
+ return n.s.high / d.s.high;
+ }
+ /* K K
+ * ---
+ * K 0
+ */
+ if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */
+ {
+ if (rem)
+ {
+ r.s.low = n.s.low;
+ r.s.high = n.s.high & (d.s.high - 1);
+ *rem = r.all;
+ }
+ return n.s.high >> __builtin_ctz(d.s.high);
+ }
+ /* K K
+ * ---
+ * K 0
+ */
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ /* 0 <= sr <= n_uword_bits - 2 or sr large */
+ if (sr > n_uword_bits - 2)
+ {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ /* 1 <= sr <= n_uword_bits - 1 */
+ /* q.all = n.all << (n_udword_bits - sr); */
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ /* r.all = n.all >> sr; */
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ else /* d.s.low != 0 */
+ {
+ if (d.s.high == 0)
+ {
+ /* K X
+ * ---
+ * 0 K
+ */
+ if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */
+ {
+ if (rem)
+ *rem = n.s.low & (d.s.low - 1);
+ if (d.s.low == 1)
+ return n.all;
+ sr = __builtin_ctz(d.s.low);
+ q.s.high = n.s.high >> sr;
+ q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ return q.all;
+ }
+ /* K X
+ * ---
+ * 0 K
+ */
+ sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high);
+ /* 2 <= sr <= n_udword_bits - 1
+ * q.all = n.all << (n_udword_bits - sr);
+ * r.all = n.all >> sr;
+ */
+ if (sr == n_uword_bits)
+ {
+ q.s.low = 0;
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ }
+ else if (sr < n_uword_bits) // 2 <= sr <= n_uword_bits - 1
+ {
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ else // n_uword_bits + 1 <= sr <= n_udword_bits - 1
+ {
+ q.s.low = n.s.low << (n_udword_bits - sr);
+ q.s.high = (n.s.high << (n_udword_bits - sr)) |
+ (n.s.low >> (sr - n_uword_bits));
+ r.s.high = 0;
+ r.s.low = n.s.high >> (sr - n_uword_bits);
+ }
+ }
+ else
+ {
+ /* K X
+ * ---
+ * K K
+ */
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ /* 0 <= sr <= n_uword_bits - 1 or sr large */
+ if (sr > n_uword_bits - 1)
+ {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ /* 1 <= sr <= n_uword_bits */
+ /* q.all = n.all << (n_udword_bits - sr); */
+ q.s.low = 0;
+ if (sr == n_uword_bits)
+ {
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ }
+ else
+ {
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ }
+ }
+ /* Not a special case
+ * q and r are initialized with:
+ * q.all = n.all << (n_udword_bits - sr);
+ * r.all = n.all >> sr;
+ * 1 <= sr <= n_udword_bits - 1
+ */
+ su_int carry = 0;
+ for (; sr > 0; --sr)
+ {
+ /* r:q = ((r:q) << 1) | carry */
+ r.s.high = (r.s.high << 1) | (r.s.low >> (n_uword_bits - 1));
+ r.s.low = (r.s.low << 1) | (q.s.high >> (n_uword_bits - 1));
+ q.s.high = (q.s.high << 1) | (q.s.low >> (n_uword_bits - 1));
+ q.s.low = (q.s.low << 1) | carry;
+ /* carry = 0;
+ * if (r.all >= d.all)
+ * {
+ * r.all -= d.all;
+ * carry = 1;
+ * }
+ */
+ const di_int s = (di_int)(d.all - r.all - 1) >> (n_udword_bits - 1);
+ carry = s & 1;
+ r.all -= d.all & s;
+ }
+ q.all = (q.all << 1) | carry;
+ if (rem)
+ *rem = r.all;
+ return q.all;
+}
diff --git a/lib/compiler-rt/compiler-rt.mk b/lib/compiler-rt/compiler-rt.mk
new file mode 100644
index 000000000..d3e2d5ca5
--- /dev/null
+++ b/lib/compiler-rt/compiler-rt.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch32)
+COMPILER_RT_SRCS := $(addprefix lib/compiler-rt/builtins/, \
+ arm/aeabi_uldivmod.S \
+ ctzdi2.c \
+ udivmoddi4.c \
+)
+endif
diff --git a/lib/delay/delay.c b/lib/delay/delay.c
new file mode 100644
index 000000000..d88e500c6
--- /dev/null
+++ b/lib/delay/delay.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <tftf.h>
+
+void waitus(uint64_t us)
+{
+ uint64_t cntp_ct_val_base;
+ uint32_t cnt_frq;
+ uint64_t wait_cycles;
+
+ cnt_frq = read_cntfrq_el0();
+ cntp_ct_val_base = read_cntpct_el0();
+
+	/* Convert microseconds to counter ticks using the counter frequency */
+ wait_cycles = (us * cnt_frq) / 1000000;
+
+ while (read_cntpct_el0() - cntp_ct_val_base < wait_cycles)
+ ;
+}
+
+void waitms(uint64_t ms)
+{
+ while (ms > 0) {
+ waitus(1000);
+ ms--;
+ }
+}
diff --git a/lib/events/events.c b/lib/events/events.c
new file mode 100644
index 000000000..42130d5e8
--- /dev/null
+++ b/lib/events/events.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <platform_def.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+void tftf_init_event(event_t *event)
+{
+ assert(event != NULL);
+ event->cnt = 0;
+ event->lock.lock = 0;
+}
+
+static void send_event_common(event_t *event, unsigned int inc)
+{
+ spin_lock(&event->lock);
+ event->cnt += inc;
+ spin_unlock(&event->lock);
+
+ sev();
+}
+
+void tftf_send_event(event_t *event)
+{
+ VERBOSE("Sending event %p\n", (void *) event);
+ send_event_common(event, 1);
+}
+
+void tftf_send_event_to_all(event_t *event)
+{
+ VERBOSE("Sending event %p to all CPUs\n", (void *) event);
+ send_event_common(event, PLATFORM_CORE_COUNT);
+}
+
+void tftf_send_event_to(event_t *event, unsigned int cpus_count)
+{
+ assert(cpus_count <= PLATFORM_CORE_COUNT);
+ VERBOSE("Sending event %p to %u CPUs\n", (void *) event, cpus_count);
+ send_event_common(event, cpus_count);
+}
+
+void tftf_wait_for_event(event_t *event)
+{
+ unsigned int event_received = 0;
+
+ VERBOSE("Waiting for event %p\n", (void *) event);
+ while (!event_received) {
+
+ dsbsy();
+ /* Wait for someone to send an event */
+ if (!event->cnt) {
+ wfe();
+ } else {
+ spin_lock(&event->lock);
+
+ /*
+ * Check that the event is still pending and that no
+ * one stole it from us while we were trying to
+ * acquire the lock.
+ */
+ if (event->cnt != 0) {
+ event_received = 1;
+ --event->cnt;
+ }
+ /*
+ * No memory barrier is needed here because spin_unlock()
+ * issues a Store-Release instruction, which guarantees
+ * that loads and stores appearing in program order
+ * before the Store-Release are observed before the
+ * Store-Release itself.
+ */
+ spin_unlock(&event->lock);
+ }
+ }
+
+ VERBOSE("Received event %p\n", (void *) event);
+}
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
new file mode 100644
index 000000000..a923df32b
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <amu_private.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+
+int amu_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ return (features & ID_PFR0_AMU_MASK) == 1;
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
new file mode 100644
index 000000000..72f09dc84
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group1_cnt_read_internal
+
/*
 * uint64_t amu_group0_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU counter and return it in the
 * `r0` (low half) / `r1` (high half) register pair, per the AAPCS 64-bit
 * return convention.
 */
func amu_group0_cnt_read_internal
#if ENABLE_ASSERTIONS
	/* `idx` should be between [0, 3] */
	mov	r1, r0
	lsr	r1, r1, #2
	cmp	r1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
	 * in the table below. The dispatch relies on each pair being
	 * exactly two 4-byte instructions.
	 */
	adr	r1, 1f
	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
	add	r1, r1, r0
	bx	r1
1:
	ldcopr16	r0, r1, AMEVCNTR00	/* index 0 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR01	/* index 1 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR02	/* index 2 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR03	/* index 3 */
	bx		lr
endfunc amu_group0_cnt_read_internal

/*
 * uint64_t amu_group1_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU counter and return it in the
 * `r0` (low half) / `r1` (high half) register pair, per the AAPCS 64-bit
 * return convention.
 */
func amu_group1_cnt_read_internal
#if ENABLE_ASSERTIONS
	/* `idx` should be between [0, 15] */
	mov	r1, r0
	lsr	r1, r1, #4
	cmp	r1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
	 * in the table below.
	 */
	adr	r1, 1f
	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
	add	r1, r1, r0
	bx	r1

1:
	ldcopr16	r0, r1, AMEVCNTR10	/* index 0 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR11	/* index 1 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR12	/* index 2 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR13	/* index 3 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR14	/* index 4 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR15	/* index 5 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR16	/* index 6 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR17	/* index 7 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR18	/* index 8 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR19	/* index 9 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1A	/* index 10 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1B	/* index 11 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1C	/* index 12 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1D	/* index 13 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1E	/* index 14 */
	bx		lr
	ldcopr16	r0, r1, AMEVCNTR1F	/* index 15 */
	bx		lr
endfunc amu_group1_cnt_read_internal
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
new file mode 100644
index 000000000..00253b35d
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <amu_private.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+
+int amu_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 000000000..862a71348
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group1_cnt_read_internal
+
/*
 * uint64_t amu_group0_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU counter
 * and return it in `x0`.
 */
func amu_group0_cnt_read_internal
#if ENABLE_ASSERTIONS
	/*
	 * It can be dangerous to call this function with an
	 * out of bounds index. Ensure `idx` is valid (i.e. < 4).
	 */
	mov	x1, x0
	lsr	x1, x1, #2
	cmp	x1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx` calculate address of mrs/ret instruction pair
	 * in the table below. The dispatch relies on each pair being
	 * exactly two 4-byte instructions.
	 */
	adr	x1, 1f
	lsl	x0, x0, #3	/* each mrs/ret sequence is 8 bytes */
	add	x1, x1, x0
	br	x1

1:
	mrs	x0, AMEVCNTR00_EL0	/* index 0 */
	ret
	mrs	x0, AMEVCNTR01_EL0	/* index 1 */
	ret
	mrs	x0, AMEVCNTR02_EL0	/* index 2 */
	ret
	mrs	x0, AMEVCNTR03_EL0	/* index 3 */
	ret
endfunc amu_group0_cnt_read_internal

/*
 * uint64_t amu_group1_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU counter
 * and return it in `x0`.
 */
func amu_group1_cnt_read_internal
#if ENABLE_ASSERTIONS
	/*
	 * It can be dangerous to call this function with an
	 * out of bounds index. Ensure `idx` is valid (i.e. < 16).
	 */
	mov	x1, x0
	lsr	x1, x1, #4
	cmp	x1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx` calculate address of mrs/ret instruction pair
	 * in the table below.
	 */
	adr	x1, 1f
	lsl	x0, x0, #3	/* each mrs/ret sequence is 8 bytes */
	add	x1, x1, x0
	br	x1

1:
	mrs	x0, AMEVCNTR10_EL0	/* index 0 */
	ret
	mrs	x0, AMEVCNTR11_EL0	/* index 1 */
	ret
	mrs	x0, AMEVCNTR12_EL0	/* index 2 */
	ret
	mrs	x0, AMEVCNTR13_EL0	/* index 3 */
	ret
	mrs	x0, AMEVCNTR14_EL0	/* index 4 */
	ret
	mrs	x0, AMEVCNTR15_EL0	/* index 5 */
	ret
	mrs	x0, AMEVCNTR16_EL0	/* index 6 */
	ret
	mrs	x0, AMEVCNTR17_EL0	/* index 7 */
	ret
	mrs	x0, AMEVCNTR18_EL0	/* index 8 */
	ret
	mrs	x0, AMEVCNTR19_EL0	/* index 9 */
	ret
	mrs	x0, AMEVCNTR1A_EL0	/* index 10 */
	ret
	mrs	x0, AMEVCNTR1B_EL0	/* index 11 */
	ret
	mrs	x0, AMEVCNTR1C_EL0	/* index 12 */
	ret
	mrs	x0, AMEVCNTR1D_EL0	/* index 13 */
	ret
	mrs	x0, AMEVCNTR1E_EL0	/* index 14 */
	ret
	mrs	x0, AMEVCNTR1F_EL0	/* index 15 */
	ret
endfunc amu_group1_cnt_read_internal
diff --git a/lib/irq/irq.c b/lib/irq/irq.c
new file mode 100644
index 000000000..5f2507fdb
--- /dev/null
+++ b/lib/irq/irq.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <string.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define IS_PLAT_SPI(irq_num) \
+ (((irq_num) >= MIN_SPI_ID) && \
+ ((irq_num) <= MIN_SPI_ID + PLAT_MAX_SPI_OFFSET_ID))
+
+static spi_desc spi_desc_table[PLAT_MAX_SPI_OFFSET_ID - MIN_SPI_ID];
+static ppi_desc ppi_desc_table[PLATFORM_CORE_COUNT][
+ (MAX_PPI_ID + 1) - MIN_PPI_ID];
+static sgi_desc sgi_desc_table[PLATFORM_CORE_COUNT][MAX_SGI_ID + 1];
+static spurious_desc spurious_desc_handler;
+
+/*
+ * For a given SPI, the associated IRQ handler is common to all CPUs.
+ * Therefore, we need a lock to prevent simultaneous updates.
+ *
+ * We use one lock for all SPIs. This will make it impossible to update
+ * different SPIs' handlers at the same time (although it would be fine) but it
+ * saves memory. Updating an SPI handler shouldn't occur that often anyway so we
+ * shouldn't suffer from this restriction too much.
+ */
+static spinlock_t spi_lock;
+
+static irq_handler_t *get_irq_handler(unsigned int irq_num)
+{
+ if (IS_PLAT_SPI(irq_num))
+ return &spi_desc_table[irq_num - MIN_SPI_ID].handler;
+
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int linear_id = platform_get_core_pos(mpid);
+
+ if (IS_PPI(irq_num))
+ return &ppi_desc_table[linear_id][irq_num - MIN_PPI_ID].handler;
+
+ if (IS_SGI(irq_num))
+ return &sgi_desc_table[linear_id][irq_num - MIN_SGI_ID].handler;
+
+ /*
+ * The only possibility is for it to be a spurious
+ * interrupt.
+ */
+ assert(irq_num == GIC_SPURIOUS_INTERRUPT);
+ return &spurious_desc_handler;
+}
+
/*
 * Raise SGI `sgi_id` on the CPU at linear position `core_pos`.
 * The target CPU must be online.
 */
void tftf_send_sgi(unsigned int sgi_id, unsigned int core_pos)
{
	assert(IS_SGI(sgi_id));

	/* Make all prior memory accesses visible before raising the SGI. */
	dsbish();

	/*
	 * Don't send interrupts to CPUs that are powering down. That would be a
	 * violation of the PSCI CPU_OFF caller responsibilities. The PSCI
	 * specification explicitely says:
	 * "Asynchronous wake-ups on a core that has been switched off through a
	 * PSCI CPU_OFF call results in an erroneous state. When this erroneous
	 * state is observed, it is IMPLEMENTATION DEFINED how the PSCI
	 * implementation reacts."
	 */
	assert(tftf_is_core_pos_online(core_pos));

	arm_gic_send_sgi(sgi_id, core_pos);
}
+
/*
 * Enable interrupt `irq_num` at the given priority. For shared peripheral
 * interrupts, the interrupt is also routed to the calling core.
 */
void tftf_irq_enable(unsigned int irq_num, uint8_t irq_priority)
{
	if (IS_PLAT_SPI(irq_num)) {
		/*
		 * Instruct the GIC Distributor to forward this shared
		 * interrupt to the calling core.
		 */
		unsigned int target = platform_get_core_pos(read_mpidr_el1());

		arm_gic_set_intr_target(irq_num, target);
	}

	arm_gic_set_intr_priority(irq_num, irq_priority);
	arm_gic_intr_enable(irq_num);

	VERBOSE("Enabled IRQ #%u\n", irq_num);
}
+
/* Mask interrupt `irq_num` at the GIC level. */
void tftf_irq_disable(unsigned int irq_num)
{
	arm_gic_intr_disable(irq_num);

	VERBOSE("Disabled IRQ #%u\n", irq_num);
}
+
+#define HANDLER_VALID(handler, expect_handler) \
+ ((expect_handler) ? ((handler) != NULL) : ((handler) == NULL))
+
+static int tftf_irq_update_handler(unsigned int irq_num,
+ irq_handler_t irq_handler,
+ bool expect_handler)
+{
+ irq_handler_t *cur_handler;
+ int ret = -1;
+
+ cur_handler = get_irq_handler(irq_num);
+ if (IS_PLAT_SPI(irq_num))
+ spin_lock(&spi_lock);
+
+ /*
+ * Update the IRQ handler, if the current handler is in the expected
+ * state
+ */
+ assert(HANDLER_VALID(*cur_handler, expect_handler));
+ if (HANDLER_VALID(*cur_handler, expect_handler)) {
+ *cur_handler = irq_handler;
+ ret = 0;
+ }
+
+ if (IS_PLAT_SPI(irq_num))
+ spin_unlock(&spi_lock);
+
+ return ret;
+}
+
+int tftf_irq_register_handler(unsigned int irq_num, irq_handler_t irq_handler)
+{
+ int ret;
+
+ ret = tftf_irq_update_handler(irq_num, irq_handler, false);
+ if (ret == 0)
+ INFO("Registered IRQ handler %p for IRQ #%u\n",
+ (void *)(uintptr_t) irq_handler, irq_num);
+
+ return ret;
+}
+
+int tftf_irq_unregister_handler(unsigned int irq_num)
+{
+ int ret;
+
+ ret = tftf_irq_update_handler(irq_num, NULL, true);
+ if (ret == 0)
+ INFO("Unregistered IRQ handler for IRQ #%u\n", irq_num);
+
+ return ret;
+}
+
+int tftf_irq_handler_dispatcher(void)
+{
+ unsigned int raw_iar;
+ unsigned int irq_num;
+ sgi_data_t sgi_data;
+ irq_handler_t *handler;
+ void *irq_data = NULL;
+ int rc = 0;
+
+ /* Acknowledge the interrupt */
+ irq_num = arm_gic_intr_ack(&raw_iar);
+
+ handler = get_irq_handler(irq_num);
+ if (IS_PLAT_SPI(irq_num)) {
+ irq_data = &irq_num;
+ } else if (IS_PPI(irq_num)) {
+ irq_data = &irq_num;
+ } else if (IS_SGI(irq_num)) {
+ sgi_data.irq_id = irq_num;
+ irq_data = &sgi_data;
+ }
+
+ if (*handler != NULL)
+ rc = (*handler)(irq_data);
+
+ /* Mark the processing of the interrupt as complete */
+ if (irq_num != GIC_SPURIOUS_INTERRUPT)
+ arm_gic_end_of_intr(raw_iar);
+
+ return rc;
+}
+
+void tftf_irq_setup(void)
+{
+ memset(spi_desc_table, 0, sizeof(spi_desc_table));
+ memset(ppi_desc_table, 0, sizeof(ppi_desc_table));
+ memset(sgi_desc_table, 0, sizeof(sgi_desc_table));
+ memset(&spurious_desc_handler, 0, sizeof(spurious_desc_handler));
+ init_spinlock(&spi_lock);
+}
diff --git a/lib/locks/aarch32/spinlock.S b/lib/locks/aarch32/spinlock.S
new file mode 100644
index 000000000..b408914fc
--- /dev/null
+++ b/lib/locks/aarch32/spinlock.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl init_spinlock
+ .globl spin_lock
+ .globl spin_unlock
+
/*
 * void init_spinlock(spinlock_t *lock);
 * Initialise the lock word pointed to by r0 to the unlocked (0) state.
 */
func init_spinlock
	mov	r1, #0
	str	r1, [r0]
	bx	lr
endfunc init_spinlock

/*
 * void spin_lock(spinlock_t *lock);
 *
 * Acquire the lock with an exclusive load/store (LDREX/STREX) loop.
 * While the lock is observed held, wait in WFE; the waiter is woken when
 * the holder's Store-Release clears the exclusive monitor.
 */
func spin_lock
	mov	r2, #1
1:
	ldrex	r1, [r0]
	cmp	r1, #0
	wfene				/* lock held: wait for an event */
	strexeq	r1, r2, [r0]		/* lock free: try to claim it */
	cmpeq	r1, #0			/* r1 == 0 iff the STREX succeeded */
	bne	1b
	/* Order the lock acquisition before the critical section. */
	dmb
	bx	lr
endfunc spin_lock


/*
 * void spin_unlock(spinlock_t *lock);
 * Release the lock with a Store-Release (STL), which orders all prior
 * accesses before the store and allows WFE waiters to retry.
 */
func spin_unlock
	mov	r1, #0
	stl	r1, [r0]
	bx	lr
endfunc spin_unlock
diff --git a/lib/locks/aarch64/spinlock.S b/lib/locks/aarch64/spinlock.S
new file mode 100644
index 000000000..7f6d0c6aa
--- /dev/null
+++ b/lib/locks/aarch64/spinlock.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl init_spinlock
+ .globl spin_lock
+ .globl spin_unlock
+
/*
 * void init_spinlock(spinlock_t *lock);
 * Initialise the lock word pointed to by x0 to the unlocked (0) state.
 */
func init_spinlock
	str	wzr, [x0]
	ret
endfunc init_spinlock

/*
 * void spin_lock(spinlock_t *lock);
 *
 * Acquire the lock with a Load-Acquire-Exclusive/Store-Exclusive loop.
 * SEVL primes the first WFE so the initial wait falls straight through;
 * subsequent waits sleep until the exclusive monitor is cleared.
 */
func spin_lock
	mov	w2, #1
	sevl
l1:	wfe
l2:	ldaxr	w1, [x0]
	cbnz	w1, l1			/* lock held: go back to WFE */
	stxr	w1, w2, [x0]		/* lock free: try to claim it */
	cbnz	w1, l2			/* STXR failed: retry the exclusive pair */
	ret
endfunc spin_lock


/*
 * void spin_unlock(spinlock_t *lock);
 * Release the lock with a Store-Release (STLR), which orders all prior
 * accesses before the store and wakes WFE waiters via the monitor clear.
 */
func spin_unlock
	stlr	wzr, [x0]
	ret
endfunc spin_unlock
diff --git a/lib/power_management/hotplug/hotplug.c b/lib/power_management/hotplug/hotplug.c
new file mode 100644
index 000000000..7c3a988f8
--- /dev/null
+++ b/lib/power_management/hotplug/hotplug.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <cdefs.h> /* For __dead2 */
+#include <console.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <stdint.h>
+#include <tftf.h>
+
/*
 * Affinity info map of CPUs as seen by TFTF
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
 *   as ON.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
 *   CPU i as ON_PENDING.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
 *   as OFF.
 */
static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];

/* Set once tftf_init_cpus_status_map() has run on the primary CPU. */
static int cpus_status_init_done;

/*
 * Reference count keeping track of the number of CPUs participating in
 * a test.
 */
static volatile unsigned int ref_cnt;

/* Lock to prevent concurrent accesses to the reference count */
static spinlock_t ref_cnt_lock;

/* Per-cpu test entrypoint */
volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];

/*
 * MPID of the primary CPU. NOTE(review): presumably assigned by the
 * framework's cold-boot path before any secondary runs — confirm against
 * the caller of tftf_init_cpus_status_map().
 */
u_register_t tftf_primary_core = INVALID_MPID;
+
+unsigned int tftf_inc_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt < PLATFORM_CORE_COUNT);
+ cnt = ++ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Entering the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
+unsigned int tftf_dec_ref_cnt(void)
+{
+ unsigned int cnt;
+
+ spin_lock(&ref_cnt_lock);
+ assert(ref_cnt != 0);
+ cnt = --ref_cnt;
+ spin_unlock(&ref_cnt_lock);
+
+ VERBOSE("Exiting the test (%u CPUs in the test now)\n", cnt);
+
+ return cnt;
+}
+
/*
 * Return the number of CPUs currently taking part in the test.
 * Lock-free read of a volatile counter; the value may be stale by the
 * time the caller uses it.
 */
unsigned int tftf_get_ref_cnt(void)
{
	return ref_cnt;
}
+
/*
 * One-time initialisation of the CPU affinity map. Must be called exactly
 * once, on the primary CPU, before any secondary is brought up.
 */
void tftf_init_cpus_status_map(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	/* Check only primary does the initialisation */
	assert((mpid & MPID_MASK) == tftf_primary_core);

	/* Check init is done only once */
	assert(!cpus_status_init_done);

	cpus_status_init_done = 1;

	/*
	 * cpus_status_map already initialised to zero as part of BSS init,
	 * just set the primary to ON state
	 */
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
}
+
/*
 * Mark the calling CPU as ON in the affinity map.
 * Spins until the booting CPU has moved this core out of the OFF state,
 * then requires the state to be ON_PENDING.
 */
void tftf_set_cpu_online(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	/*
	 * Wait here till `tftf_try_cpu_on` has had a chance to update the
	 * CPU state.
	 */
	while (cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_OFF)
		;

	spin_lock(&cpus_status_map[core_pos].lock);
	assert(cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON_PENDING);
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
	spin_unlock(&cpus_status_map[core_pos].lock);
}
+
/*
 * Mark the calling CPU as OFF in the affinity map.
 * The CPU must currently be seen as ON.
 */
void tftf_set_cpu_offline(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	spin_lock(&cpus_status_map[core_pos].lock);

	assert(tftf_is_cpu_online(mpid));
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF;
	spin_unlock(&cpus_status_map[core_pos].lock);
}
+
+unsigned int tftf_is_cpu_online(unsigned int mpid)
+{
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
+unsigned int tftf_is_core_pos_online(unsigned int core_pos)
+{
+ return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
+}
+
/*
 * Power on `target_cpu` through PSCI CPU_ON and arrange for it to enter
 * `entrypoint` once the framework has initialised it.
 *
 * Returns PSCI_E_SUCCESS if the CPU was (or already is being) booted,
 * PSCI_E_ALREADY_ON if the framework sees it online, or the error code
 * from the PSCI CPU_ON call.
 */
int32_t tftf_cpu_on(u_register_t target_cpu,
		uintptr_t entrypoint,
		u_register_t context_id)
{
	int32_t ret;
	tftf_affinity_info_t cpu_state;
	unsigned int core_pos = platform_get_core_pos(target_cpu);

	spin_lock(&cpus_status_map[core_pos].lock);
	cpu_state = cpus_status_map[core_pos].state;

	if (cpu_state == TFTF_AFFINITY_STATE_ON) {
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_ALREADY_ON;
	}

	if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_SUCCESS;
	}

	assert(cpu_state == TFTF_AFFINITY_STATE_OFF);

	/*
	 * Retry while the PSCI implementation still reports the CPU as on:
	 * the target may be in the middle of completing a power-down.
	 */
	do {
		ret = tftf_psci_cpu_on(target_cpu,
				(uintptr_t) tftf_hotplug_entry,
				context_id);

		/* Check if multiple CPU_ON calls are done for same CPU */
		assert(ret != PSCI_E_ON_PENDING);
	} while (ret == PSCI_E_ALREADY_ON);

	if (ret == PSCI_E_SUCCESS) {
		/*
		 * Populate the test entry point for this core.
		 * This is the address where the core will jump to once the framework
		 * has finished initialising it.
		 */
		test_entrypoint[core_pos] = (test_function_t) entrypoint;

		cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
		spin_unlock(&cpus_status_map[core_pos].lock);
	} else {
		spin_unlock(&cpus_status_map[core_pos].lock);
		ERROR("Failed to boot CPU 0x%llx (%d)\n",
			(unsigned long long)target_cpu, ret);
	}

	return ret;
}
+
/*
 * Single-shot variant of tftf_cpu_on(): issue one PSCI CPU_ON for
 * `target_cpu` without retrying, and update the affinity map on success.
 * Returns the PSCI CPU_ON return code.
 */
int32_t tftf_try_cpu_on(u_register_t target_cpu,
		uintptr_t entrypoint,
		u_register_t context_id)
{
	int32_t ret;
	unsigned int core_pos = platform_get_core_pos(target_cpu);

	ret = tftf_psci_cpu_on(target_cpu,
			(uintptr_t) tftf_hotplug_entry,
			context_id);

	if (ret == PSCI_E_SUCCESS) {
		spin_lock(&cpus_status_map[core_pos].lock);
		assert(cpus_status_map[core_pos].state ==
			TFTF_AFFINITY_STATE_OFF);
		cpus_status_map[core_pos].state =
			TFTF_AFFINITY_STATE_ON_PENDING;

		spin_unlock(&cpus_status_map[core_pos].lock);

		/*
		 * Populate the test entry point for this core.
		 * This is the address where the core will jump to once the
		 * framework has finished initialising it.
		 *
		 * NOTE(review): unlike tftf_cpu_on(), the entry point is
		 * published only after the state changes to ON_PENDING —
		 * verify the booting core cannot consume test_entrypoint
		 * before this store.
		 */
		test_entrypoint[core_pos] = (test_function_t) entrypoint;
	}

	return ret;
}
+
/*
 * Prepare the core to power off. Any driver which needs to perform specific
 * tasks before powering off a CPU, e.g. migrating interrupts to another
 * core, can implement a function and call it from here.
 */
static void tftf_prepare_cpu_off(void)
{
	/*
	 * Bare minimum before turning this CPU off: mask interrupts at the
	 * PE and disable its GIC CPU interface.
	 */
	disable_irq();
	arm_gic_disable_interrupts_local();
}
+
/* Undo the effects of tftf_prepare_cpu_off(). */
static void tftf_revert_cpu_off(void)
{
	arm_gic_enable_interrupts_local();
	enable_irq();
}
+
/*
 * Power the calling CPU off through PSCI CPU_OFF.
 * On success this never returns; on failure it restores the framework's
 * view of the core and returns the PSCI error code (expected: DENIED).
 */
int32_t tftf_cpu_off(void)
{
	int32_t ret;

	tftf_prepare_cpu_off();
	tftf_set_cpu_offline();

	INFO("Powering off\n");

	/* Flush console before the last CPU is powered off. */
	if (tftf_get_ref_cnt() == 0)
		console_flush();

	/* Power off the CPU */
	ret = tftf_psci_cpu_off();

	/* Only reached when the CPU_OFF call failed. */
	ERROR("Failed to power off (%d)\n", ret);

	/*
	 * PSCI CPU_OFF call does not return when successful.
	 * Otherwise, it should return the PSCI error code 'DENIED'.
	 */
	assert(ret == PSCI_E_DENIED);

	/*
	 * The CPU failed to power down since we returned from
	 * tftf_psci_cpu_off(). So we need to adjust the framework's view of
	 * the core by marking it back online.
	 */
	tftf_set_cpu_online();
	tftf_revert_cpu_off();

	return ret;
}
+
/*
 * C entry point for a CPU that has just been powered up.
 * Initialises the core, marks it online and hands control to the test
 * session; never returns.
 */
void __dead2 tftf_warm_boot_main(void)
{
	/* Initialise the CPU */
	tftf_arch_setup();
	arm_gic_setup_local();

	/* Enable the SGI used by the timer management framework */
	tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);

	enable_irq();

	INFO("Booting\n");

	tftf_set_cpu_online();

	/* Enter the test session */
	run_tests();

	/* Should never reach this point */
	bug_unreachable();
}
diff --git a/lib/power_management/suspend/aarch32/asm_tftf_suspend.S b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
new file mode 100644
index 000000000..cc59e7dbe
--- /dev/null
+++ b/lib/power_management/suspend/aarch32/asm_tftf_suspend.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
/*
 * Saves CPU state for entering suspend. This saves callee registers on stack,
 * and allocates space on the stack to save the CPU specific registers for
 * coming out of suspend.
 *
 * r0 contains a pointer to tftf_suspend_context structure.
 */
func __tftf_suspend
	/* Preserve the AAPCS callee-saved registers. */
	push	{r4 - r12, lr}
	mov	r2, sp
	sub	sp, sp, #SUSPEND_CTX_SZ
	mov	r1, sp
	/*
	 * r1 now points to struct tftf_suspend_ctx allocated on the stack.
	 * Record the pre-allocation stack pointer so the resume path can
	 * locate the saved callee registers.
	 */
	str	r2, [r1, #SUSPEND_CTX_SP_OFFSET]
	/* r0 still holds the power_state argument for tftf_enter_suspend. */
	bl	tftf_enter_suspend

	/*
	 * If execution reaches this point, the suspend call was either
	 * a suspend to standby call or an invalid suspend call.
	 * In case of suspend to powerdown, execution will instead resume in
	 * __tftf_cpu_resume_ep().
	 */
	add	sp, sp, #SUSPEND_CTX_SZ
	pop	{r4 - r12, lr}
	bx	lr
endfunc __tftf_suspend

/*
 * Save the Hyp-mode translation/system control registers (HMAIR0, HCR,
 * HTTBR, HTCR, HVBAR, HSCTLR) into the buffer pointed to by r0.
 */
func __tftf_save_arch_context
	ldcopr	r1, HMAIR0
	ldcopr	r2, HCR
	stm	r0!, {r1, r2}
	ldcopr16	r1, r2, HTTBR_64
	stm	r0!, {r1, r2}
	ldcopr	r1, HTCR
	ldcopr	r2, HVBAR
	ldcopr	r3, HSCTLR
	stm	r0, {r1, r2, r3}
	bx	lr
endfunc __tftf_save_arch_context

/*
 * Restore CPU register context
 * r0 -- Should contain the context pointer
 */
func __tftf_cpu_resume_ep
	/* Invalidate local tlb entries before turning on MMU */
	stcopr	r0, TLBIALLH
	mov	r4, r0
	ldm	r0!, {r1, r2}
	stcopr	r1, HMAIR0
	stcopr	r2, HCR
	ldm	r0!, {r1, r2}
	stcopr16	r1, r2, HTTBR_64
	ldm	r0, {r1, r2, r3}
	stcopr	r1, HTCR
	stcopr	r2, HVBAR

	/*
	 * TLB invalidations need to be completed before enabling MMU
	 */
	dsb	nsh
	stcopr	r3, HSCTLR
	/* Ensure the MMU enable takes effect immediately */
	isb

	mov	r0, r4
	/* Switch back to the stack captured in __tftf_suspend(). */
	ldr	r2, [r0, #SUSPEND_CTX_SP_OFFSET]
	mov	sp, r2
	/* Restore the driver state only if it was saved before suspend. */
	ldr	r1, [r0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
	cmp	r1, #0
	beq	skip_sys_restore
	bl	tftf_restore_system_ctx
skip_sys_restore:
	pop	{r4 - r12, lr}
	mov	r0, #PSCI_E_SUCCESS
	bx	lr
endfunc __tftf_cpu_resume_ep
diff --git a/lib/power_management/suspend/aarch64/asm_tftf_suspend.S b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
new file mode 100644
index 000000000..692baded6
--- /dev/null
+++ b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <psci.h>
+#include "../suspend_private.h"
+
+ .global __tftf_suspend
+ .global __tftf_save_arch_context
+ .global __tftf_cpu_resume_ep
+
+ .section .text, "ax"
+
/*
 * Saves CPU state for entering suspend. This saves callee registers on stack,
 * and allocates space on the stack to save the CPU specific registers for
 * coming out of suspend.
 *
 * x0 contains a pointer to tftf_suspend_context structure.
 */
func __tftf_suspend
	/* Preserve the AAPCS64 callee-saved registers (x19-x29, lr). */
	stp	x29, x30, [sp, #-96]!
	stp	x19, x20, [sp, #16]
	stp	x21, x22, [sp, #32]
	stp	x23, x24, [sp, #48]
	stp	x25, x26, [sp, #64]
	stp	x27, x28, [sp, #80]
	mov	x2, sp
	sub	sp, sp, #SUSPEND_CTX_SZ
	mov	x1, sp
	/*
	 * x1 now points to struct tftf_suspend_ctx allocated on the stack.
	 * Record the pre-allocation stack pointer so the resume path can
	 * locate the saved callee registers.
	 */
	str	x2, [x1, #SUSPEND_CTX_SP_OFFSET]
	bl	tftf_enter_suspend

	/*
	 * If execution reaches this point, the suspend call was either
	 * a suspend to standby call or an invalid suspend call.
	 * In case of suspend to powerdown, execution will instead resume in
	 * __tftf_cpu_resume_ep().
	 */
	add	sp, sp, #SUSPEND_CTX_SZ
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, x30, [sp], #96
	ret
endfunc __tftf_suspend

/*
 * Save the translation/system control registers of the current EL
 * (EL1 or EL2) into the buffer pointed to by x0.
 */
func __tftf_save_arch_context
	JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
1:	mrs	x1, mair_el1
	mrs	x2, cpacr_el1
	mrs	x3, ttbr0_el1
	mrs	x4, tcr_el1
	mrs	x5, vbar_el1
	mrs	x6, sctlr_el1
	stp	x1, x2, [x0]
	stp	x3, x4, [x0, #16]
	stp	x5, x6, [x0, #32]
	ret

2:	mrs	x1, mair_el2
	mrs	x2, hcr_el2
	mrs	x3, ttbr0_el2
	mrs	x4, tcr_el2
	mrs	x5, vbar_el2
	mrs	x6, sctlr_el2
	stp	x1, x2, [x0]
	stp	x3, x4, [x0, #16]
	stp	x5, x6, [x0, #32]
	ret
endfunc __tftf_save_arch_context

/*
 * Restore CPU register context
 * X0 -- Should contain the context pointer
 */
func __tftf_cpu_resume_ep
	JUMP_EL1_OR_EL2 x1, 1f, 2f, dead
1:	/* Invalidate local tlb entries before turning on MMU */
	tlbi	vmalle1
	ldp	x1, x2, [x0]
	ldp	x3, x4, [x0, #16]
	ldp	x5, x6, [x0, #32]
	msr	mair_el1, x1
	msr	cpacr_el1, x2
	msr	ttbr0_el1, x3
	msr	tcr_el1, x4
	msr	vbar_el1, x5
	/*
	 * TLB invalidations need to be completed before enabling MMU
	 */
	dsb	nsh
	msr	sctlr_el1, x6
	/* Ensure the MMU enable takes effect immediately */
	isb
	b	restore_callee_regs

	/* Invalidate local tlb entries before turning on MMU */
2:	tlbi	alle2
	ldp	x1, x2, [x0]
	ldp	x3, x4, [x0, #16]
	ldp	x5, x6, [x0, #32]
	msr	mair_el2, x1
	msr	hcr_el2, x2
	msr	ttbr0_el2, x3
	msr	tcr_el2, x4
	msr	vbar_el2, x5
	/*
	 * TLB invalidations need to be completed before enabling MMU
	 */
	dsb	nsh
	msr	sctlr_el2, x6
	/* Ensure the MMU enable takes effect immediately */
	isb

restore_callee_regs:
	/* Switch back to the stack captured in __tftf_suspend(). */
	ldr	x2, [x0, #SUSPEND_CTX_SP_OFFSET]
	mov	sp, x2
	/* Restore the driver state only if it was saved before suspend. */
	ldr	w1, [x0, #SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET]
	cbz	w1, skip_sys_restore
	bl	tftf_restore_system_ctx
skip_sys_restore:
	ldp	x19, x20, [sp, #16]	/* Restore the callee saved registers */
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, x30, [sp], #96
	mov	x0, PSCI_E_SUCCESS
	ret
endfunc __tftf_cpu_resume_ep

/* Parking loop for an unexpected exception level. */
dead:
	b	.
diff --git a/lib/power_management/suspend/suspend_private.h b/lib/power_management/suspend/suspend_private.h
new file mode 100644
index 000000000..dfc2e930e
--- /dev/null
+++ b/lib/power_management/suspend/suspend_private.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
#ifndef __SUSPEND_PRIV_H__
#define __SUSPEND_PRIV_H__

/*
 * Size of, and offsets into, tftf_suspend_ctx_t as seen by the assembly
 * suspend/resume helpers. The CASSERTs at the end of this file check
 * that they match the C layout.
 */
#define SUSPEND_CTX_SZ 64
#define SUSPEND_CTX_SP_OFFSET 48
#define SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET 56

#ifndef __ASSEMBLY__
#include <cassert.h>
#include <power_management.h>
#include <stdint.h>
#include <string.h>
#include <types.h>

/* Number of architectural registers saved in arch_ctx_regs. */
#define NR_CTX_REGS 6

/*
 * struct tftf_suspend_ctx represents the architecture context to
 * be saved and restored while entering suspend and coming out.
 * It must be 16-byte aligned since it is allocated on the stack, which must be
 * 16-byte aligned on ARMv8 (AArch64). Even though the alignment requirement
 * is not present in AArch32, we use the same alignment and register width as
 * it allows the same structure to be reused for AArch32.
 */
typedef struct tftf_suspend_context {
	uint64_t arch_ctx_regs[NR_CTX_REGS];
	uint64_t stack_pointer;
	/*
	 * Whether the system context is saved and and needs to be restored.
	 * Note that the system context itself is not saved in this structure.
	 */
	unsigned int save_system_context;
} __aligned(16) tftf_suspend_ctx_t;

/*
 * Saves callee save registers on the stack
 * Allocate space on stack for CPU context regs
 * Enters suspend by calling tftf_enter_suspend.
 * power state: PSCI power state to be sent via SMC
 * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
 */
unsigned int __tftf_suspend(const suspend_info_t *power_state);

/*
 * Saves the architecture context of CPU in the memory
 * tftf_suspend_context: Pointer to the location for saving the context
 */
void __tftf_save_arch_context(struct tftf_suspend_context *ctx);

/*
 * Calls __tftf_save_arch_context to saves arch context of cpu to the memory
 * pointed by ctx
 * Enters suspend by calling the SMC
 * power state: PSCI power state to be sent via SMC
 * ctx: Pointer to the location where suspend context can be stored
 * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
 */
int32_t tftf_enter_suspend(const suspend_info_t *power_state,
			   tftf_suspend_ctx_t *ctx);

/*
 * Invokes the appropriate driver functions in the TFTF framework
 * to save their context prior to a system suspend.
 */
void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx);

/*
 * Invokes the appropriate driver functions in the TFTF framework
 * to restore their context on wake-up from system suspend.
 */
void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx);

/*
 * Restores the CPU arch context and callee registers from the location pointed
 * by X0(context ID).
 * Returns: PSCI_E_SUCCESS
 */
unsigned int __tftf_cpu_resume_ep(void);

/* Assembler asserts to verify #defines of offsets match as seen by compiler */
CASSERT(SUSPEND_CTX_SZ == sizeof(tftf_suspend_ctx_t),
	assert_suspend_context_size_mismatch);
CASSERT(SUSPEND_CTX_SP_OFFSET == __builtin_offsetof(tftf_suspend_ctx_t, stack_pointer),
	assert_stack_pointer_location_mismatch_in_suspend_ctx);
CASSERT(SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET ==
	__builtin_offsetof(tftf_suspend_ctx_t, save_system_context),
	assert_save_sys_ctx_mismatch_in_suspend_ctx);
#endif /* __ASSEMBLY__ */

#endif /* __SUSPEND_PRIV_H__ */
diff --git a/lib/power_management/suspend/tftf_suspend.c b/lib/power_management/suspend/tftf_suspend.c
new file mode 100644
index 000000000..75c2ade06
--- /dev/null
+++ b/lib/power_management/suspend/tftf_suspend.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdint.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include "suspend_private.h"
+
+int32_t tftf_enter_suspend(const suspend_info_t *info,
+ tftf_suspend_ctx_t *ctx)
+{
+ smc_args cpu_suspend_args = {
+ info->psci_api,
+ info->power_state,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_args system_suspend_args = {
+ info->psci_api,
+ (uintptr_t)__tftf_cpu_resume_ep,
+ (u_register_t)ctx
+ };
+
+ smc_ret_values rc;
+
+ if (info->save_system_context) {
+ ctx->save_system_context = 1;
+ tftf_save_system_ctx(ctx);
+ } else
+ ctx->save_system_context = 0;
+
+ /*
+ * Save the CPU context. It will be restored in resume path in
+ * __tftf_cpu_resume_ep().
+ */
+ __tftf_save_arch_context(ctx);
+
+ /*
+ * Flush the context that must be retrieved with MMU off
+ */
+ flush_dcache_range((u_register_t)ctx, sizeof(*ctx));
+
+ if (info->psci_api == SMC_PSCI_CPU_SUSPEND)
+ rc = tftf_smc(&cpu_suspend_args);
+ else
+ rc = tftf_smc(&system_suspend_args);
+
+ /*
+ * If execution reaches this point, The above SMC call was an invalid
+ * call or a suspend to standby call. In both cases the CPU does not
+ * power down so there is no need to restore the context.
+ */
+ return rc.ret0;
+}
+
/*
 * Restore the shared (system-wide) context on wake-up from a system suspend.
 * Re-runs the early platform setup, then restores the global GIC state and
 * the framework timer's GIC configuration.
 * ctx: suspend context; must have been populated with save_system_context set.
 */
void tftf_restore_system_ctx(tftf_suspend_ctx_t *ctx)
{
	assert(ctx != NULL);
	assert(ctx->save_system_context);

	/*
	 * TODO: Check if there is a need for separate platform
	 * API for resume.
	 */

	tftf_early_platform_setup();

	INFO("Restoring system context\n");

	/* restore the global GIC context */
	arm_gic_restore_context_global();
	tftf_timer_gic_state_restore();
}
+
/*
 * Save the shared (system-wide) context before a system suspend.
 * Currently only the global GIC context needs explicit saving.
 * ctx: suspend context; must have save_system_context set by the caller.
 */
void tftf_save_system_ctx(tftf_suspend_ctx_t *ctx)
{
	assert(ctx != NULL);
	assert(ctx->save_system_context);

	/* Nothing to do here currently */
	INFO("Saving system context\n");

	/* Save the global GIC context */
	arm_gic_save_context_global();
}
+
+int tftf_suspend(const suspend_info_t *info)
+{
+ int32_t rc;
+ uint64_t flags;
+
+ flags = read_daif();
+
+ disable_irq();
+
+ INFO("Going into suspend state\n");
+
+ /* Save the local GIC context */
+ arm_gic_save_context_local();
+
+ rc = __tftf_suspend(info);
+
+ /* Restore the local GIC context */
+ arm_gic_restore_context_local();
+
+ /*
+ * DAIF flags should be restored last because it could be an issue
+ * to unmask exceptions before that point, e.g. if GIC must be
+ * reconfigured upon resume from suspend.
+ */
+ write_daif(flags);
+
+ INFO("Resumed from suspend state\n");
+
+ return rc;
+}
diff --git a/lib/psci/psci.c b/lib/psci/psci.c
new file mode 100644
index 000000000..520c72435
--- /dev/null
+++ b/lib/psci/psci.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
/* Non-zero once tftf_detect_psci_pstate_format() has run */
static unsigned int pstate_format_detected;
/* Detected power state format (CPU_SUSPEND_FEAT_PSTATE_FORMAT_* value) */
static unsigned int pstate_format;
/* Non-zero if the platform uses the NULL State-ID power state encoding */
static unsigned int is_state_id_null;
+
/*
 * Table of the PSCI functions known to the framework. The boolean passed
 * to DEFINE_PSCI_FUNC presumably marks whether the function is mandatory
 * in the PSCI specification -- TODO confirm against the macro definition.
 */
const psci_function_t psci_functions[PSCI_NUM_CALLS] = {
	DEFINE_PSCI_FUNC(PSCI_FEATURES, true),
	DEFINE_PSCI_FUNC(PSCI_VERSION, true),
	DEFINE_PSCI_FUNC(PSCI_CPU_SUSPEND_AARCH32, true),
	DEFINE_PSCI_FUNC(PSCI_CPU_SUSPEND_AARCH64, true),
	DEFINE_PSCI_FUNC(PSCI_CPU_OFF, true),
	DEFINE_PSCI_FUNC(PSCI_CPU_ON_AARCH32, true),
	DEFINE_PSCI_FUNC(PSCI_CPU_ON_AARCH64, true),
	DEFINE_PSCI_FUNC(PSCI_AFFINITY_INFO_AARCH32, true),
	DEFINE_PSCI_FUNC(PSCI_AFFINITY_INFO_AARCH64, true),
	DEFINE_PSCI_FUNC(PSCI_SYSTEM_OFF, true),
	DEFINE_PSCI_FUNC(PSCI_SYSTEM_RESET, true),
	DEFINE_PSCI_FUNC(PSCI_MIG_INFO_TYPE, false),
	DEFINE_PSCI_FUNC(PSCI_MIG_INFO_UP_CPU_AARCH32, false),
	DEFINE_PSCI_FUNC(PSCI_MIG_INFO_UP_CPU_AARCH64, false),
	DEFINE_PSCI_FUNC(PSCI_MIG_AARCH32, false),
	DEFINE_PSCI_FUNC(PSCI_MIG_AARCH64, false),
	DEFINE_PSCI_FUNC(PSCI_CPU_FREEZE, false),
	DEFINE_PSCI_FUNC(PSCI_CPU_DEFAULT_SUSPEND32, false),
	DEFINE_PSCI_FUNC(PSCI_CPU_DEFAULT_SUSPEND64, false),
	DEFINE_PSCI_FUNC(PSCI_CPU_HW_STATE32, false),
	DEFINE_PSCI_FUNC(PSCI_CPU_HW_STATE64, false),
	DEFINE_PSCI_FUNC(PSCI_SYSTEM_SUSPEND32, false),
	DEFINE_PSCI_FUNC(PSCI_SYSTEM_SUSPEND64, false),
	DEFINE_PSCI_FUNC(PSCI_SET_SUSPEND_MODE, false),
	DEFINE_PSCI_FUNC(PSCI_STAT_RESIDENCY32, false),
	DEFINE_PSCI_FUNC(PSCI_STAT_RESIDENCY64, false),
	DEFINE_PSCI_FUNC(PSCI_STAT_COUNT32, false),
	DEFINE_PSCI_FUNC(PSCI_STAT_COUNT64, false),
	DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT, false),
	DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT_CHECK_RANGE32, false),
	DEFINE_PSCI_FUNC(PSCI_MEM_PROTECT_CHECK_RANGE64, false),
	DEFINE_PSCI_FUNC(PSCI_RESET2_AARCH32, false),
	DEFINE_PSCI_FUNC(PSCI_RESET2_AARCH64, false),
};
+
+int32_t tftf_psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entry_point_address,
+ u_register_t context_id)
+{
+ smc_args args = {
+ SMC_PSCI_CPU_ON,
+ target_cpu,
+ entry_point_address,
+ context_id
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_cpu_off(void)
+{
+ smc_args args = { SMC_PSCI_CPU_OFF };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+
+u_register_t tftf_psci_stat_residency(u_register_t target_cpu,
+ uint32_t power_state)
+{
+ smc_args args = {
+ SMC_PSCI_STAT_RESIDENCY,
+ target_cpu,
+ power_state,
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+u_register_t tftf_psci_stat_count(u_register_t target_cpu,
+ uint32_t power_state)
+{
+ smc_args args = {
+ SMC_PSCI_STAT_COUNT,
+ target_cpu,
+ power_state,
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level)
+{
+ smc_ret_values ret_vals;
+
+ smc_args args = {
+ SMC_PSCI_AFFINITY_INFO,
+ target_affinity,
+ lowest_affinity_level
+ };
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+int32_t tftf_psci_node_hw_state(u_register_t target_cpu, uint32_t power_level)
+{
+ smc_args args = {
+ SMC_PSCI_CPU_HW_STATE,
+ target_cpu,
+ power_level
+ };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int32_t tftf_get_psci_feature_info(uint32_t psci_func_id)
+{
+ smc_args args = {
+ SMC_PSCI_FEATURES,
+ psci_func_id
+ };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
/*
 * Build a composite State-ID covering power levels 0 .. @affinity_level.
 * For each level, look up the platform's state table and pick the entry
 * whose is_pwrdown flag matches @state_type; levels with no table or no
 * matching entry get PLAT_PSCI_DUMMY_STATE_ID and make the overall result
 * PSCI_E_INVALID_PARAMS (the composite is still fully assembled).
 * state_id: output; must not be NULL.
 * Returns PSCI_E_SUCCESS if every level matched, else PSCI_E_INVALID_PARAMS.
 */
int tftf_psci_make_composite_state_id(uint32_t affinity_level,
		uint32_t state_type, uint32_t *state_id)
{
	unsigned int found_entry, i;
	int ret = PSCI_E_SUCCESS;
	const plat_state_prop_t *state_prop;

	assert(state_id);

	*state_id = 0;
	for (i = 0; i <= affinity_level; i++) {
		state_prop = plat_get_state_prop(i);
		/* No state table for this level: insert a dummy State-ID */
		if (!state_prop) {
			*state_id |= psci_make_local_state_id(i,
					PLAT_PSCI_DUMMY_STATE_ID);
			ret = PSCI_E_INVALID_PARAMS;
			continue;
		}
		found_entry = 0;

		/* Table is terminated by an entry with state_ID == 0 */
		while (state_prop->state_ID) {
			if (state_type == state_prop->is_pwrdown) {
				*state_id |= psci_make_local_state_id(i,
						state_prop->state_ID);
				found_entry = 1;
				break;
			}
			state_prop++;
		}
		/* No entry of the requested type at this level */
		if (!found_entry) {
			*state_id |= psci_make_local_state_id(i,
					PLAT_PSCI_DUMMY_STATE_ID);
			ret = PSCI_E_INVALID_PARAMS;
		}
	}

	return ret;
}
+
+static unsigned int tftf_psci_get_pstate_format(void)
+{
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_CPU_SUSPEND);
+
+ /*
+ * If error is returned, then it probably means that the PSCI version
+ * is less than 1.0 and only the original format is supported. In case
+ * the version is 1.0 or higher, then PSCI FEATURES which is a
+ * mandatory API is not implemented which implies that only the
+ * original format is supported.
+ */
+ if (ret == PSCI_E_NOT_SUPPORTED)
+ return CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+
+ /* Treat the invalid return value as PSCI FEATURES not supported */
+ if ((ret & ~CPU_SUSPEND_FEAT_VALID_MASK) != 0)
+ return CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+
+ return (ret >> CPU_SUSPEND_FEAT_PSTATE_FORMAT_SHIFT) & 0x1;
+}
+
+/* Make the power state in the original format */
+uint32_t tftf_make_psci_pstate(uint32_t affinity_level,
+ uint32_t state_type,
+ uint32_t state_id)
+{
+ uint32_t power_state;
+
+ assert(psci_state_type_valid(state_type));
+
+ assert(pstate_format_detected);
+
+ if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_EXTENDED) {
+ assert(psci_state_id_ext_valid(state_id));
+ power_state = (state_type << PSTATE_TYPE_SHIFT_EXT)
+ | (state_id << PSTATE_ID_SHIFT_EXT);
+ } else {
+ assert(psci_affinity_level_valid(affinity_level));
+ assert(psci_state_id_valid(state_id));
+ power_state = (affinity_level << PSTATE_AFF_LVL_SHIFT)
+ | (state_type << PSTATE_TYPE_SHIFT);
+ if (!is_state_id_null)
+ power_state |= (state_id << PSTATE_ID_SHIFT);
+ }
+
+ return power_state;
+}
+
+
/*
 * Detect which power state format the firmware uses and, for the original
 * format, whether it uses NULL State-ID encoding. Must be called before
 * tftf_make_psci_pstate() / tftf_is_psci_state_id_null().
 * The probe suspends the calling CPU to standby with a pending SGI as the
 * wake-up source, so it briefly masks IRQs and reprograms SGI 0.
 */
void tftf_detect_psci_pstate_format(void)
{
	uint32_t power_state;
	unsigned int ret;

	pstate_format = tftf_psci_get_pstate_format();

	/*
	 * If the power state format is extended format, then the state-ID
	 * must use recommended encoding.
	 */
	if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_EXTENDED) {
		pstate_format_detected = 1;
		INFO("Extended PSCI power state format detected\n");
		return;
	}

	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);

	/*
	 * Mask IRQ to prevent the interrupt handler being invoked
	 * and clearing the interrupt. A pending interrupt will cause this
	 * CPU to wake-up from suspend.
	 */
	disable_irq();

	/* Configure an SGI to wake-up from suspend  */
	tftf_send_sgi(IRQ_NS_SGI_0,
		platform_get_core_pos(read_mpidr_el1() & MPID_MASK));


	/*
	 * Try to detect if the platform uses NULL State-ID encoding by sending
	 * PSCI_SUSPEND call with the NULL State-ID encoding. If the call
	 * succeeds then the platform uses NULL State-ID encoding. Else it
	 * uses the recommended encoding for State-ID.
	 */
	power_state = (PSTATE_AFF_LVL_0 << PSTATE_AFF_LVL_SHIFT)
			| (PSTATE_TYPE_STANDBY << PSTATE_TYPE_SHIFT);

	ret = tftf_cpu_suspend(power_state);

	/* Unmask the IRQ to let the interrupt handler to execute */
	enable_irq();
	isb();

	tftf_irq_disable(IRQ_NS_SGI_0);

	/*
	 * The NULL State-ID returned SUCCESS. Hence State-ID is NULL
	 * for the power state format.
	 */
	if (ret == PSCI_E_SUCCESS) {
		is_state_id_null = 1;
		INFO("Original PSCI power state format with NULL State-ID detected\n");
	} else
		INFO("Original PSCI power state format detected\n");


	pstate_format_detected = 1;
}
+
+unsigned int tftf_is_psci_state_id_null(void)
+{
+ assert(pstate_format_detected);
+
+ if (pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL)
+ return is_state_id_null;
+ else
+ return 0; /* Extended state ID does not support null format */
+}
+
+unsigned int tftf_is_psci_pstate_format_original(void)
+{
+ assert(pstate_format_detected);
+
+ return pstate_format == CPU_SUSPEND_FEAT_PSTATE_FORMAT_ORIGINAL;
+}
+
+unsigned int tftf_get_psci_version(void)
+{
+ smc_args args = { SMC_PSCI_VERSION };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+
+ return ret.ret0;
+}
+
+int tftf_is_valid_psci_version(unsigned int version)
+{
+ if (version != PSCI_VERSION(1, 1) &&
+ version != PSCI_VERSION(1, 0) &&
+ version != PSCI_VERSION(0, 2) &&
+ version != PSCI_VERSION(0, 1)) {
+ return 0;
+ }
+ return 1;
+}
diff --git a/lib/sdei/sdei.c b/lib/sdei/sdei.c
new file mode 100644
index 000000000..846b96eb4
--- /dev/null
+++ b/lib/sdei/sdei.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <assert.h>
+#include <sdei.h>
+#include <smccc.h>
+#include <stdint.h>
+#include <tftf_lib.h>
+
+int64_t sdei_version(void)
+{
+ smc_args args = { SDEI_VERSION };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_interrupt_bind(int intr, struct sdei_intr_ctx *intr_ctx)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ assert(intr_ctx);
+
+ intr_ctx->priority = arm_gic_get_intr_priority(intr);
+ intr_ctx->num = intr;
+ intr_ctx->enabled = arm_gic_intr_enabled(intr);
+ arm_gic_intr_disable(intr);
+
+ args.arg0 = SDEI_INTERRUPT_BIND;
+ args.arg1 = intr;
+ ret = tftf_smc(&args);
+ if (ret.ret0 < 0) {
+ arm_gic_set_intr_priority(intr_ctx->num, intr_ctx->priority);
+ if (intr_ctx->enabled)
+ arm_gic_intr_enable(intr_ctx->num);
+ }
+
+ return ret.ret0;
+}
+
+int64_t sdei_interrupt_release(int ev, const struct sdei_intr_ctx *intr_ctx)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ assert(intr_ctx);
+
+ args.arg0 = SDEI_INTERRUPT_RELEASE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ if (ret.ret0 == 0) {
+ arm_gic_set_intr_priority(intr_ctx->num, intr_ctx->priority);
+ if (intr_ctx->enabled)
+ arm_gic_intr_enable(intr_ctx->num);
+ }
+
+ return ret.ret0;
+}
+
+int64_t sdei_event_register(int ev, sdei_handler_t *ep,
+ uint64_t ep_arg, int flags, uint64_t mpidr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_REGISTER;
+ args.arg1 = ev;
+ args.arg2 = (u_register_t)ep;
+ args.arg3 = ep_arg;
+ args.arg4 = flags;
+ args.arg5 = mpidr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_unregister(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_UNREGISTER;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_enable(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_ENABLE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_disable(int ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_DISABLE;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_pe_mask(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PE_MASK;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_pe_unmask(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PE_UNMASK;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_private_reset(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_PRIVATE_RESET;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_shared_reset(void)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_SHARED_RESET;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_signal(uint64_t mpidr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_SIGNAL;
+ args.arg1 = 0; /* must be event 0 */
+ args.arg2 = mpidr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_status(int32_t ev)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_STATUS;
+ args.arg1 = ev;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_routing_set(int32_t ev, uint64_t flags)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_ROUTING_SET;
+ args.arg1 = ev;
+ args.arg2 = flags;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_context(uint32_t param)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_CONTEXT;
+ args.arg1 = param;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_complete(uint32_t flags)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_COMPLETE;
+ args.arg1 = flags;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+int64_t sdei_event_complete_and_resume(uint64_t addr)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = SDEI_EVENT_COMPLETE_AND_RESUME;
+ args.arg1 = addr;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S
new file mode 100644
index 000000000..fe489b6f6
--- /dev/null
+++ b/lib/semihosting/aarch32/semihosting_call.S
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
/*
 * long semihosting_call(unsigned long operation, void *parameter_block);
 * AArch32 semihosting trap: SVC 0x123456 with the operation in r0 and the
 * parameter block pointer in r1; the host's result comes back in r0.
 */
func semihosting_call
	svc	#0x123456
	bx	lr
endfunc semihosting_call
diff --git a/lib/semihosting/aarch64/semihosting_call.S b/lib/semihosting/aarch64/semihosting_call.S
new file mode 100644
index 000000000..dfded2ec6
--- /dev/null
+++ b/lib/semihosting/aarch64/semihosting_call.S
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
/*
 * long semihosting_call(unsigned long operation, void *parameter_block);
 * AArch64 semihosting trap: HLT 0xF000 with the operation in x0 and the
 * parameter block pointer in x1; the host's result comes back in x0.
 */
func semihosting_call
	hlt	#0xf000
	ret
endfunc semihosting_call
diff --git a/lib/semihosting/semihosting.c b/lib/semihosting/semihosting.c
new file mode 100644
index 000000000..cf1080227
--- /dev/null
+++ b/lib/semihosting/semihosting.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <semihosting.h>
+#include <string.h>
+
#ifndef SEMIHOSTING_SUPPORTED
#define SEMIHOSTING_SUPPORTED  1
#endif

/* Arch-specific trap into the semihosting host (aarch32/aarch64 .S files) */
long semihosting_call(unsigned long operation,
			void *system_block_address);

/* Parameter block for SYS_OPEN */
typedef struct {
	const char *file_name;
	unsigned long mode;
	size_t name_length;
} smh_file_open_block_t;

/* Parameter block for SYS_READ / SYS_WRITE */
typedef struct {
	long handle;
	uintptr_t buffer;
	size_t length;
} smh_file_read_write_block_t;

/* Parameter block for SYS_SEEK */
typedef struct {
	long handle;
	ssize_t location;
} smh_file_seek_block_t;

/* Parameter block for SYS_SYSTEM */
typedef struct {
	char *command_line;
	size_t command_length;
} smh_system_block_t;
+
/*
 * Report whether a semihosting connection is available.
 * Currently hard-wired to SEMIHOSTING_SUPPORTED (default 1).
 */
long semihosting_connection_supported(void)
{
	return SEMIHOSTING_SUPPORTED;
}
+
+long semihosting_file_open(const char *file_name, size_t mode)
+{
+ smh_file_open_block_t open_block;
+
+ open_block.file_name = file_name;
+ open_block.mode = mode;
+ open_block.name_length = strlen(file_name);
+
+ return semihosting_call(SEMIHOSTING_SYS_OPEN,
+ (void *) &open_block);
+}
+
+long semihosting_file_seek(long file_handle, ssize_t offset)
+{
+ smh_file_seek_block_t seek_block;
+ long result;
+
+ seek_block.handle = file_handle;
+ seek_block.location = offset;
+
+ result = semihosting_call(SEMIHOSTING_SYS_SEEK,
+ (void *) &seek_block);
+
+ if (result)
+ result = semihosting_call(SEMIHOSTING_SYS_ERRNO, 0);
+
+ return result;
+}
+
/*
 * Read up to *length bytes from @file_handle into @buffer.
 *
 * The semihosting SYS_READ call returns the number of bytes NOT read
 * (0 means the buffer was completely filled). On success, *length is
 * updated to the number of bytes actually read and 0 is returned.
 * Returns -EINVAL for bad arguments or when nothing at all was read;
 * otherwise propagates the raw semihosting status.
 */
long semihosting_file_read(long file_handle, size_t *length, uintptr_t buffer)
{
	smh_file_read_write_block_t read_block;
	long result = -EINVAL;

	if ((length == NULL) || (buffer == (uintptr_t)NULL))
		return result;

	read_block.handle = file_handle;
	read_block.buffer = buffer;
	read_block.length = *length;

	result = semihosting_call(SEMIHOSTING_SYS_READ,
				  (void *) &read_block);

	if (result == *length) {
		/* No bytes were read at all: treat as an error */
		return -EINVAL;
	} else if (result < *length) {
		/* Partial or full read: report the bytes actually read */
		*length -= result;
		return 0;
	} else
		return result;
}
+
/*
 * Write *length bytes from @buffer to the open file @file_handle.
 *
 * The semihosting SYS_WRITE call returns the number of bytes NOT
 * written; *length is overwritten with that value, so 0 means the whole
 * buffer was written. Returns the same value (0 on full success).
 * Returns -EINVAL for bad arguments.
 */
long semihosting_file_write(long file_handle,
			    size_t *length,
			    const uintptr_t buffer)
{
	smh_file_read_write_block_t write_block;

	if ((length == NULL) || (buffer == (uintptr_t)NULL))
		return -EINVAL;

	write_block.handle = file_handle;
	write_block.buffer = (uintptr_t)buffer; /* cast away const */
	write_block.length = *length;

	*length = semihosting_call(SEMIHOSTING_SYS_WRITE,
				   (void *) &write_block);

	return *length;
}
+
+long semihosting_file_close(long file_handle)
+{
+ return semihosting_call(SEMIHOSTING_SYS_CLOSE,
+ (void *) &file_handle);
+}
+
+long semihosting_file_length(long file_handle)
+{
+ return semihosting_call(SEMIHOSTING_SYS_FLEN,
+ (void *) &file_handle);
+}
+
+char semihosting_read_char(void)
+{
+ return semihosting_call(SEMIHOSTING_SYS_READC, NULL);
+}
+
+void semihosting_write_char(char character)
+{
+ semihosting_call(SEMIHOSTING_SYS_WRITEC, (void *) &character);
+}
+
+void semihosting_write_string(char *string)
+{
+ semihosting_call(SEMIHOSTING_SYS_WRITE0, (void *) string);
+}
+
+long semihosting_system(char *command_line)
+{
+ smh_system_block_t system_block;
+
+ system_block.command_line = command_line;
+ system_block.command_length = strlen(command_line);
+
+ return semihosting_call(SEMIHOSTING_SYS_SYSTEM,
+ (void *) &system_block);
+}
+
+long semihosting_get_flen(const char *file_name)
+{
+ long file_handle;
+ size_t length;
+
+ assert(semihosting_connection_supported());
+
+ file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
+ if (file_handle == -1)
+ return file_handle;
+
+ /* Find the length of the file */
+ length = semihosting_file_length(file_handle);
+
+ return semihosting_file_close(file_handle) ? -1 : length;
+}
+
/*
 * Download the host file @file_name into the buffer at @buf (capacity
 * @buf_size bytes) via semihosting.
 * Returns the number of bytes read on success, or a negative error value
 * (-EINVAL for bad arguments, too-small buffer or unreadable length).
 */
long semihosting_download_file(const char *file_name,
			       size_t buf_size,
			       uintptr_t buf)
{
	long ret = -EINVAL;
	size_t length;
	long file_handle;

	/* Null pointer check */
	if (!buf)
		return ret;

	assert(semihosting_connection_supported());

	file_handle = semihosting_file_open(file_name, FOPEN_MODE_RB);
	if (file_handle == -1)
		return ret;

	/*
	 * Find the actual length of the file.
	 * NOTE(review): length is size_t; the == -1 comparison relies on
	 * the error value wrapping to SIZE_MAX -- works, but fragile.
	 */
	length = semihosting_file_length(file_handle);
	if (length == -1)
		goto semihosting_fail;

	/* Signal error if we do not have enough space for the file */
	if (length > buf_size)
		goto semihosting_fail;

	/*
	 * A successful read will return 0 in which case we pass back
	 * the actual number of bytes read. Else we pass a negative
	 * value indicating an error.
	 */
	ret = semihosting_file_read(file_handle, &length, buf);
	if (ret)
		goto semihosting_fail;
	else
		ret = length;

semihosting_fail:
	/* Always close the handle, success or failure */
	semihosting_file_close(file_handle);
	return ret;
}
diff --git a/lib/smc/aarch32/asm_smc.S b/lib/smc/aarch32/asm_smc.S
new file mode 100644
index 000000000..908b8d075
--- /dev/null
+++ b/lib/smc/aarch32/asm_smc.S
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016-2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl asm_tftf_smc32
+
+/* ---------------------------------------------------------------------------
+ * void asm_tftf_smc32(const smc_args *args,
+ * smc_ret_values *smc_ret);
+ * ---------------------------------------------------------------------------
+ */
/*
 * void asm_tftf_smc32(const smc_args *args, smc_ret_values *smc_ret);
 * Loads r0-r7 from @args, issues the SMC, and stores the r0-r3 results
 * into @smc_ret.
 */
func asm_tftf_smc32
	/* Push r9 to keep the stack pointer aligned to 64 bit. */
	push	{r4 - r9}

	/* Store the `smc_ret` pointer in a callee saved register */
	mov	r8, r1

	/* Load values used as arguments for the SMC. */
	ldm	r0, {r0 - r7}

	smc	#0

	/*
	 * The returned values from the SMC are in r0-r3, put them in the
	 * 'smc_ret_values' return structure.
	 */
	stm	r8, {r0 - r3}

	/* Restore the callee-saved registers (r9 was pushed for alignment) */
	pop	{r4 - r9}
	bx	lr
endfunc asm_tftf_smc32
diff --git a/lib/smc/aarch32/smc.c b/lib/smc/aarch32/smc.c
new file mode 100644
index 000000000..dc3d83fbe
--- /dev/null
+++ b/lib/smc/aarch32/smc.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+void asm_tftf_smc32(const smc_args *args,
+ smc_ret_values *smc_ret);
+
+smc_ret_values tftf_smc(const smc_args *args)
+{
+ smc_ret_values ret = {0};
+ asm_tftf_smc32(args, &ret);
+
+ return ret;
+}
diff --git a/lib/smc/aarch64/asm_smc.S b/lib/smc/aarch64/asm_smc.S
new file mode 100644
index 000000000..2b305b9d3
--- /dev/null
+++ b/lib/smc/aarch64/asm_smc.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013-2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl asm_tftf_smc64
+
+ .section .text, "ax"
+
+
+/* ---------------------------------------------------------------------------
+ * smc_ret_values asm_tftf_smc64(uint64_t arg0,
+ * uint64_t arg1,
+ * uint64_t arg2,
+ * uint64_t arg3,
+ * uint64_t arg4,
+ * uint64_t arg5,
+ * uint64_t arg6,
+ * uint64_t arg7);
+ * ---------------------------------------------------------------------------
+ */
/*
 * smc_ret_values asm_tftf_smc64(uint64_t arg0, ..., uint64_t arg7);
 * Issues the SMC with the arguments already in x0-x6, then stores x0-x3
 * into the caller-provided result structure addressed by x8.
 */
func asm_tftf_smc64
	/*
	 * According to the AAPCS64, x8 is the indirect result location
	 * register. It contains the address of the memory block that the caller
	 * has reserved to hold the result, i.e. the smc_ret_values structure
	 * in our case.
	 * x8 might be clobbered across the SMC call so save it on the stack.
	 * Although x8 contains an 8 byte value, we are allocating 16bytes on the stack
	 * to respect 16byte stack-alignment.
	 */
	str	x8, [sp, #-16]!

	/* SMC arguments are already stored in x0-x6 */
	smc	#0

	/* Pop x8 into a caller-saved register (x9) */
	ldr	x9, [sp], #16

	/*
	 * Return values are stored in x0-x3, put them in the 'smc_ret_values'
	 * return structure
	 */
	stp	x0, x1, [x9, #0]
	stp	x2, x3, [x9, #16]
	ret
endfunc asm_tftf_smc64
diff --git a/lib/smc/aarch64/smc.c b/lib/smc/aarch64/smc.c
new file mode 100644
index 000000000..06b841c9f
--- /dev/null
+++ b/lib/smc/aarch64/smc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+smc_ret_values asm_tftf_smc64(u_register_t arg0,
+ u_register_t arg1,
+ u_register_t arg2,
+ u_register_t arg3,
+ u_register_t arg4,
+ u_register_t arg5,
+ u_register_t arg6,
+ u_register_t arg7);
+
+smc_ret_values tftf_smc(const smc_args *args)
+{
+ return asm_tftf_smc64(args->arg0,
+ args->arg1,
+ args->arg2,
+ args->arg3,
+ args->arg4,
+ args->arg5,
+ args->arg6,
+ args->arg7);
+}
diff --git a/lib/stdlib/abort.c b/lib/stdlib/abort.c
new file mode 100644
index 000000000..50780335d
--- /dev/null
+++ b/lib/stdlib/abort.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
/*
 * This is a basic implementation. This could be improved.
 * Logs an error message and calls panic(), which is expected not to
 * return -- TODO confirm panic()'s behavior on this platform.
 */
void abort (void)
{
	ERROR("ABORT\n");
	panic();
}
diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c
new file mode 100644
index 000000000..39f30d12c
--- /dev/null
+++ b/lib/stdlib/assert.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
/*
 * This is a basic implementation. This could be improved.
 * Prints the failing function, line number and assertion text, then
 * spins forever. Note the 'file' argument is accepted but not printed.
 */
void __assert (const char *function, const char *file, unsigned int line,
		const char *assertion)
{
	mp_printf("ASSERT: %s <%d> : %s\n", function, line, assertion);
	while (1);
}
diff --git a/lib/stdlib/mem.c b/lib/stdlib/mem.c
new file mode 100644
index 000000000..ec9dc2dea
--- /dev/null
+++ b/lib/stdlib/mem.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h> /* size_t */
+
/*
 * Fill @count bytes of memory pointed to by @dst with @val.
 * Returns @dst, mirroring the standard library contract.
 */
void *memset(void *dst, int val, size_t count)
{
	char *out = dst;
	size_t i;

	for (i = 0; i < count; i++)
		out[i] = val;

	return dst;
}
+
/*
 * Compare @len bytes of @s1 and @s2.
 *
 * Returns <0, 0 or >0 when @s1 compares less than, equal to or greater
 * than @s2. Bytes are compared as unsigned char, as the C standard
 * requires for memcmp: the previous plain-char comparison produced the
 * wrong sign for bytes >= 0x80 on targets where char is signed.
 */
int memcmp(const void *s1, const void *s2, size_t len)
{
	const unsigned char *s = s1;
	const unsigned char *d = s2;

	while (len--) {
		unsigned char sc = *s++;
		unsigned char dc = *d++;

		if (sc != dc)
			return sc - dc;	/* promoted to int: correct sign */
	}

	return 0;
}
+
/*
 * Copy @len bytes from @src to @dst (regions must not overlap; use
 * memmove for overlapping copies). Returns @dst.
 */
void *memcpy(void *dst, const void *src, size_t len)
{
	char *d = dst;
	const char *s = src;
	size_t i;

	for (i = 0; i < len; i++)
		d[i] = s[i];

	return dst;
}
+
/*
 * Move @len bytes from @src to @dst, handling overlapping regions
 * correctly. Returns @dst.
 */
void *memmove(void *dst, const void *src, size_t len)
{
	/*
	 * The following test makes use of unsigned arithmetic overflow to
	 * more efficiently test the condition !(src <= dst && dst < str+len).
	 * It also avoids the situation where the more explicit test would give
	 * incorrect results were the calculation str+len to overflow (though
	 * that issue is probably moot as such usage is probably undefined
	 * behaviour and a bug anyway.
	 */
	if ((size_t)dst - (size_t)src >= len) {
		/* destination not in source data, so can safely use memcpy */
		return memcpy(dst, src, len);
	} else {
		/* copy backwards... so the overlapping bytes survive */
		const char *end = dst;
		const char *s = (const char *)src + len;
		char *d = (char *)dst + len;
		while (d != end)
			*--d = *--s;
	}
	return dst;
}
+
/*
 * Scan @len bytes of @src for byte value @c.
 *
 * Both @c and the scanned bytes are interpreted as unsigned char, as the
 * C standard requires for memchr: the previous plain-char comparison
 * failed to match values >= 0x80 on targets where char is signed.
 * Returns a pointer to the first matching byte, or NULL if not found.
 */
void *memchr(const void *src, int c, size_t len)
{
	const unsigned char *s = src;
	unsigned char target = (unsigned char)c;

	while (len--) {
		if (*s == target)
			return (void *) s;
		s++;
	}

	return NULL;
}
diff --git a/lib/stdlib/printf.c b/lib/stdlib/printf.c
new file mode 100644
index 000000000..6329157d0
--- /dev/null
+++ b/lib/stdlib/printf.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+
/* Choose max of 512 chars for now. */
#define PRINT_BUFFER_SIZE 512
/*
 * Minimal printf: formats into a fixed-size stack buffer via vsnprintf()
 * and emits it one character at a time through putchar(). Output longer
 * than the buffer is silently truncated.
 * Returns the number of characters written, or EOF if putchar() fails.
 */
int printf(const char *fmt, ...)
{
	va_list args;
	char buf[PRINT_BUFFER_SIZE];
	int count;

	va_start(args, fmt);
	/* Reserve the final byte; explicitly NUL-terminated below */
	vsnprintf(buf, sizeof(buf) - 1, fmt, args);
	va_end(args);

	/* Use putchar directly as 'puts()' adds a newline. */
	buf[PRINT_BUFFER_SIZE - 1] = '\0';
	count = 0;
	while (buf[count])
	{
		if (putchar(buf[count]) != EOF) {
			count++;
		} else {
			count = EOF;
			break;
		}
	}

	return count;
}
diff --git a/lib/stdlib/putchar.c b/lib/stdlib/putchar.c
new file mode 100644
index 000000000..6b6f75ebb
--- /dev/null
+++ b/lib/stdlib/putchar.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <console.h>
+#include <stdio.h>
+
/* Putchar() should either return the character printed or EOF in case of error.
 * Our current console_putc() function assumes success and returns the
 * character. Write all other printing functions in terms of putchar(), if
 * possible, so they all benefit when this is improved.
 */
int putchar(int c)
{
	int res;
	/* console_putc() signals failure with a negative return value */
	if (console_putc((unsigned char)c) >= 0)
		res = c;
	else
		res = EOF;

	return res;
}
diff --git a/lib/stdlib/puts.c b/lib/stdlib/puts.c
new file mode 100644
index 000000000..08283a38c
--- /dev/null
+++ b/lib/stdlib/puts.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+
/*
 * Write string @s followed by a trailing newline via putchar().
 * Returns the number of characters written (including the newline), or
 * EOF if any putchar() call fails.
 */
int puts(const char *s)
{
	int written = 0;

	for (; *s != '\0'; s++) {
		if (putchar(*s) == EOF)
			return EOF;
		written++;
	}

	/* According to the puts(3) manpage, append a trailing newline */
	if (putchar('\n') == EOF)
		return EOF;

	return written + 1;
}
diff --git a/lib/stdlib/rand.c b/lib/stdlib/rand.c
new file mode 100644
index 000000000..59cb79631
--- /dev/null
+++ b/lib/stdlib/rand.c
@@ -0,0 +1,65 @@
+/*-
+ * Portions Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+//__FBSDID("$FreeBSD: src/lib/libc/stdlib/rand.c,v 1.17.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $");
+#include <stdlib.h>
+
+/* PRNG state; seeded by srand() and advanced by rand(). Must never be 0. */
+static unsigned int next = 1;
+
+/** Compute a pseudo-random number.
+ *
+ * Compute x = (7^5 * x) mod (2^31 - 1)
+ * without overflowing 31 bits:
+ * (2^31 - 1) = 127773 * (7^5) + 2836
+ * From "Random number generators: good ones are hard to find",
+ * Park and Miller, Communications of the ACM, vol. 31, no. 10,
+ * October 1988, p. 1195.
+**/
+int
+rand()
+{
+ int hi, lo, x;
+
+ /* Can't be initialized with 0, so use another value. */
+ if (next == 0)
+ next = 123459876;
+ hi = next / 127773;
+ lo = next % 127773;
+ x = 16807 * lo - 2836 * hi;
+ if (x < 0)
+ x += 0x7fffffff;
+ return ((next = x) % ((unsigned int)RAND_MAX + 1));
+}
+
+/* Seed the generator. A seed of 0 is remapped to a non-zero value by rand(). */
+void
+srand(unsigned int seed)
+{
+ next = (unsigned int)seed;
+}
diff --git a/lib/stdlib/strchr.c b/lib/stdlib/strchr.c
new file mode 100644
index 000000000..0963cb4c0
--- /dev/null
+++ b/lib/stdlib/strchr.c
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2013-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <stddef.h>
+#include <string.h>
+
+/*
+ * Return a pointer to the first occurrence of 'ch' (converted to char) in
+ * string 'p', or NULL if it does not occur. Searching for '\0' yields a
+ * pointer to the string's terminator.
+ */
+char *
+strchr(const char *p, int ch)
+{
+ char c;
+
+ c = ch;
+ for (;; ++p) {
+ if (*p == c)
+ return ((char *)p);
+ if (*p == '\0')
+ return (NULL);
+ }
+ /* NOTREACHED */
+}
diff --git a/lib/stdlib/strcmp.c b/lib/stdlib/strcmp.c
new file mode 100644
index 000000000..52a415b7e
--- /dev/null
+++ b/lib/stdlib/strcmp.c
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2014-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <string.h>
+
+/*
+ * Compare strings.
+ * Returns <0, 0 or >0 if s1 is respectively less than, equal to or greater
+ * than s2, comparing bytes as unsigned chars.
+ */
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == '\0')
+ return 0;
+ return *(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1);
+}
diff --git a/lib/stdlib/strlen.c b/lib/stdlib/strlen.c
new file mode 100644
index 000000000..ba2c5ea22
--- /dev/null
+++ b/lib/stdlib/strlen.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2009-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <stddef.h>
+
+/*
+ * Return the number of characters in 'str', excluding the terminating NUL.
+ * NOTE(review): K&R-style parameter declaration and 'register' are archaic;
+ * kept verbatim since this mirrors the imported BSD source.
+ */
+size_t
+strlen(str)
+ const char *str;
+{
+ register const char *s;
+
+ for (s = str; *s; ++s);
+ return(s - str);
+}
diff --git a/lib/stdlib/strncmp.c b/lib/stdlib/strncmp.c
new file mode 100644
index 000000000..c5ad0a862
--- /dev/null
+++ b/lib/stdlib/strncmp.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2014-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+#include <string.h>
+
+/*
+ * Compare at most 'n' bytes of s1 and s2 as unsigned chars.
+ * Returns <0, 0 or >0; the comparison stops at the first difference, at a
+ * NUL, or after n bytes, whichever comes first.
+ */
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+
+ if (n == 0)
+ return 0;
+ do {
+ if (*s1 != *s2++)
+ return (*(const unsigned char *)s1 -
+ *(const unsigned char *)(s2 - 1));
+ if (*s1++ == '\0')
+ break;
+ } while (--n != 0);
+ return 0;
+}
diff --git a/lib/stdlib/strncpy.c b/lib/stdlib/strncpy.c
new file mode 100644
index 000000000..00e4b7a41
--- /dev/null
+++ b/lib/stdlib/strncpy.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2015-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+/*
+ * Copy src to dst, truncating or null-padding to always copy n bytes.
+ * Return dst.
+ * NOTE: if src is n bytes or longer, dst is NOT NUL-terminated.
+ */
+char *
+strncpy(char * __restrict dst, const char * __restrict src, size_t n)
+{
+ if (n != 0) {
+ char *d = dst;
+ const char *s = src;
+
+ do {
+ if ((*d++ = *s++) == '\0') {
+ /* NUL pad the remaining n-1 bytes */
+ while (--n != 0)
+ *d++ = '\0';
+ break;
+ }
+ } while (--n != 0);
+ }
+ return (dst);
+}
diff --git a/lib/stdlib/subr_prf.c b/lib/stdlib/subr_prf.c
new file mode 100644
index 000000000..2e272d900
--- /dev/null
+++ b/lib/stdlib/subr_prf.c
@@ -0,0 +1,548 @@
+/*-
+ * Copyright (c) 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
+ */
+
+/*
+ * Portions copyright (c) 2009-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <ctype.h>
+
+typedef unsigned char u_char;
+typedef unsigned int u_int;
+typedef int64_t quad_t;
+typedef uint64_t u_quad_t;
+typedef unsigned long u_long;
+typedef unsigned short u_short;
+
+static inline int imax(int a, int b) { return (a > b ? a : b); }
+
+/*
+ * Note that stdarg.h and the ANSI style va_start macro is used for both
+ * ANSI and traditional C compilers.
+ */
+
+#define TOCONS 0x01
+#define TOTTY 0x02
+#define TOLOG 0x04
+
+/* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */
+#define MAXNBUF (sizeof(intmax_t) * 8 + 1)
+
+struct putchar_arg {
+ int flags;
+ int pri;
+ struct tty *tty;
+ char *p_bufr;
+ size_t n_bufr;
+ char *p_next;
+ size_t remain;
+};
+
+struct snprintf_arg {
+ char *str;
+ size_t remain;
+};
+
+extern int log_open;
+
+static char *ksprintn(char *nbuf, uintmax_t num, int base, int *len, int upper);
+static void snprintf_func(int ch, void *arg);
+static int kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap);
+
+int vsnprintf(char *str, size_t size, const char *format, va_list ap);
+
+static char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+#define hex2ascii(hex) (hex2ascii_data[hex])
+
+/*
+ * Scaled down version of sprintf(3).
+ * Formats into 'buf' (assumed large enough -- no bounds checking) and
+ * NUL-terminates it. Returns the number of characters written, excluding
+ * the NUL.
+ */
+int
+sprintf(char *buf, const char *cfmt, ...)
+{
+ int retval;
+ va_list ap;
+
+ va_start(ap, cfmt);
+ retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
+ buf[retval] = '\0';
+ va_end(ap);
+ return (retval);
+}
+
+/*
+ * Scaled down version of vsprintf(3).
+ * Same as sprintf() but takes a va_list; no bounds checking on 'buf'.
+ */
+int
+vsprintf(char *buf, const char *cfmt, va_list ap)
+{
+ int retval;
+
+ retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap);
+ buf[retval] = '\0';
+ return (retval);
+}
+
+/*
+ * Scaled down version of snprintf(3).
+ * Writes at most 'size' bytes (including the terminating NUL) to 'str'.
+ */
+int
+snprintf(char *str, size_t size, const char *format, ...)
+{
+ int retval;
+ va_list ap;
+
+ va_start(ap, format);
+ retval = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return(retval);
+}
+
+/*
+ * Scaled down version of vsnprintf(3).
+ * Formats via kvprintf() through the snprintf_func() callback, which keeps
+ * one byte of 'size' spare for the terminating NUL appended below.
+ */
+int
+vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ struct snprintf_arg info;
+ int retval;
+
+ info.str = str;
+ info.remain = size;
+ retval = kvprintf(format, snprintf_func, &info, 10, ap);
+ if (info.remain >= 1)
+ *info.str++ = '\0';
+ return (retval);
+}
+
+/*
+ * kvprintf() output callback used by the *snprintf() family: append 'ch' to
+ * the buffer described by 'arg', reserving one byte for the NUL that the
+ * caller adds afterwards (hence the '>= 2' test).
+ */
+static void
+snprintf_func(int ch, void *arg)
+{
+ struct snprintf_arg *const info = arg;
+
+ if (info->remain >= 2) {
+ *info->str++ = ch;
+ info->remain--;
+ }
+}
+
+
+/*
+ * Kernel version which takes radix argument vsnprintf(3).
+ * 'radix' is the base used by the '%r' conversion (kvprintf() clamps it to
+ * 10 if outside [2, 36]).
+ */
+int
+vsnrprintf(char *str, size_t size, int radix, const char *format, va_list ap)
+{
+ struct snprintf_arg info;
+ int retval;
+
+ info.str = str;
+ info.remain = size;
+ retval = kvprintf(format, snprintf_func, &info, radix, ap);
+ if (info.remain >= 1)
+ *info.str++ = '\0';
+ return (retval);
+}
+
+
+/*
+ * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
+ * order; return an optional length and a pointer to the last character
+ * written in the buffer (i.e., the first character of the string).
+ * The buffer pointed to by `nbuf' must have length >= MAXNBUF.
+ */
+static char *
+ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper)
+{
+ char *p, c;
+
+ p = nbuf;
+ *p = '\0';
+ /* Digits come out least-significant first, hence the reverse order. */
+ do {
+ c = hex2ascii(num % base);
+ *++p = upper ? toupper(c) : c;
+ } while (num /= base);
+ if (lenp)
+ *lenp = p - nbuf;
+ return (p);
+}
+
+/*
+ * Scaled down version of printf(3).
+ *
+ * Two additional formats:
+ *
+ * The format %b is supported to decode error registers.
+ * Its usage is:
+ *
+ * printf("reg=%b\n", regval, "<base><arg>*");
+ *
+ * where <base> is the output base expressed as a control character, e.g.
+ * \10 gives octal; \20 gives hex. Each arg is a sequence of characters,
+ * the first of which gives the bit number to be inspected (origin 1), and
+ * the next characters (up to a control character, i.e. a character <= 32),
+ * give the name of the register. Thus:
+ *
+ * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n");
+ *
+ * would produce output:
+ *
+ * reg=3<BITTWO,BITONE>
+ *
+ * XXX: %D -- Hexdump, takes pointer and separator string:
+ * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX
+ * ("%*D", len, ptr, " " -> XX XX XX XX ...
+ */
+/*
+ * Core formatting engine. 'func' is a per-character output callback; if it
+ * is NULL, characters are stored directly into 'arg' treated as a char
+ * buffer. 'radix' is the base for the '%r' conversion. Returns the number
+ * of characters emitted.
+ */
+int
+kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap)
+{
+#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; }
+ char nbuf[MAXNBUF];
+ char *d;
+ const char *p, *percent, *q;
+ u_char *up;
+ int ch, n;
+ uintmax_t num;
+ int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
+ int cflag, hflag, jflag, tflag, zflag;
+ int dwidth, upper;
+ char padc;
+ int stop = 0, retval = 0;
+
+ num = 0;
+ if (!func)
+ d = (char *) arg;
+ else
+ d = NULL;
+
+ if (fmt == NULL)
+ fmt = "(fmt null)\n";
+
+ if (radix < 2 || radix > 36)
+ radix = 10;
+
+ for (;;) {
+ padc = ' ';
+ width = 0;
+ /* Copy literal text until the next '%' (or forever once stopped). */
+ while ((ch = (u_char)*fmt++) != '%' || stop) {
+ if (ch == '\0')
+ return (retval);
+ PCHAR(ch);
+ }
+ percent = fmt - 1;
+ qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
+ sign = 0; dot = 0; dwidth = 0; upper = 0;
+ cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0;
+reswitch: switch (ch = (u_char)*fmt++) {
+ case '.':
+ dot = 1;
+ goto reswitch;
+ case '#':
+ sharpflag = 1;
+ goto reswitch;
+ case '+':
+ sign = 1;
+ goto reswitch;
+ case '-':
+ ladjust = 1;
+ goto reswitch;
+ case '%':
+ PCHAR(ch);
+ break;
+ case '*':
+ if (!dot) {
+ width = va_arg(ap, int);
+ if (width < 0) {
+ ladjust = !ladjust;
+ width = -width;
+ }
+ } else {
+ dwidth = va_arg(ap, int);
+ }
+ goto reswitch;
+ case '0':
+ if (!dot) {
+ padc = '0';
+ goto reswitch;
+ }
+ /* FALLTHROUGH: '0' after '.' is part of the precision. */
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ for (n = 0;; ++fmt) {
+ n = n * 10 + ch - '0';
+ ch = *fmt;
+ if (ch < '0' || ch > '9')
+ break;
+ }
+ if (dot)
+ dwidth = n;
+ else
+ width = n;
+ goto reswitch;
+ case 'b':
+ num = (u_int)va_arg(ap, int);
+ p = va_arg(ap, char *);
+ for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;)
+ PCHAR(*q--);
+
+ if (num == 0)
+ break;
+
+ for (tmp = 0; *p;) {
+ n = *p++;
+ if (num & (1 << (n - 1))) {
+ PCHAR(tmp ? ',' : '<');
+ for (; (n = *p) > ' '; ++p)
+ PCHAR(n);
+ tmp = 1;
+ } else
+ for (; *p > ' '; ++p)
+ continue;
+ }
+ if (tmp)
+ PCHAR('>');
+ break;
+ case 'c':
+ PCHAR(va_arg(ap, int));
+ break;
+ case 'D':
+ up = va_arg(ap, u_char *);
+ p = va_arg(ap, char *);
+ if (!width)
+ width = 16;
+ while(width--) {
+ PCHAR(hex2ascii(*up >> 4));
+ PCHAR(hex2ascii(*up & 0x0f));
+ up++;
+ if (width)
+ for (q=p;*q;q++)
+ PCHAR(*q);
+ }
+ break;
+ case 'd':
+ case 'i':
+ base = 10;
+ sign = 1;
+ goto handle_sign;
+ case 'h':
+ if (hflag) {
+ hflag = 0;
+ cflag = 1;
+ } else
+ hflag = 1;
+ goto reswitch;
+ case 'j':
+ jflag = 1;
+ goto reswitch;
+ case 'l':
+ if (lflag) {
+ lflag = 0;
+ qflag = 1;
+ } else
+ lflag = 1;
+ goto reswitch;
+ case 'n':
+ if (jflag)
+ *(va_arg(ap, intmax_t *)) = retval;
+ else if (qflag)
+ *(va_arg(ap, quad_t *)) = retval;
+ else if (lflag)
+ *(va_arg(ap, long *)) = retval;
+ else if (zflag)
+ *(va_arg(ap, size_t *)) = retval;
+ else if (hflag)
+ *(va_arg(ap, short *)) = retval;
+ else if (cflag)
+ *(va_arg(ap, char *)) = retval;
+ else
+ *(va_arg(ap, int *)) = retval;
+ break;
+ case 'o':
+ base = 8;
+ goto handle_nosign;
+ case 'p':
+ base = 16;
+ sharpflag = (width == 0);
+ sign = 0;
+ num = (uintptr_t)va_arg(ap, void *);
+ goto number;
+ case 'q':
+ qflag = 1;
+ goto reswitch;
+ case 'r':
+ base = radix;
+ if (sign)
+ goto handle_sign;
+ goto handle_nosign;
+ case 's':
+ p = va_arg(ap, char *);
+ if (p == NULL)
+ p = "(null)";
+ if (!dot)
+ n = strlen (p);
+ else
+ for (n = 0; n < dwidth && p[n]; n++)
+ continue;
+
+ width -= n;
+
+ if (!ladjust && width > 0)
+ while (width--)
+ PCHAR(padc);
+ while (n--)
+ PCHAR(*p++);
+ if (ladjust && width > 0)
+ while (width--)
+ PCHAR(padc);
+ break;
+ case 't':
+ tflag = 1;
+ goto reswitch;
+ case 'u':
+ base = 10;
+ goto handle_nosign;
+ case 'X':
+ upper = 1;
+ /* FALLTHROUGH */
+ case 'x':
+ base = 16;
+ goto handle_nosign;
+ case 'y':
+ base = 16;
+ sign = 1;
+ goto handle_sign;
+ case 'z':
+ zflag = 1;
+ goto reswitch;
+handle_nosign:
+ sign = 0;
+ if (jflag)
+ num = va_arg(ap, uintmax_t);
+ else if (qflag)
+ num = va_arg(ap, u_quad_t);
+ else if (tflag)
+ num = va_arg(ap, ptrdiff_t);
+ else if (lflag)
+ num = va_arg(ap, u_long);
+ else if (zflag)
+ num = va_arg(ap, size_t);
+ else if (hflag)
+ num = (u_short)va_arg(ap, int);
+ else if (cflag)
+ num = (u_char)va_arg(ap, int);
+ else
+ num = va_arg(ap, u_int);
+ goto number;
+handle_sign:
+ if (jflag)
+ num = va_arg(ap, intmax_t);
+ else if (qflag)
+ num = va_arg(ap, quad_t);
+ else if (tflag)
+ num = va_arg(ap, ptrdiff_t);
+ else if (lflag)
+ num = va_arg(ap, long);
+ else if (zflag)
+ num = va_arg(ap, ssize_t);
+ else if (hflag)
+ num = (short)va_arg(ap, int);
+ else if (cflag)
+ num = (char)va_arg(ap, int);
+ else
+ num = va_arg(ap, int);
+number:
+ if (sign && (intmax_t)num < 0) {
+ neg = 1;
+ num = -(intmax_t)num;
+ }
+ /* Render digits (reversed) then emit padding, sign, prefix. */
+ p = ksprintn(nbuf, num, base, &n, upper);
+ tmp = 0;
+ if (sharpflag && num != 0) {
+ if (base == 8)
+ tmp++;
+ else if (base == 16)
+ tmp += 2;
+ }
+ if (neg)
+ tmp++;
+
+ if (!ladjust && padc == '0')
+ dwidth = width - tmp;
+ width -= tmp + imax(dwidth, n);
+ dwidth -= n;
+ if (!ladjust)
+ while (width-- > 0)
+ PCHAR(' ');
+ if (neg)
+ PCHAR('-');
+ if (sharpflag && num != 0) {
+ if (base == 8) {
+ PCHAR('0');
+ } else if (base == 16) {
+ PCHAR('0');
+ PCHAR('x');
+ }
+ }
+ while (dwidth-- > 0)
+ PCHAR('0');
+
+ while (*p)
+ PCHAR(*p--);
+
+ if (ladjust)
+ while (width-- > 0)
+ PCHAR(' ');
+
+ break;
+ default:
+ while (percent < fmt)
+ PCHAR(*percent++);
+ /*
+ * Since we ignore a formatting argument it is no
+ * longer safe to obey the remaining formatting
+ * arguments as the arguments will no longer match
+ * the format specs.
+ */
+ stop = 1;
+ break;
+ }
+ }
+#undef PCHAR
+}
diff --git a/lib/trusted_os/trusted_os.c b/lib/trusted_os/trusted_os.c
new file mode 100644
index 000000000..b24c3d395
--- /dev/null
+++ b/lib/trusted_os/trusted_os.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <stdint.h>
+#include <tftf.h>
+#include <trusted_os.h>
+#include <uuid_utils.h>
+
+/*
+ * Detect whether a Trusted OS is present by issuing the Trusted OS UID SMC.
+ *
+ * Returns 1 and fills 'tos_uuid' with the four 32-bit words returned in
+ * ret0..ret3 if a Trusted OS answered. Returns 0 (leaving 'tos_uuid'
+ * untouched) if the call is unimplemented (SMC_UNKNOWN) or all four
+ * returned words are zero.
+ */
+unsigned int is_trusted_os_present(uuid_t *tos_uuid)
+{
+ smc_args tos_uid_args = { SMC_TOS_UID };
+ smc_ret_values ret;
+ uint32_t *tos_uuid32;
+
+ ret = tftf_smc(&tos_uid_args);
+
+ if ((ret.ret0 == SMC_UNKNOWN) ||
+ ((ret.ret0 == 0) && (ret.ret1 == 0) && (ret.ret2 == 0) &&
+ (ret.ret3 == 0)))
+ return 0;
+
+ tos_uuid32 = (uint32_t *) tos_uuid;
+ tos_uuid32[0] = ret.ret0;
+ tos_uuid32[1] = ret.ret1;
+ tos_uuid32[2] = ret.ret2;
+ tos_uuid32[3] = ret.ret3;
+
+ return 1;
+}
diff --git a/lib/utils/mp_printf.c b/lib/utils/mp_printf.c
new file mode 100644
index 000000000..d1eb780ea
--- /dev/null
+++ b/lib/utils/mp_printf.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include <spinlock.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Lock to avoid concurrent accesses to the serial console */
+static spinlock_t printf_lock;
+
+/*
+ * Print the MPID header, e.g.: [cpu 0x0100]
+ *
+ * If SHELL_COLOR == 1, this also prints shell's color escape sequences to ease
+ * identifying which CPU displays the message. There are 8 standard colors so
+ * if the platform has more than 8 CPUs, some colors will be reused.
+ */
+#if SHELL_COLOR
+#define PRINT_MPID_HDR(_mpid) \
+ do { \
+ unsigned int linear_id = platform_get_core_pos(_mpid); \
+ printf("\033[1;%u;40m", 30 + (linear_id & 0x7)); \
+ printf("[cpu 0x%.4x] ", _mpid); \
+ printf("\033[0m"); \
+ } while (0)
+#else
+#define PRINT_MPID_HDR(_mpid) \
+ printf("[cpu 0x%.4x] ", _mpid)
+#endif /* SHELL_COLOR */
+
+/*
+ * printf() variant for SMP use: prefixes the message with the calling CPU's
+ * MPID and serialises console output with a spinlock so messages from
+ * concurrent CPUs do not interleave. Output longer than the 256-byte local
+ * buffer (including the NUL) is silently truncated.
+ */
+void mp_printf(const char *fmt, ...)
+{
+ va_list ap;
+ char str[256];
+ /*
+ * As part of testing Firmware Update feature on Cortex-A57 CPU, an
+ * issue was discovered while printing in NS_BL1U stage. The issue
+ * appears when the second call to `NOTICE()` is made in the
+ * `ns_bl1u_main()`. As a result of this issue the CPU hangs and the
+ * debugger is also not able to connect anymore.
+ *
+ * After further debugging and experiments it was found that if
+ * `read_mpidr_el1()` is avoided or volatile qualifier is used for
+ * reading the mpidr, this issue gets resolved.
+ *
+ * NOTE: The actual/real reason why this happens is still not known.
+ * Moreover this problem is not encountered on Cortex-A53 CPU.
+ */
+ volatile unsigned int mpid = read_mpidr_el1() & 0xFFFF;
+
+ /*
+ * TODO: It would be simpler to use vprintf() instead of
+ * vsnprintf() + printf(), we wouldn't need to declare a static buffer
+ * for storing the product of vsnprintf(). Unfortunately our C library
+ * doesn't provide vprintf() at the moment.
+ * Import vprintf() code from FreeBSD C library to our local C library.
+ */
+ va_start(ap, fmt);
+ vsnprintf(str, sizeof(str), fmt, ap);
+ str[sizeof(str) - 1] = 0;
+ va_end(ap);
+
+ /* Header and body must be emitted under the same lock acquisition. */
+ spin_lock(&printf_lock);
+ PRINT_MPID_HDR(mpid);
+ printf("%s", str);
+ spin_unlock(&printf_lock);
+}
diff --git a/lib/utils/uuid.c b/lib/utils/uuid.c
new file mode 100644
index 000000000..21747a208
--- /dev/null
+++ b/lib/utils/uuid.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <uuid_utils.h>
+
+/* Format string to print a UUID */
+static const char *uuid_str_fmt = "{ 0x%.8x, 0x%.4x, 0x%.4x, 0x%.2x, 0x%.2x, "
+ "0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x }";
+
+
+/* Return non-zero if 'uuid' is the all-zero (nil) UUID, zero otherwise. */
+unsigned int is_uuid_null(const uuid_t *uuid)
+{
+ const uuid_t uuid_null = {0};
+
+ return memcmp(uuid, &uuid_null, sizeof(uuid_t)) == 0;
+}
+
+/*
+ * Render 'uuid' into 'str' using uuid_str_fmt. 'str' must be at least
+ * UUID_STR_SIZE bytes long. Returns 'str' for convenient chaining.
+ */
+char *uuid_to_str(const uuid_t *uuid, char *str)
+{
+ assert(uuid != NULL);
+ assert(str != NULL);
+
+ snprintf(str, UUID_STR_SIZE, uuid_str_fmt,
+ uuid->time_low, uuid->time_mid, uuid->time_hi_and_version,
+ uuid->clock_seq_hi_and_reserved, uuid->clock_seq_low,
+ uuid->node[0], uuid->node[1], uuid->node[2], uuid->node[3],
+ uuid->node[4], uuid->node[5]);
+
+ return str;
+}
+
+/* Return non-zero if the two UUIDs are byte-for-byte identical, else zero. */
+unsigned int uuid_equal(const uuid_t *uuid1, const uuid_t *uuid2)
+{
+ return memcmp(uuid1, uuid2, sizeof(uuid_t)) == 0;
+}
+
+/*
+ * Fill 'uuid' from four 32-bit words (e.g. the register values returned by
+ * an SMC UID call, as in is_trusted_os_present()) and return it.
+ */
+uuid_t *make_uuid_from_4words(uuid_t *uuid,
+ uint32_t w0,
+ uint32_t w1,
+ uint32_t w2,
+ uint32_t w3)
+{
+ uint32_t *uuid32;
+
+ assert(uuid != NULL);
+
+ uuid32 = (uint32_t *) uuid;
+ uuid32[0] = w0;
+ uuid32[1] = w1;
+ uuid32[2] = w2;
+ uuid32[3] = w3;
+
+ return uuid;
+}
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
new file mode 100644
index 000000000..4a4ac30f5
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_svc_mon
+ .global enable_mmu_direct_hyp
+
+ /* void enable_mmu_direct_svc_mon(unsigned int flags) */
+func enable_mmu_direct_svc_mon
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, SCTLR
+ tst r1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* MAIR0. Only the lower 32 bits are used. */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, MAIR0
+
+ /* TTBCR. Only the lower 32 bits are used. */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, TTBCR
+
+ /* TTBR0 */
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, TTBR0_64
+
+ /* TTBR1 is unused right now; set it to 0. */
+ mov r1, #0
+ mov r2, #0
+ stcopr16 r1, r2, TTBR1_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Enable MMU by honoring flags */
+ ldcopr r1, SCTLR
+ ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #SCTLR_C_BIT
+
+ stcopr r1, SCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_svc_mon
+
+
+ /* void enable_mmu_direct_hyp(unsigned int flags) */
+func enable_mmu_direct_hyp
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, HSCTLR
+ tst r1, #HSCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* HMAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, HMAIR0
+
+ /* HTCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, HTCR
+
+ /* HTTBR */
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, HTTBR_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Enable MMU by honoring flags */
+ ldcopr r1, HSCTLR
+ ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #HSCTLR_C_BIT
+
+ stcopr r1, HSCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_hyp
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
new file mode 100644
index 000000000..66938e5f1
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+ /*
+ * The library uses the long descriptor translation table format, which
+ * supports 4 KiB pages only.
+ */
+ return size == PAGE_SIZE_4KB;
+}
+
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+ return PAGE_SIZE_4KB;
+}
+
+#if ENABLE_ASSERTIONS
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+ /* Physical address space size for long descriptor format. */
+ return (1ULL << 40) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() == 1U);
+ return (read_sctlr() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL2_REGIME);
+ assert(xlat_arch_current_el() == 2U);
+ return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ if (IS_IN_EL2()) {
+ return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ return UPPER_ATTRS(XN);
+ }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ tlbimvaais(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ tlbimvahis(TLBI_ADDR(va));
+ }
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+ /* Invalidate all entries from branch predictors. */
+ bpiallis();
+
+ /*
+ * A TLB maintenance instruction can complete at any time after
+ * it is issued, but is only guaranteed to be complete after the
+ * execution of DSB by the PE that executed the TLB maintenance
+ * instruction. After the TLB invalidate instruction is
+ * complete, no new memory accesses using the invalidated TLB
+ * entries will be observed by any observer of the system
+ * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+ * "Ordering and completion of TLB maintenance instructions".
+ */
+ dsbish();
+
+ /*
+ * The effects of a completed TLB maintenance instruction are
+ * only guaranteed to be visible on the PE that executed the
+ * instruction after the execution of an ISB instruction by the
+ * PE that executed the TLB maintenance instruction.
+ */
+ isb();
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+ if (IS_IN_HYP()) {
+ return 2U;
+ } else {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
+ * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+ *
+ * The PL1&0 translation regime in AArch32 behaves like the
+ * EL1&0 regime in AArch64 except for the XN bits, but we set
+ * and unset them at the same time, so there's no difference in
+ * practice.
+ */
+ return 1U;
+ }
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in PL1 or PL2, assuming that the page tables
+ * have already been created.
+ ******************************************************************************/
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, __unused int xlat_regime)
+{
+ uint64_t mair, ttbr0;
+ uint32_t ttbcr;
+
+ /* Set attributes in the right indices of the MAIR */
+ mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+ ATTR_IWBWA_OWBWA_NTR_INDEX);
+ mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+ ATTR_NON_CACHEABLE_INDEX);
+
+ /*
+ * Configure the control register for stage 1 of the PL1&0 or EL2
+ * translation regimes.
+ */
+
+ /* Use the Long-descriptor translation table format. */
+ ttbcr = TTBCR_EAE_BIT;
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * Disable translation table walk for addresses that are
+ * translated using TTBR1. Therefore, only TTBR0 is used.
+ */
+ ttbcr |= TTBCR_EPD1_BIT;
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ assert(IS_IN_HYP());
+
+ /*
+ * Set HTCR bits as well. Set HTTBR table properties
+ * as Inner & outer WBWA & shareable.
+ */
+ ttbcr |= HTCR_RES1 |
+ HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
+ HTCR_RGN0_INNER_WBA;
+ }
+
+ /*
+ * Limit the input address ranges and memory region sizes translated
+ * using TTBR0 to the given virtual address space size, if smaller than
+ * 32 bits.
+ */
+ if (max_va != UINT32_MAX) {
+ uintptr_t virtual_addr_space_size = max_va + 1U;
+
+ assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+ /*
+ * __builtin_ctzll(0) is undefined but here we are guaranteed
+ * that virtual_addr_space_size is in the range [1, UINT32_MAX].
+ */
+ int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+ ttbcr |= (uint32_t) t0sz;
+ }
+
+ /*
+ * Set the cacheability and shareability attributes for memory
+ * associated with translation table walks using TTBR0.
+ */
+ if ((flags & XLAT_TABLE_NC) != 0U) {
+ /* Inner & outer non-cacheable non-shareable. */
+ ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
+ TTBCR_RGN0_INNER_NC;
+ } else {
+ /* Inner & outer WBWA & shareable. */
+ ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+ TTBCR_RGN0_INNER_WBA;
+ }
+
+ /* Set TTBR0 bits as well */
+ ttbr0 = (uint64_t)(uintptr_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+ /* Now populate MMU configuration */
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
+}
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 000000000..21717d28a
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_el1
+ .global enable_mmu_direct_el2
+ .global enable_mmu_direct_el3
+
+ /* Macros to read and write to system register for a given EL. */
+ .macro _msr reg_name, el, gp_reg
+ msr \reg_name\()_el\()\el, \gp_reg
+ .endm
+
+ .macro _mrs gp_reg, reg_name, el
+ mrs \gp_reg, \reg_name\()_el\()\el
+ .endm
+
+ .macro tlbi_invalidate_all el
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .elseif \el == 2
+ TLB_INVALIDATE(alle2)
+ .elseif \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1, 2 or 3"
+ .endif
+ .endm
+
+ /* void enable_mmu_direct_el<x>(unsigned int flags) */
+ .macro define_mmu_enable_func el
+ func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+ _mrs x1, sctlr, \el
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* Invalidate all TLB entries */
+ tlbi_invalidate_all \el
+
+ mov x7, x0
+ ldr x0, =mmu_cfg_params
+
+ /* MAIR */
+ ldr x1, [x0, #(MMU_CFG_MAIR << 3)]
+ _msr mair, \el, x1
+
+ /* TCR */
+ ldr x2, [x0, #(MMU_CFG_TCR << 3)]
+ _msr tcr, \el, x2
+
+ /* TTBR */
+ ldr x3, [x0, #(MMU_CFG_TTBR0 << 3)]
+ _msr ttbr0, \el, x3
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ _mrs x4, sctlr, \el
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ _msr sctlr, \el, x4
+ isb
+
+ ret
+ endfunc enable_mmu_direct_\()el\el
+ .endm
+
+ /*
+ * Define MMU-enabling functions for EL1, EL2 and EL3:
+ * enable_mmu_direct_el1
+ * enable_mmu_direct_el2
+ * enable_mmu_direct_el3
+ */
+ define_mmu_enable_func 1
+ define_mmu_enable_func 2
+ define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
new file mode 100644
index 000000000..d1555bf27
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+ u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
+
+ if (size == PAGE_SIZE_4KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
+ } else if (size == PAGE_SIZE_16KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
+ } else if (size == PAGE_SIZE_64KB) {
+ return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ } else {
+ return 0;
+ }
+}
+
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
+ return PAGE_SIZE_16KB;
+ } else {
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
+ return PAGE_SIZE_4KB;
+ }
+}
+
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
+
+ /* 48 bits address */
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
+
+#if ENABLE_ASSERTIONS
+/*
+ * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
+ * supported in ARMv8.2 onwards.
+ */
+static const unsigned int pa_range_bits_arr[] = {
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+ PARANGE_0101, PARANGE_0110
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+ u_register_t pa_range = read_id_aa64mmfr0_el1() &
+ ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+ /* All other values are reserved */
+ assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+ return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else if (el == 2U) {
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert((xlat_regime == EL2_REGIME) ||
+ (xlat_regime == EL3_REGIME));
+ return UPPER_ATTRS(XN);
+ }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ /*
+ * This function only supports invalidation of TLB entries for the EL3
+ * and EL1&0 translation regimes.
+ *
+ * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+ * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+ */
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ tlbivaae1is(TLBI_ADDR(va));
+ } else if (xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ tlbivae2is(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ tlbivae3is(TLBI_ADDR(va));
+ }
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+ /*
+ * A TLB maintenance instruction can complete at any time after
+ * it is issued, but is only guaranteed to be complete after the
+ * execution of DSB by the PE that executed the TLB maintenance
+ * instruction. After the TLB invalidate instruction is
+ * complete, no new memory accesses using the invalidated TLB
+ * entries will be observed by any observer of the system
+ * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+ * "Ordering and completion of TLB maintenance instructions".
+ */
+ dsbish();
+
+ /*
+ * The effects of a completed TLB maintenance instruction are
+ * only guaranteed to be visible on the PE that executed the
+ * instruction after the execution of an ISB instruction by the
+ * PE that executed the TLB maintenance instruction.
+ */
+ isb();
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ assert(el > 0U);
+
+ return el;
+}
+
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime)
+{
+ uint64_t mair, ttbr0, tcr;
+ uintptr_t virtual_addr_space_size;
+
+ /* Set attributes in the right indices of the MAIR. */
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+ /*
+ * Limit the input address ranges and memory region sizes translated
+ * using TTBR0 to the given virtual address space size.
+ */
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
+
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
+ assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
+ /*
+ * __builtin_ctzll(0) is undefined but here we are guaranteed that
+ * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
+ */
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+ tcr = (uint64_t) t0sz;
+
+ /*
+ * Set the cacheability and shareability attributes for memory
+ * associated with translation table walks.
+ */
+ if ((flags & XLAT_TABLE_NC) != 0U) {
+ /* Inner & outer non-cacheable non-shareable. */
+ tcr |= TCR_SH_NON_SHAREABLE |
+ TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+ } else {
+ /* Inner & outer WBWA & shareable. */
+ tcr |= TCR_SH_INNER_SHAREABLE |
+ TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+ }
+
+ /*
+ * It is safer to restrict the max physical address accessible by the
+ * hardware as much as possible.
+ */
+ unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ /*
+ * TCR_EL1.EPD1: Disable translation table walk for addresses
+ * that are translated using TTBR1_EL1.
+ */
+ tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+ } else if (xlat_regime == EL2_REGIME) {
+ tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+ }
+
+ /* Set TTBR bits as well */
+ ttbr0 = (uint64_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = tcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
+}
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
new file mode 100644
index 000000000..9507ad715
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
+ ${ARCH}/enable_mmu.S \
+ ${ARCH}/xlat_tables_arch.c \
+ xlat_tables_context.c \
+ xlat_tables_core.c \
+ xlat_tables_utils.c)
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
new file mode 100644
index 000000000..df491d097
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Each platform can define the size of its physical and virtual address spaces.
+ * If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the software
+ * image currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+ mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+}
+
+void mmap_add_alloc_va(mmap_region_t *mm)
+{
+ while (mm->size != 0U) {
+ assert(mm->base_va == 0U);
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
+ mm++;
+ }
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+
+ return rc;
+}
+
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+ return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+ base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables(void)
+{
+ assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+ unsigned int current_el = xlat_arch_current_el();
+
+ if (current_el == 1U) {
+ tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+ } else if (current_el == 2U) {
+ tf_xlat_ctx.xlat_regime = EL2_REGIME;
+ } else {
+ assert(current_el == 3U);
+ tf_xlat_ctx.xlat_regime = EL3_REGIME;
+ }
+
+ init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+ return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_svc_mon(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_el1(flags);
+}
+
+void enable_mmu_el2(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_el2(flags);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL3_REGIME);
+ enable_mmu_direct_el3(flags);
+}
+
+#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
new file mode 100644
index 000000000..80ce4fa7a
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -0,0 +1,1157 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <string.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * Helper function that cleans the data cache only if it is enabled.
+ *
+ * NOTE(review): presumably cleaning is pointless while the D-cache is off
+ * because table walks then see memory directly -- confirm against the
+ * architecture port.
+ */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+ if (is_dcache_enabled())
+ clean_dcache_range(addr, size);
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * The following functions assume that they will be called using subtables only.
+ * The base table can't be unmapped, so it is not needed to do any special
+ * handling for it.
+ */
+
+/*
+ * Returns the index of the array corresponding to the specified translation
+ * table. Asserts (returning -1 in release builds) if 'table' is not one of
+ * the context's subtables.
+ */
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables[i] == table)
+ return i;
+
+ /*
+ * Maybe we were asked to get the index of the base level table, which
+ * should never happen.
+ */
+ assert(false);
+
+ return -1;
+}
+
+/* Returns a pointer to an empty translation table, or NULL if none is free. */
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables_mapped_regions[i] == 0)
+ return ctx->tables[i];
+
+ return NULL;
+}
+
+/* Increments region count for a given table. */
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
+}
+
+/* Decrements region count for a given table. */
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
+}
+
+/* Returns true if no region is currently mapped through the given table. */
+static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
+}
+
+#else /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Returns a pointer to the first empty translation table. Tables are handed
+ * out in order and never returned to the pool: without dynamic regions,
+ * nothing is ever unmapped.
+ */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+ assert(ctx->next_table < ctx->tables_num);
+
+ return ctx->tables[ctx->next_table++];
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ *
+ * ctx: translation context; its xlat_regime selects the AP and XN encoding.
+ * attr: MT_xxx attributes of the region being mapped.
+ * addr_pa: physical address the descriptor will translate to. Must be
+ * aligned to the block size of 'level'.
+ * level: lookup level at which the descriptor will be installed.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level)
+{
+ uint64_t desc;
+ uint32_t mem_type;
+
+ /* Make sure that the granularity is fine enough to map this address. */
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
+
+ desc = addr_pa;
+ /*
+ * There are different translation table descriptors for level 3 and the
+ * rest.
+ */
+ desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+ /*
+ * Always set the access flag, as this library assumes access flag
+ * faults aren't managed.
+ */
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+ /*
+ * Deduce other fields of the descriptor based on the MT_NS and MT_RW
+ * memory region attributes.
+ */
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+
+ /*
+ * Do not allow unprivileged access when the mapping is for a privileged
+ * EL. For translation regimes that do not have mappings for access for
+ * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
+ */
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ if ((attr & MT_USER) != 0U) {
+ /* EL0 mapping requested, so we give User access */
+ desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+ } else {
+ /* EL1 mapping requested, no User access granted */
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
+ } else {
+ assert((ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+ desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
+ }
+
+ /*
+ * Deduce shareability domain and executability of the memory region
+ * from the memory type of the attributes (MT_TYPE).
+ *
+ * Data accesses to device memory and non-cacheable normal memory are
+ * coherent for all observers in the system, and correspondingly are
+ * always treated as being Outer Shareable. Therefore, for these 2 types
+ * of memory, it is not strictly needed to set the shareability field
+ * in the translation tables.
+ */
+ mem_type = MT_TYPE(attr);
+ if (mem_type == MT_DEVICE) {
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ /*
+ * Always map device memory as execute-never.
+ * This is to avoid the possibility of a speculative instruction
+ * fetch, which could be an issue if this memory region
+ * corresponds to a read-sensitive peripheral.
+ */
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ } else { /* Normal memory */
+ /*
+ * Always map read-write normal memory as execute-never.
+ * This library assumes that it is used by software that does
+ * not self-modify its code, therefore R/W memory is reserved
+ * for data storage, which must not be executable.
+ *
+ * Note that setting the XN bit here is for consistency only.
+ * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
+ * which makes any writable memory region to be treated as
+ * execute-never, regardless of the value of the XN bit in the
+ * translation table.
+ *
+ * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+ * attribute to figure out the value of the XN bit. The actual
+ * XN bit(s) to set in the descriptor depends on the context's
+ * translation regime and the policy applied in
+ * xlat_arch_regime_get_xn_desc().
+ */
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+ }
+
+ if (mem_type == MT_MEMORY) {
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ } else {
+ assert(mem_type == MT_NON_CACHEABLE);
+ desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+ }
+ }
+
+ return desc;
+}
+
+/*
+ * Enumeration of actions that can be made when mapping table entries depending
+ * on the previous value in that entry and information about the region being
+ * mapped. The unmap path reuses a subset of these actions (everything except
+ * ACTION_CREATE_NEW_TABLE).
+ */
+typedef enum {
+
+ /* Do nothing */
+ ACTION_NONE,
+
+ /* Write a block (or page, if in level 3) entry. */
+ ACTION_WRITE_BLOCK_ENTRY,
+
+ /*
+ * Create a new table and write a table entry pointing to it. Recurse
+ * into it for further processing.
+ */
+ ACTION_CREATE_NEW_TABLE,
+
+ /*
+ * There is a table descriptor in this entry, read it and recurse into
+ * that table for further processing.
+ */
+ ACTION_RECURSE_INTO_TABLE,
+
+} action_t;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * Recursive function that writes to the translation tables and unmaps the
+ * specified region.
+ *
+ * ctx: translation context the tables belong to.
+ * mm: region to unmap; base_va/size define the VA range.
+ * table_base_va: VA translated by the first entry of 'table_base'.
+ * table_base: translation table modified at this recursion level.
+ * table_entries: number of entries in 'table_base'.
+ * level: lookup level of 'table_base'.
+ */
+static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ const uintptr_t table_base_va,
+ uint64_t *const table_base,
+ const unsigned int table_entries,
+ const unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ uintptr_t table_idx_va;
+ uintptr_t table_idx_end_va; /* End VA of this entry */
+
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+
+ unsigned int table_idx;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
+
+ assert(table_idx < table_entries);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ table_idx = 0;
+ }
+
+ while (table_idx < table_entries) {
+
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ desc = table_base[table_idx];
+ uint64_t desc_type = desc & DESC_MASK;
+
+ action_t action;
+
+ if ((mm->base_va <= table_idx_va) &&
+ (region_end_va >= table_idx_end_va)) {
+ /* Region covers all block */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors allowed,
+ * erase it.
+ */
+ assert(desc_type == PAGE_DESC);
+
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ } else {
+ /*
+ * Other levels can have table descriptors. If
+ * so, recurse into it and erase descriptors
+ * inside it as needed. If there is a block
+ * descriptor, just erase it. If an invalid
+ * descriptor is found, this table isn't
+ * actually mapped, which shouldn't happen.
+ */
+ if (desc_type == TABLE_DESC) {
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ assert(desc_type == BLOCK_DESC);
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ }
+ }
+
+ /*
+ * NOTE(review): the '||' below means any entry that fails
+ * the full-cover test above is classed as a partial overlap.
+ * Because the loop starts at the first affected index and
+ * exits once past the region end, entries seen here do
+ * overlap, but '&&' would state the intent more precisely --
+ * confirm against the map path, which uses the same pattern.
+ */
+ } else if ((mm->base_va <= table_idx_end_va) ||
+ (region_end_va >= table_idx_va)) {
+ /*
+ * Region partially covers block.
+ *
+ * It can't happen in level 3.
+ *
+ * There must be a table descriptor here, if not there
+ * was a problem when mapping the region.
+ */
+ assert(level < 3U);
+ assert(desc_type == TABLE_DESC);
+
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
+ }
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+
+ /* Recurse to write into subtable */
+ xlat_tables_unmap_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /*
+ * If the subtable is now empty, remove its reference.
+ */
+ if (xlat_table_is_empty(ctx, subtable)) {
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
+ }
+
+ } else {
+ assert(action == ACTION_NONE);
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (region_end_va <= table_idx_va)
+ break;
+ }
+
+ /* This table no longer maps the region: drop its reference count. */
+ if (level > ctx->base_level)
+ xlat_table_dec_regions_count(ctx, table_base);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * From the given arguments, it decides which action to take when mapping the
+ * specified region.
+ *
+ * mm: region being mapped.
+ * desc_type: type bits of the descriptor currently in the table entry.
+ * dest_pa: PA this entry would translate to.
+ * table_entry_base_va: first VA translated by this table entry.
+ * level: lookup level of the entry.
+ */
+static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
+{
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+ uintptr_t table_entry_end_va =
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ /*
+ * The descriptor types allowed depend on the current table level.
+ */
+
+ if ((mm->base_va <= table_entry_base_va) &&
+ (mm_end_va >= table_entry_end_va)) {
+
+ /*
+ * Table entry is covered by region
+ * --------------------------------
+ *
+ * This means that this table entry can describe the whole
+ * translation with this granularity in principle.
+ */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors are allowed.
+ */
+ if (desc_type == PAGE_DESC) {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ return ACTION_NONE;
+ } else {
+ assert(desc_type == INVALID_DESC);
+ return ACTION_WRITE_BLOCK_ENTRY;
+ }
+
+ } else {
+
+ /*
+ * Other levels. Table descriptors are allowed. Block
+ * descriptors too, but they have some limitations.
+ */
+
+ if (desc_type == TABLE_DESC) {
+ /* There's already a table, recurse into it. */
+ return ACTION_RECURSE_INTO_TABLE;
+
+ } else if (desc_type == INVALID_DESC) {
+ /*
+ * There's nothing mapped here, create a new
+ * entry.
+ *
+ * Check if the destination granularity allows
+ * us to use a block descriptor or we need a
+ * finer table for it.
+ *
+ * Also, check if the current level allows block
+ * descriptors. If not, create a table instead.
+ */
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
+ (mm->granularity < XLAT_BLOCK_SIZE(level)))
+ return ACTION_CREATE_NEW_TABLE;
+ else
+ return ACTION_WRITE_BLOCK_ENTRY;
+
+ } else {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ assert(desc_type == BLOCK_DESC);
+
+ return ACTION_NONE;
+ }
+ }
+
+ /*
+ * NOTE(review): '||' here mirrors the unmap path; entries reaching
+ * this branch do overlap the region given the callers' loop bounds,
+ * but '&&' would express "partial overlap" more precisely -- confirm.
+ */
+ } else if ((mm->base_va <= table_entry_end_va) ||
+ (mm_end_va >= table_entry_base_va)) {
+
+ /*
+ * Region partially covers table entry
+ * -----------------------------------
+ *
+ * This means that this table entry can't describe the whole
+ * translation, a finer table is needed.
+ *
+ * There cannot be partial block overlaps in level 3. If that
+ * happens, some of the preliminary checks when adding the
+ * mmap region failed to detect that PA and VA must at least be
+ * aligned to PAGE_SIZE.
+ */
+ assert(level < 3U);
+
+ if (desc_type == INVALID_DESC) {
+ /*
+ * The block is not fully covered by the region. Create
+ * a new table, recurse into it and try to map the
+ * region with finer granularity.
+ */
+ return ACTION_CREATE_NEW_TABLE;
+
+ } else {
+ assert(desc_type == TABLE_DESC);
+ /*
+ * The block is not fully covered by the region, but
+ * there is already a table here. Recurse into it and
+ * try to map with finer granularity.
+ *
+ * PAGE_DESC for level 3 has the same value as
+ * TABLE_DESC, but this code can't run on a level 3
+ * table because there can't be overlaps in level 3.
+ */
+ return ACTION_RECURSE_INTO_TABLE;
+ }
+ } else {
+
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
+}
+
+/*
+ * Recursive function that writes to the translation tables and maps the
+ * specified region. On success, it returns the VA of the last byte that was
+ * successfully mapped. On error, it returns the VA of the next entry that
+ * should have been mapped.
+ *
+ * ctx: translation context the tables belong to.
+ * mm: region to map; base_va/base_pa/size/attr/granularity are used.
+ * table_base_va: VA translated by the first entry of 'table_base'.
+ * table_base: translation table modified at this recursion level.
+ * table_entries: number of entries in 'table_base'.
+ * level: lookup level of 'table_base'.
+ */
+static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ uintptr_t table_base_va,
+ uint64_t *const table_base,
+ unsigned int table_entries,
+ unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+
+ uintptr_t table_idx_va;
+ unsigned long long table_idx_pa;
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ unsigned int table_idx;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
+
+ assert(table_idx < table_entries);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ table_idx = 0U;
+ }
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ /* Account for one more region referencing this (sub)table. */
+ if (level > ctx->base_level)
+ xlat_table_inc_regions_count(ctx, table_base)
+#endif
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
+
+ action_t action = xlat_tables_map_region_action(mm,
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] =
+ xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+ level);
+
+ } else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
+
+ subtable = xlat_table_get_empty(ctx);
+ if (subtable == NULL) {
+ /* Not enough free tables to map this region */
+ return table_idx_va;
+ }
+
+ /* Point to new subtable from this one. */
+ table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /* Propagate a partial-mapping failure upwards. */
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /* Propagate a partial-mapping failure upwards. */
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else {
+
+ assert(action == ACTION_NONE);
+
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (mm_end_va <= table_idx_va)
+ break;
+ }
+
+ return table_idx_va - 1U;
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns (error codes are returned as negative values):
+ * 0: Success, the mapping is allowed.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: The memory limits were surpassed.
+ * ENOMEM: There is not enough memory in the mmap array.
+ * EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ unsigned long long base_pa = mm->base_pa;
+ uintptr_t base_va = mm->base_va;
+ size_t size = mm->size;
+ size_t granularity = mm->granularity;
+
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
+
+ /* Addresses and size must be page-aligned. */
+ if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+ !IS_PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ /* Granularity must match one of the supported block sizes. */
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
+ return -EINVAL;
+ }
+
+ /* Check for overflows */
+ if ((base_pa > end_pa) || (base_va > end_va))
+ return -ERANGE;
+
+ if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
+ return -ERANGE;
+
+ if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
+ return -ERANGE;
+
+ /* Check that there is space in the ctx->mmap array */
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
+ return -ENOMEM;
+
+ /* Check for PAs and VAs overlaps with all other regions */
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
+
+ uintptr_t mm_cursor_end_va = mm_cursor->base_va
+ + mm_cursor->size - 1U;
+
+ /*
+ * Check if one of the regions is completely inside the other
+ * one.
+ */
+ bool fully_overlapped_va =
+ ((base_va >= mm_cursor->base_va) &&
+ (end_va <= mm_cursor_end_va)) ||
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va));
+
+ /*
+ * Full VA overlaps are only allowed if both regions are
+ * identity mapped (zero offset) or have the same VA to PA
+ * offset. Also, make sure that it's not the exact same area.
+ * This can only be done with static regions.
+ */
+ if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
+ return -EPERM;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+ if ((mm_cursor->base_va - mm_cursor->base_pa) !=
+ (base_va - base_pa))
+ return -EPERM;
+
+ if ((base_va == mm_cursor->base_va) &&
+ (size == mm_cursor->size))
+ return -EPERM;
+
+ } else {
+ /*
+ * If the regions do not have fully overlapping VAs,
+ * then they must have fully separated VAs and PAs.
+ * Partial overlaps are not allowed
+ */
+
+ unsigned long long mm_cursor_end_pa =
+ mm_cursor->base_pa + mm_cursor->size - 1U;
+
+ bool separated_pa = (end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa);
+ bool separated_va = (end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va);
+
+ if (!separated_va || !separated_pa)
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Adds a static region to the context's mmap array, keeping the array ordered
+ * by ascending end VA (and, for equal end VAs, by ascending size). Empty
+ * regions are ignored; any other validation failure triggers an assertion
+ * (the function returns void).
+ */
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
+ const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Ignore empty regions */
+ if (mm->size == 0U)
+ return;
+
+ /* Static regions must be added before initializing the xlat tables. */
+ assert(!ctx->initialized);
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0) {
+ ERROR("mmap_add_region_check() failed. error %d\n", ret);
+ assert(false);
+ return;
+ }
+
+ /*
+ * Find correct place in mmap to insert new region.
+ *
+ * 1 - Lower region VA end first.
+ * 2 - Smaller region size first.
+ *
+ * VA 0 0xFF
+ *
+ * 1st |------|
+ * 2nd |------------|
+ * 3rd |------|
+ * 4th |---|
+ * 5th |---|
+ * 6th |----------|
+ * 7th |-------------------------------------|
+ *
+ * This is required for overlapping regions only. It simplifies adding
+ * regions with the loop in xlat_tables_init_internal because the outer
+ * ones won't overwrite block or page descriptors of regions added
+ * previously.
+ *
+ * Overlapping is only allowed for static regions.
+ */
+
+ /* Skip regions whose end VA is below the new region's end VA. */
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ /* Among equal end VAs, keep smaller regions first. */
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /*
+ * Find the last entry marker in the mmap
+ */
+ mm_last = ctx->mmap;
+ while ((mm_last->size != 0U) && (mm_last < mm_end)) {
+ ++mm_last;
+ }
+
+ /*
+ * Check if we have enough space in the memory mapping table.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ /* Make room for new region by moving other regions up by one place */
+ mm_destination = mm_cursor + 1;
+ (void)memmove(mm_destination, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check we haven't lost the empty sentinel from the end of the array.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_end->size == 0U);
+
+ *mm_cursor = *mm;
+
+ /* Track the highest PA/VA covered by any region in this context. */
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+}
+
+/*
+ * Determine the table level closest to the initial lookup level that
+ * can describe this translation. Then, align base VA to the next block
+ * at the determined level. Used by the *_alloc_va_ctx() helpers when the
+ * caller lets the library choose the VA.
+ */
+static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ /*
+ * By or'ing the size and base PA the alignment will be the one
+ * corresponding to the smallest boundary of the two of them.
+ *
+ * There are three different cases. For example (for 4 KiB page size):
+ *
+ * +--------------+------------------++--------------+
+ * | PA alignment | Size multiple of || VA alignment |
+ * +--------------+------------------++--------------+
+ * | 2 MiB | 2 MiB || 2 MiB | (1)
+ * | 2 MiB | 4 KiB || 4 KiB | (2)
+ * | 4 KiB | 2 MiB || 4 KiB | (3)
+ * +--------------+------------------++--------------+
+ *
+ * - In (1), it is possible to take advantage of the alignment of the PA
+ * and the size of the region to use a level 2 translation table
+ * instead of a level 3 one.
+ *
+ * - In (2), the size is smaller than a block entry of level 2, so it is
+ * needed to use a level 3 table to describe the region or the library
+ * will map more memory than the desired one.
+ *
+ * - In (3), even though the region has the size of one level 2 block
+ * entry, it isn't possible to describe the translation with a level 2
+ * block entry because of the alignment of the base PA.
+ *
+ * Only bits 47:21 of a level 2 block descriptor are used by the MMU,
+ * bits 20:0 of the resulting address are 0 in this case. Because of
+ * this, the PA generated as result of this translation is aligned to
+ * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
+ * though, which means that the resulting translation is incorrect.
+ * The only way to prevent this is by using a finer granularity.
+ */
+ unsigned long long align_check;
+
+ align_check = mm->base_pa | (unsigned long long)mm->size;
+
+ /*
+ * Assume it is always aligned to level 3. There's no need to check that
+ * level because its block size is PAGE_SIZE. The checks to verify that
+ * the addresses and size are aligned to PAGE_SIZE are inside
+ * mmap_add_region.
+ */
+ for (int level = ctx->base_level; level <= 2; ++level) {
+
+ if (align_check & XLAT_BLOCK_MASK(level))
+ continue;
+
+ mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
+ return;
+ }
+}
+
+/*
+ * Adds a static region to the context, allocating its base VA automatically:
+ * the region is placed right after the highest VA used so far, aligned to the
+ * best mapping granularity its PA and size allow. Asserts on VA overflow.
+ */
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ assert(mm->size > 0U);
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ assert(mm->base_va > ctx->max_va);
+
+ mmap_add_region_ctx(ctx, mm);
+}
+
+/*
+ * Adds every region of a zero-size-terminated mmap array to the context, in
+ * array order.
+ */
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
+ }
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * Adds a dynamic region to the context and, if the translation tables are
+ * already initialized, maps it immediately.
+ *
+ * Returns 0 on success or a negative errno value on failure (see
+ * mmap_add_region_check()). If mapping fails part-way through, the partial
+ * mapping and the mmap entry are rolled back and -ENOMEM is returned.
+ */
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Nothing to do */
+ if (mm->size == 0U)
+ return 0;
+
+ /* Now this region is a dynamic one */
+ mm->attr |= MT_DYNAMIC;
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Find the adequate entry in the mmap array in the same way done for
+ * static regions in mmap_add_region_ctx().
+ */
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /* Make room for new region by moving other regions up by one place */
+ (void)memmove(mm_cursor + 1U, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check we haven't lost the empty sentinel from the end of the array.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ *mm_cursor = *mm;
+
+ /*
+ * Update the translation tables if the xlat tables are initialized. If
+ * not, this region will be mapped when they are initialized.
+ */
+ if (ctx->initialized) {
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ /* Failed to map, remove mmap entry, unmap and return error. */
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check if the mapping function actually managed to map
+ * anything. If not, just return now.
+ */
+ if (mm->base_va >= end_va)
+ return -ENOMEM;
+
+ /*
+ * Something went wrong after mapping some table
+ * entries, undo every change done up to this point.
+ */
+ mmap_region_t unmap_mm = {
+ .base_pa = 0U,
+ .base_va = mm->base_va,
+ .size = end_va - mm->base_va,
+ .attr = 0U
+ };
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ return -ENOMEM;
+ }
+
+ /*
+ * Make sure that all entries are written to the memory. There
+ * is no need to invalidate entries when mapping dynamic regions
+ * because new table/block/page descriptors only replace old
+ * invalid descriptors, that aren't TLB cached.
+ */
+ dsbishst();
+ }
+
+ /* Track the highest PA/VA covered by any region in this context. */
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+
+ return 0;
+}
+
+/*
+ * Same as mmap_add_dynamic_region_ctx(), but allocating the base VA
+ * automatically right after the highest VA mapped so far (suitably aligned).
+ * Returns -ENOMEM if the VA space overflows, otherwise the result of
+ * mmap_add_dynamic_region_ctx().
+ */
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ if (mm->size == 0U)
+ return 0;
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ if (mm->base_va < ctx->max_va) {
+ return -ENOMEM;
+ }
+
+ return mmap_add_dynamic_region_ctx(ctx, mm);
+}
+
+/*
+ * Removes the region with given base Virtual Address and size from the given
+ * context. Both base_va and size must match an existing entry exactly.
+ *
+ * Returns (error codes are returned as negative values):
+ * 0: Success.
+ * EINVAL: Invalid values were used as arguments (region not found).
+ * EPERM: Tried to remove a static region.
+ */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size)
+{
+ mmap_region_t *mm = ctx->mmap;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
+ int update_max_va_needed = 0;
+ int update_max_pa_needed = 0;
+
+ /* Check sanity of mmap array. */
+ assert(mm[ctx->mmap_num].size == 0U);
+
+ /* Linear search for an exact (base_va, size) match. */
+ while (mm->size != 0U) {
+ if ((mm->base_va == base_va) && (mm->size == size))
+ break;
+ ++mm;
+ }
+
+ /* Check that the region was found */
+ if (mm->size == 0U)
+ return -EINVAL;
+
+ /* If the region is static it can't be removed */
+ if ((mm->attr & MT_DYNAMIC) == 0U)
+ return -EPERM;
+
+ /* Check if this region is using the top VAs or PAs. */
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
+ update_max_va_needed = 1;
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
+ update_max_pa_needed = 1;
+
+ /* Update the translation tables if needed */
+ if (ctx->initialized) {
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
+ ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ xlat_arch_tlbi_va_sync();
+ }
+
+ /* Remove this region by moving the rest down by one place. */
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
+
+ /* Check if we need to update the max VAs and PAs */
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Initializes the translation tables of the given context: zeroes the base
+ * table and all subtables, then maps every region currently in the mmap
+ * array, panicking if one can't be mapped. Must be called exactly once per
+ * context, with the MMU for that context's regime still disabled (enforced
+ * by the asserts below).
+ */
+void init_xlat_tables_ctx(xlat_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(!ctx->initialized);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
+ assert(!is_mmu_enabled_ctx(ctx));
+
+ mmap_region_t *mm = ctx->mmap;
+
+ xlat_mmap_print(mm);
+
+ /* All tables must be zeroed before mapping any region. */
+
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
+ ctx->base_table[i] = INVALID_DESC;
+
+ for (int j = 0; j < ctx->tables_num; j++) {
+#if PLAT_XLAT_TABLES_DYNAMIC
+ ctx->tables_mapped_regions[j] = 0;
+#endif
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
+ ctx->tables[j][i] = INVALID_DESC;
+ }
+
+ /* Map every region; regions are sorted so outer ones come last. */
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ if (end_va != (mm->base_va + mm->size - 1U)) {
+ ERROR("Not enough memory to map region:\n"
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
+ panic();
+ }
+
+ mm++;
+ }
+
+ assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
+ assert(ctx->max_va <= ctx->va_max_address);
+ assert(ctx->max_pa <= ctx->pa_max_address);
+
+ ctx->initialized = true;
+
+ xlat_tables_print(ctx);
+}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
new file mode 100644
index 000000000..528996a29
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
+
+#include <platform_def.h>
+#include <stdbool.h>
+#include <xlat_tables_defs.h>
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Private shifts and masks to access fields of an mmap attribute
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT U(31)
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the public header.
+ */
+
+/*
+ * Regions mapped before enabling the MMU can't be unmapped dynamically
+ * (they are static) and regions mapped with the MMU enabled can be
+ * unmapped. This behaviour can't be overridden.
+ *
+ * Static regions can overlap each other, dynamic regions can't.
+ */
+#define MT_STATIC (U(0) << MT_DYN_SHIFT)
+#define MT_DYNAMIC (U(1) << MT_DYN_SHIFT)
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
+/*
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
+ * pertaining to a higher exception level, e.g. invalidating EL3 entries from
+ * S-EL1.
+ */
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
+
+/*
+ * This function has to be called at the end of any code that uses the function
+ * xlat_arch_tlbi_va().
+ */
+void xlat_arch_tlbi_va_sync(void);
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void xlat_mmap_print(const mmap_region_t *mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+unsigned int xlat_arch_current_el(void);
+
+/*
+ * Return the maximum physical address supported by the hardware.
+ * This value depends on the execution state (AArch32/AArch64).
+ */
+unsigned long long xlat_arch_get_max_supported_pa(void);
+
+/*
+ * Returns true if the MMU of the translation regime managed by the given
+ * xlat_ctx_t is enabled, false otherwise.
+ */
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
new file mode 100644
index 000000000..1ada48896
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
+{
+ /* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+ /* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_mmap_print(const mmap_region_t *mmap)
+{
+ printf("mmap:\n");
+ const mmap_region_t *mm = mmap;
+
+ while (mm->size != 0U) {
+ printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr,
+ mm->granularity);
+ ++mm;
+ };
+ printf("\n");
+}
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
+ int xlat_regime = ctx->xlat_regime;
+
+ if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ printf("MEM");
+ } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+ printf("NC");
+ } else {
+ assert(mem_type_index == ATTR_DEVICE_INDEX);
+ printf("DEV");
+ }
+
+ if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+ /* For EL3 and EL2 only check the AP[2] and XN bits. */
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
+ } else {
+ assert(xlat_regime == EL1_EL0_REGIME);
+ /*
+ * For EL0 and EL1:
+ * - In AArch64 PXN and UXN can be set independently but in
+ * AArch32 there is no UXN (XN affects both privilege levels).
+ * For consistency, we set them simultaneously in both cases.
+ * - RO and RW permissions must be the same in EL1 and EL0. If
+ * EL0 can access that memory region, so can EL1, with the
+ * same permissions.
+ */
+#if ENABLE_ASSERTIONS
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
+ uint64_t xn_perm = desc & xn_mask;
+
+ assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
+#endif
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ /* Only check one of PXN and UXN, the other one is the same. */
+ printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
+ /*
+ * Privileged regions can only be accessed from EL1, user
+ * regions can be accessed from EL1 and EL0.
+ */
+ printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
+ ? "-USER" : "-PRIV");
+ }
+
+ printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
+}
+
+static const char * const level_spacers[] = {
+ "[LV0] ",
+ " [LV1] ",
+ " [LV2] ",
+ " [LV3] "
+};
+
+static const char *invalid_descriptors_ommited =
+ "%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
+{
+ assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+ uint64_t desc;
+ uintptr_t table_idx_va = table_base_va;
+ unsigned int table_idx = 0U;
+ size_t level_size = XLAT_BLOCK_SIZE(level);
+
+ /*
+ * Keep track of how many invalid descriptors are counted in a row.
+ * Whenever multiple invalid descriptors are found, only the first one
+ * is printed, and a line is added to inform about how many descriptors
+ * have been omitted.
+ */
+ int invalid_row_count = 0;
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+ }
+ invalid_row_count++;
+
+ } else {
+
+ if (invalid_row_count > 1) {
+ printf(invalid_descriptors_ommited,
+ level_spacers[level],
+ invalid_row_count - 1);
+ }
+ invalid_row_count = 0;
+
+ /*
+ * Check if this is a table or a block. Tables are only
+ * allowed in levels other than 3, but DESC_PAGE has the
+ * same value as DESC_TABLE, so we need to check.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
+ /*
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
+ */
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1U);
+ } else {
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+ }
+ }
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+ printf(invalid_descriptors_ommited,
+ level_spacers[level], invalid_row_count - 1);
+ }
+}
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+ const char *xlat_regime_str;
+ int used_page_tables;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ xlat_regime_str = "2";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
+ VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
+ VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
+ VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
+
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
+ ctx->base_table_entries);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ used_page_tables = 0;
+ for (int i = 0; i < ctx->tables_num; ++i) {
+ if (ctx->tables_mapped_regions[i] != 0)
+ ++used_page_tables;
+ }
+#else
+ used_page_tables = ctx->next_table;
+#endif
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
+ used_page_tables, ctx->tables_num,
+ ctx->tables_num - used_page_tables);
+
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ unsigned int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ unsigned int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ unsigned int entries;
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ uint64_t idx, desc, desc_type;
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ if (idx >= entries) {
+ WARN("Missing xlat table entry at address 0x%lx\n",
+ virtual_addr);
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * Only page descriptors allowed at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(false);
+
+ return NULL;
+}
+
+
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+ uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, unsigned int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ unsigned int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0U;
+
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
+
+ if (ns_bit == 1U)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0U);
+ }
+
+ return 0;
+}
+
+
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr)
+{
+ return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+ NULL, NULL, NULL);
+}
+
+
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0U) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0U) {
+ WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
+ WARN("%s: Mapping memory as read-write and executable not allowed.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ size_t pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+ WARN("Setting device memory as executable at address 0x%lx.",
+ base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ for (unsigned int i = 0U; i < pages_count; ++i) {
+
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
+
+ (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ base_va += PAGE_SIZE;
+ }
+
+ /* Ensure that the last descriptor written is seen by the system. */
+ dsbish();
+
+ return 0;
+}
diff --git a/license.rst b/license.rst
new file mode 100644
index 000000000..3c873110c
--- /dev/null
+++ b/license.rst
@@ -0,0 +1,38 @@
+Copyright (c) <year> <owner>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+- Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------
+
+Note:
+Individual files contain the following tag instead of the full license text.
+
+::
+
+ SPDX-License-Identifier: BSD-3-Clause
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are here available: http://spdx.org/licenses/
diff --git a/maintainers.rst b/maintainers.rst
new file mode 100644
index 000000000..6999528b9
--- /dev/null
+++ b/maintainers.rst
@@ -0,0 +1,22 @@
+Trusted Firmware-A Tests maintainers
+====================================
+
+Trusted Firmware-A Tests (TF-A Tests) is a community maintained project. All
+contributions are ultimately merged by the maintainers listed below.
+
+Please note the maintainers' bandwidth is limited and contributions to this
+project will be reviewed and handled on a best-effort basis.
+
+Maintainers
+-----------
+
+- Dimitris Papastamos <dimitris.papastamos@arm.com>
+- Jeenu Viswambharan <jeenu.viswambharan@arm.com>
+- Joanna Farley <joanna.farley@arm.com>
+- Roberto Vargas <roberto.vargas@arm.com>
+- Sandrine Bailleux <sandrine.bailleux@arm.com>
+- Soby Mathew <soby.mathew@arm.com>
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
diff --git a/plat/arm/board/fvp/aarch32/plat_helpers.S b/plat/arm/board/fvp/aarch32/plat_helpers.S
new file mode 100644
index 000000000..2dcc6e9e4
--- /dev/null
+++ b/plat/arm/board/fvp/aarch32/plat_helpers.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include "../fvp_def.h"
+
+ .globl platform_get_core_pos
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on FVP.
+ *
+ * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER * FVP_MAX_PE_PER_CPU) +
+ * (CPUId * FVP_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * FVP_MAX_CPUS_PER_CLUSTER + CPUId) * FVP_MAX_PE_PER_CPU)
+ * + ThreadId
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to the left to
+ * make it look as if in a multi-threaded implementation
+ */
+ tst r0, #MPIDR_MT_MASK
+ mov r3, r0
+ lsleq r3, r0, #MPIDR_AFFINITY_BITS
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx r0, r3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx r1, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx r2, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov r3, #FVP_MAX_CPUS_PER_CLUSTER
+ mla r1, r2, r3, r1
+ mov r3, #FVP_MAX_PE_PER_CPU
+ mla r0, r1, r3, r0
+
+ bx lr
+endfunc platform_get_core_pos
diff --git a/plat/arm/board/fvp/aarch64/plat_helpers.S b/plat/arm/board/fvp/aarch64/plat_helpers.S
new file mode 100644
index 000000000..bc9f60d39
--- /dev/null
+++ b/plat/arm/board/fvp/aarch64/plat_helpers.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include "../fvp_def.h"
+
+ .globl platform_get_core_pos
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on FVP.
+ *
+ * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER * FVP_MAX_PE_PER_CPU) +
+ * (CPUId * FVP_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * FVP_MAX_CPUS_PER_CLUSTER + CPUId) * FVP_MAX_PE_PER_CPU)
+ * + ThreadId
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to the left to
+ * make it look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov x3, #FVP_MAX_CPUS_PER_CLUSTER
+ madd x1, x2, x3, x1
+ mov x3, #FVP_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+endfunc platform_get_core_pos
diff --git a/plat/arm/board/fvp/fvp_def.h b/plat/arm/board/fvp/fvp_def.h
new file mode 100644
index 000000000..a47287f3d
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_def.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * FVP specific definitions. Used only by FVP specific code.
+ ******************************************************************************/
+
+#ifndef __FVP_DEF_H__
+#define __FVP_DEF_H__
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Cluster Topology definitions
+ ******************************************************************************/
+#define FVP_MAX_CPUS_PER_CLUSTER 4
+/* Currently the highest cluster count on the FVP is 4 (Quad cluster) */
+#define FVP_CLUSTER_COUNT 4
+/* Currently multi-threaded CPUs only have a single thread */
+#define FVP_MAX_PE_PER_CPU 1
+
+/*******************************************************************************
+ * FVP memory map related constants
+ ******************************************************************************/
+
+#define DEVICE0_BASE 0x1a000000
+#define DEVICE0_SIZE 0x12200000
+
+#define DEVICE1_BASE 0x2f000000
+#define DEVICE1_SIZE 0x400000
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+/* Base FVP compatible GIC memory map */
+#define GICD_BASE 0x2f000000
+#define GICR_BASE 0x2f100000
+#define GICC_BASE 0x2c000000
+
+/*******************************************************************************
+ * PL011 related constants
+ ******************************************************************************/
+#define PL011_UART0_BASE 0x1c090000
+#define PL011_UART0_CLK_IN_HZ 24000000
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ PL011_UART0_CLK_IN_HZ
+
+#endif /* __FVP_DEF_H__ */
diff --git a/plat/arm/board/fvp/fvp_mem_prot.c b/plat/arm/board/fvp/fvp_mem_prot.c
new file mode 100644
index 000000000..6a7d651ef
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_mem_prot.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#define NS_IMAGE_OFFSET TFTF_BASE
+#define NS_IMAGE_LIMIT (NS_IMAGE_OFFSET + (32 << TWO_MB_SHIFT))
+
+static const mem_region_t fvp_ram_ranges[] = {
+ {NS_IMAGE_LIMIT, 1 << ONE_GB_SHIFT},
+#ifdef AARCH64
+ {FVP_DRAM2_BASE, 1 << ONE_GB_SHIFT},
+#endif
+};
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(fvp_ram_ranges);
+ return fvp_ram_ranges;
+}
diff --git a/plat/arm/board/fvp/fvp_pwr_state.c b/plat/arm/board/fvp/fvp_pwr_state.c
new file mode 100644
index 000000000..394818b74
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_pwr_state.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+#include <stddef.h>
+
+/*
+ * State IDs for local power states on the FVP.
+ */
+#define FVP_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define FVP_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define FVP_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ FVP_RUN_DEPTH = 0,
+ FVP_RETENTION_DEPTH,
+ FVP_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {FVP_RETENTION_DEPTH, FVP_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {FVP_OFF_DEPTH, FVP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible
+ for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {FVP_OFF_DEPTH, FVP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible
+ for the system level */
+static const plat_state_prop_t system_state_prop[] = {
+ {FVP_OFF_DEPTH, FVP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
diff --git a/plat/arm/board/fvp/fvp_topology.c b/plat/arm/board/fvp/fvp_topology.c
new file mode 100644
index 000000000..348f8efff
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_topology.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <mmio.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+/* FVP Power controller based defines */
+#define PWRC_BASE 0x1c100000
+#define PSYSR_OFF 0x10
+#define PSYSR_INVALID 0xffffffff
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} fvp_base_aemv8a_aemv8a_cores[] = {
+ /* Cluster 0 */
+ { 0, 0 },
+ { 0, 1 },
+ { 0, 2 },
+ { 0, 3 },
+ /* Cluster 1 */
+ { 1, 0 },
+ { 1, 1 },
+ { 1, 2 },
+ { 1, 3 },
+ /* Cluster 2 */
+ { 2, 0 },
+ { 2, 1 },
+ { 2, 2 },
+ { 2, 3 },
+ /* Cluster 3 */
+ { 3, 0 },
+ { 3, 1 },
+ { 3, 2 },
+ { 3, 3 },
+};
+
+/*
+ * The FVP power domain tree descriptor. We always construct a topology
+ * with the maximum number of cluster nodes possible for FVP. During
+ * TFTF initialization, the actual number of nodes present on the model
+ * will be queried dynamically using `tftf_plat_get_mpidr()`.
+ * The FVP power domain tree does not have a single system level power domain
+ * i.e. a single root node. The first entry in the power domain descriptor
+ * specifies the number of power domains at the highest power level which
+ * is equal to FVP_CLUSTER_COUNT.
+ */
+static const unsigned char fvp_power_domain_tree_desc[] = {
+ /* Number of system nodes */
+ 1,
+ /* Number of cluster nodes */
+ FVP_CLUSTER_COUNT,
+ /* Number of children for the first node */
+ FVP_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the second node */
+ FVP_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the third node */
+ FVP_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the fourth node */
+ FVP_MAX_CPUS_PER_CLUSTER
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return fvp_power_domain_tree_desc;
+}
+
+static unsigned int fvp_pwrc_read_psysr(unsigned long mpidr)
+{
+ unsigned int rc;
+ mmio_write_32(PWRC_BASE + PSYSR_OFF, (unsigned int) mpidr);
+ rc = mmio_read_32(PWRC_BASE + PSYSR_OFF);
+ return rc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ unsigned int mpid;
+
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ mpid = make_mpid(
+ fvp_base_aemv8a_aemv8a_cores[core_pos].cluster_id,
+ fvp_base_aemv8a_aemv8a_cores[core_pos].cpu_id);
+
+ if (fvp_pwrc_read_psysr(mpid) != PSYSR_INVALID)
+ return mpid;
+
+ return INVALID_MPID;
+}
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
new file mode 100644
index 000000000..f09b6b52d
--- /dev/null
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_def.h>
+#include "../fvp_def.h"
+
+/*******************************************************************************
+ * Platform definitions used by common code
+ ******************************************************************************/
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#ifndef AARCH32
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+#else
+#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH arm
+#endif
+
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE 0x88000000
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE 0x1C0F0000
+
+/*******************************************************************************
+ * Base address and size of external NVM flash
+ ******************************************************************************/
+#define FLASH_BASE 0x08000000
+
+/*
+ * The flash memory in the FVP models a SCSP package containing 2 dies
+ * with a total size of 512Mb; only the main blocks of 128KB each are
+ * used for storing results. The FVP also performs data striping,
+ * splitting each word in half across the two flash dies, which
+ * presents a virtual block size of 256KB to software.
+ */
+#define NOR_FLASH_BLOCK_SIZE 0x40000 /* 256KB */
+#define NOR_FLASH_BLOCKS_COUNT 255
+#define FLASH_SIZE (NOR_FLASH_BLOCK_SIZE * NOR_FLASH_BLOCKS_COUNT)
+
+/*******************************************************************************
+ * Base address and size for the FIP that contains FWU images.
+ ******************************************************************************/
+#define PLAT_ARM_FWU_FIP_BASE (FLASH_BASE + 0x400000)
+#define PLAT_ARM_FWU_FIP_SIZE (0x100000)
+
+/*******************************************************************************
+ * Base address and size for non-trusted SRAM.
+ ******************************************************************************/
+#define NSRAM_BASE (0x2e000000)
+#define NSRAM_SIZE (0x00010000)
+
+/*******************************************************************************
+ * Corresponds to the function ID of the BL1 SMC handler for FWU process.
+ ******************************************************************************/
+#define BL1_SMC_CALL_COUNT 0x0
+#define BL1_SMC_UID 0x1
+/* SMC #0x2 reserved */
+#define BL1_SMC_VERSION 0x3
+#define FWU_SMC_IMAGE_COPY 0x10
+#define FWU_SMC_IMAGE_AUTH 0x11
+#define FWU_SMC_IMAGE_EXECUTE 0x12
+#define FWU_SMC_IMAGE_RESUME 0x13
+#define FWU_SMC_SEC_IMAGE_DONE 0x14
+#define FWU_SMC_UPDATE_DONE 0x15
+#define FWU_SMC_IMAGE_RESET 0x16
+
+
+/*******************************************************************************
+ * NS_BL1U specific defines.
+ * NS_BL1U RW data is relocated from NS-ROM to NS-RAM at runtime so we
+ * need 2 sets of addresses.
+ ******************************************************************************/
+#define NS_BL1U_RO_BASE (0x08000000 + 0x03EB8000)
+#define NS_BL1U_RO_LIMIT (NS_BL1U_RO_BASE + 0xC000)
+
+/*******************************************************************************
+ * Put NS_BL1U RW at the top of the Non-Trusted SRAM. NS_BL1U_RW_BASE is
+ * calculated using the current NS_BL1U RW debug size plus a little space
+ * for growth.
+ ******************************************************************************/
+#define NS_BL1U_RW_SIZE (0x7000)
+#define NS_BL1U_RW_BASE (NSRAM_BASE)
+#define NS_BL1U_RW_LIMIT (NS_BL1U_RW_BASE + NS_BL1U_RW_SIZE)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define FVP_DRAM1_BASE 0x80000000
+#define FVP_DRAM2_BASE 0x880000000
+#define DRAM_BASE FVP_DRAM1_BASE
+#define DRAM_SIZE 0x80000000
+
+/*******************************************************************************
+ * Base address and limit for NS_BL2U image.
+ ******************************************************************************/
+#define NS_BL2U_BASE DRAM_BASE
+#define NS_BL2U_LIMIT (NS_BL2U_BASE + 0x4D000)
+
+/******************************************************************************
+ * Memory mapped Generic timer interfaces
+ ******************************************************************************/
+/* REFCLK CNTControl, Generic Timer. Secure Access only. */
+#define SYS_CNT_CONTROL_BASE 0x2a430000
+/* REFCLK CNTRead, Generic Timer. */
+#define SYS_CNT_READ_BASE 0x2a800000
+/* AP_REFCLK CNTBase1, Generic Timer. */
+#define SYS_CNT_BASE1 0x2a830000
+
+/* V2M motherboard system registers & offsets */
+#define VE_SYSREGS_BASE 0x1c010000
+#define V2M_SYS_LED 0x8
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if IMAGE_NS_BL1U || IMAGE_NS_BL2U
+#define PLATFORM_STACK_SIZE 0x1000
+#else
+#define PLATFORM_STACK_SIZE 0x1400
+#endif
+
+/* Size of coherent stacks for debug and release builds */
+#if DEBUG
+#define PCPU_DV_MEM_STACK_SIZE 0x600
+#else
+#define PCPU_DV_MEM_STACK_SIZE 0x500
+#endif
+
+#define PLATFORM_CORE_COUNT (FVP_CLUSTER_COUNT * \
+ FVP_MAX_CPUS_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (1 + FVP_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+
+/* TODO : Migrate complete TFTF from affinity level to power levels */
+#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+#if IMAGE_NS_BL1U
+#define MAX_IO_DEVICES 2
+#define MAX_IO_HANDLES 2
+#else
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
+#endif
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+#if USE_NVM
+/*
+ * The Flash memory is used to store the TFTF data on FVP.
+ * However, it might contain other data that must not be overwritten.
+ * For example, when using the Trusted Firmware-A, the FIP image
+ * (containing the bootloader images) is also stored in Flash.
+ * Hence, consider the first 40MB of Flash as reserved for firmware usage.
+ * The TFTF can use the rest of the Flash memory.
+ */
+#define TFTF_NVM_OFFSET 0x2800000 /* 40 MB */
+#define TFTF_NVM_SIZE (FLASH_SIZE - TFTF_NVM_OFFSET)
+#else
+/*
+ * If you want to run without support for non-volatile memory (due to
+ * e.g. unavailability of a flash driver), DRAM can be used instead as
+ * a workaround. The TFTF binary itself is loaded at 0x88000000 so the
+ * first 128MB can be used
+ * Please note that this won't be suitable for all test scenarios and
+ * for this reason some tests will be disabled in this configuration.
+ */
+#define TFTF_NVM_OFFSET 0x0
+#define TFTF_NVM_SIZE (TFTF_BASE - DRAM_BASE - TFTF_NVM_OFFSET)
+#endif
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
+#if IMAGE_TFTF
+#define MAX_XLAT_TABLES 6
+#define MAX_MMAP_REGIONS 16
+#else
+#define MAX_XLAT_TABLES 5
+#define MAX_MMAP_REGIONS 16
+#endif
+
+/*******************************************************************************
+ * Used to align variables on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupts IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+/*
+ * On FVP, consider that the last SPI is the Trusted Random Number Generator
+ * interrupt.
+ */
+#define PLAT_MAX_SPI_OFFSET_ID 107
+
+/* AP_REFCLK, Generic Timer, CNTPSIRQ1. */
+#define IRQ_CNTPSIRQ1 58
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER 26
+/* Per-CPU Non-Secure Timer Interrupt ID */
+#define IRQ_PCPU_NS_TIMER 30
+
+
+/* Times(in ms) used by test code for completion of different events */
+#define PLAT_SUSPEND_ENTRY_TIME 15
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 30
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/arm/board/fvp/plat_setup.c b/plat/arm/board/fvp/plat_setup.c
new file mode 100644
index 000000000..0d816863a
--- /dev/null
+++ b/plat/arm/board/fvp/plat_setup.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <plat_arm.h>
+#include <platform.h>
+
+/*
+ * Table of regions to map using the MMU.
+ */
+#if IMAGE_NS_BL1U
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_MEMORY | MT_RO | MT_NS),
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+ };
+#elif IMAGE_NS_BL2U
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+#elif IMAGE_TFTF
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+#if USE_NVM
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
+#endif
+ MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+#endif /* IMAGE_NS_BL1U */
+
+/* Return the MMU memory map table selected above for the image built. */
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
+
+/* Initialise the Arm GIC driver with the FVP GIC base addresses. */
+void plat_arm_gic_init(void)
+{
+ arm_gic_init(GICC_BASE, GICD_BASE, GICR_BASE);
+}
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
new file mode 100644
index 000000000..281a5f30c
--- /dev/null
+++ b/plat/arm/board/fvp/platform.mk
@@ -0,0 +1,26 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/arm/board/fvp/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/sp805/sp805.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/system_timer.c \
+ lib/semihosting/${ARCH}/semihosting_call.S \
+ lib/semihosting/semihosting.c \
+ plat/arm/board/fvp/${ARCH}/plat_helpers.S \
+ plat/arm/board/fvp/fvp_pwr_state.c \
+ plat/arm/board/fvp/fvp_topology.c \
+ plat/arm/board/fvp/fvp_mem_prot.c \
+ plat/arm/board/fvp/plat_setup.c
+
+# Firmware update is implemented on FVP.
+FIRMWARE_UPDATE := 1
+
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/board/juno/aarch32/plat_helpers.S b/plat/arm/board/juno/aarch32/plat_helpers.S
new file mode 100644
index 000000000..6f252c00c
--- /dev/null
+++ b/plat/arm/board/juno/aarch32/plat_helpers.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl platform_get_core_pos
+
+ /*
+ * Return 0 to 3 for the Cortex-A53 cores and 4 to 5 for the Cortex-A57
+ * cores.
+ */
+func platform_get_core_pos
+ /* r1 = CPU id (Aff0), r0 = cluster field (Aff1) of the input MPIDR. */
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ /* Swap Cortex-A53/Cortex-A57 order. */
+ eor r0, r0, #(1 << MPIDR_AFF1_SHIFT)
+ /* core_pos = cpu_id + (swapped cluster field >> 6). */
+ add r0, r1, r0, LSR #6
+ bx lr
+endfunc platform_get_core_pos
diff --git a/plat/arm/board/juno/aarch64/plat_helpers.S b/plat/arm/board/juno/aarch64/plat_helpers.S
new file mode 100644
index 000000000..c51a4d98e
--- /dev/null
+++ b/plat/arm/board/juno/aarch64/plat_helpers.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl platform_get_core_pos
+
+ /*
+ * Return 0 to 3 for the Cortex-A53 cores and 4 to 5 for the Cortex-A57
+ * cores.
+ */
+func platform_get_core_pos
+ /* x1 = CPU id (Aff0), x0 = cluster field (Aff1) of the input MPIDR. */
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ /* Swap Cortex-A53/Cortex-A57 order. */
+ eor x0, x0, #(1 << MPIDR_AFF1_SHIFT)
+ /* core_pos = cpu_id + (swapped cluster field >> 6). */
+ add x0, x1, x0, LSR #6
+ ret
+endfunc platform_get_core_pos
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
new file mode 100644
index 000000000..37b419fc9
--- /dev/null
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_def.h>
+#include "../juno_def.h"
+
+/*******************************************************************************
+ * Platform definitions used by common code
+ ******************************************************************************/
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#ifndef AARCH32
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+#else
+#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH arm
+#endif
+
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE 0xE0000000
+
+#define JUNO_DRAM1_BASE 0x80000000
+#define JUNO_DRAM2_BASE 0x880000000
+#define DRAM_BASE JUNO_DRAM1_BASE
+#define DRAM_SIZE 0x80000000
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE 0x1C0F0000
+
+/* Memory mapped Generic timer interfaces */
+#define SYS_CNT_BASE1 0x2a830000
+
+/* V2M motherboard system registers & offsets */
+#define VE_SYSREGS_BASE 0x1c010000
+#define V2M_SYS_LED 0x8
+
+/*******************************************************************************
+ * Base address and size of external NVM flash
+ ******************************************************************************/
+#define FLASH_BASE 0x08000000
+
+/*
+ * The flash chip on Juno is a SCSP package of 2 dies totalling 512Mb; only
+ * the main 128KB blocks are used for storing results. The SMC controller
+ * performs data striping, splitting each word in half across the two dies,
+ * which presents a virtual block size of 256KB to software.
+ */
+#define NOR_FLASH_BLOCK_SIZE 0x40000
+#define NOR_FLASH_BLOCKS_COUNT 255
+#define FLASH_SIZE (NOR_FLASH_BLOCK_SIZE * NOR_FLASH_BLOCKS_COUNT)
+
+/*******************************************************************************
+ * Base address and size for the FIP that contains FWU images.
+ ******************************************************************************/
+#define PLAT_ARM_FWU_FIP_BASE (FLASH_BASE + 0x400000)
+#define PLAT_ARM_FWU_FIP_SIZE (0x100000)
+
+/*******************************************************************************
+ * Base address and size for non-trusted SRAM.
+ ******************************************************************************/
+#define NSRAM_BASE (0x2e000000)
+#define NSRAM_SIZE (0x00008000)
+
+/*******************************************************************************
+ * Corresponds to the function ID of the BL1 SMC handler for FWU process.
+ ******************************************************************************/
+#define BL1_SMC_CALL_COUNT 0x0
+#define BL1_SMC_UID 0x1
+/* SMC #0x2 reserved */
+#define BL1_SMC_VERSION 0x3
+#define FWU_SMC_IMAGE_COPY 0x10
+#define FWU_SMC_IMAGE_AUTH 0x11
+#define FWU_SMC_IMAGE_EXECUTE 0x12
+#define FWU_SMC_IMAGE_RESUME 0x13
+#define FWU_SMC_SEC_IMAGE_DONE 0x14
+#define FWU_SMC_UPDATE_DONE 0x15
+#define FWU_SMC_IMAGE_RESET 0x16
+
+/*******************************************************************************
+ * NS_BL1U specific defines.
+ * NS_BL1U RW data is relocated from NS-ROM to NS-RAM at runtime so we
+ * need 2 sets of addresses.
+ ******************************************************************************/
+#define NS_BL1U_RO_BASE (0x08000000 + 0x03EB8000)
+#define NS_BL1U_RO_LIMIT (NS_BL1U_RO_BASE + 0xC000)
+
+/*******************************************************************************
+ * Put NS_BL1U RW at the top of the Non-Trusted SRAM. NS_BL1U_RW_BASE is
+ * calculated using the current NS_BL1U RW debug size plus a little space
+ * for growth.
+ ******************************************************************************/
+#define NS_BL1U_RW_SIZE (0x7000)
+#define NS_BL1U_RW_BASE (NSRAM_BASE)
+#define NS_BL1U_RW_LIMIT (NS_BL1U_RW_BASE + NS_BL1U_RW_SIZE)
+
+/*******************************************************************************
+ * Base address and limit for NS_BL2U image.
+ ******************************************************************************/
+#define NS_BL2U_BASE DRAM_BASE
+#define NS_BL2U_LIMIT (NS_BL2U_BASE + 0x4D000)
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if IMAGE_NS_BL1U
+#define PLATFORM_STACK_SIZE 0x1000
+#elif IMAGE_NS_BL2U
+#define PLATFORM_STACK_SIZE 0x1000
+#elif IMAGE_TFTF
+#define PLATFORM_STACK_SIZE 0x1400
+#endif
+
+/* Size of coherent stacks for debug and release builds */
+#if DEBUG
+#define PCPU_DV_MEM_STACK_SIZE 0x600
+#else
+#define PCPU_DV_MEM_STACK_SIZE 0x500
+#endif
+
+#define PLATFORM_SYSTEM_COUNT 1
+#define PLATFORM_CLUSTER_COUNT 2
+#define PLATFORM_CLUSTER1_CORE_COUNT 4 /* Cortex-A53 Cluster */
+#define PLATFORM_CLUSTER0_CORE_COUNT 2 /* Cortex-A57 Cluster */
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
+ PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
+ PLATFORM_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+#if IMAGE_NS_BL1U
+#define MAX_IO_DEVICES 2
+#define MAX_IO_HANDLES 2
+#else
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
+#endif
+
+#if USE_NVM
+/*
+ * The Flash memory is used to store TFTF data on Juno.
+ * However, it might contain other data that must not be overwritten.
+ * For example, when using the Trusted Firmware-A, the FIP image
+ * (containing the bootloader images) is also stored in Flash.
+ * Hence, consider the first 40MB of Flash as reserved for firmware usage.
+ * The TFTF can use the rest of the Flash memory.
+ */
+#define TFTF_NVM_OFFSET 0x2800000 /* 40MB */
+#define TFTF_NVM_SIZE (FLASH_SIZE - TFTF_NVM_OFFSET)
+#else
+/*
+ * If you want to run without support for non-volatile memory (due to e.g.
+ * unavailability of a flash driver), DRAM can be used instead as workaround.
+ * The TFTF binary itself is loaded at 0xE0000000 so we have plenty of free
+ * memory at the beginning of the DRAM. Let's use the first 128MB.
+ *
+ * Please note that this won't be suitable for all test scenarios and
+ * for this reason some tests will be disabled in this configuration.
+ */
+#define TFTF_NVM_OFFSET 0x0
+#define TFTF_NVM_SIZE 0x8000000 /* 128 MB */
+#endif
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
+#define MAX_XLAT_TABLES 5
+#define MAX_MMAP_REGIONS 16
+
+/*******************************************************************************
+ * Used to align variables on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupts IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+#define PLAT_MAX_SPI_OFFSET_ID 220
+
+/* The IRQ generated by Ethernet controller */
+#define IRQ_ETHERNET 192
+
+#define IRQ_CNTPSIRQ1 92
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER 26
+/* Per-CPU Non-Secure Timer Interrupt ID */
+#define IRQ_PCPU_NS_TIMER 30
+
+/*
+ * Times(in ms) used by test code for completion of different events.
+ * Suspend entry time for debug build is high due to the time taken
+ * by the VERBOSE/INFO prints. The value considers the worst case scenario
+ * where all CPUs are going and coming out of suspend continuously.
+ */
+#if DEBUG
+#define PLAT_SUSPEND_ENTRY_TIME 0x100
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 0x200
+#else
+#define PLAT_SUSPEND_ENTRY_TIME 10
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 20
+#endif
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/arm/board/juno/juno32_tests_to_skip.txt b/plat/arm/board/juno/juno32_tests_to_skip.txt
new file mode 100644
index 000000000..078e36351
--- /dev/null
+++ b/plat/arm/board/juno/juno32_tests_to_skip.txt
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# System suspend is not supported on AArch32 Juno.
+PSCI System Suspend Validation
+PSCI STAT/Stats test cases after system suspend
+IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
+PSCI SYSTEM SUSPEND stress tests
diff --git a/plat/arm/board/juno/juno_def.h b/plat/arm/board/juno/juno_def.h
new file mode 100644
index 000000000..930332fb3
--- /dev/null
+++ b/plat/arm/board/juno/juno_def.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __JUNO_DEF_H__
+#define __JUNO_DEF_H__
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Juno memory map related constants
+ ******************************************************************************/
+/* First peripherals region excluding Non-Trusted ROM and Non-Trusted RAM */
+#define DEVICE0_BASE 0x20000000
+#define DEVICE0_SIZE 0x0e000000
+
+/* PCIe expansion region and 2nd peripherals region */
+#define DEVICE1_BASE 0x40000000
+#define DEVICE1_SIZE 0x40000000
+
+#define IOFPGA_PERIPHERALS_BASE 0x1c000000
+#define IOFPGA_PERIPHERALS_SIZE 0x3000000
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+#define GICD_BASE 0x2c010000
+#define GICC_BASE 0x2c02f000
+/* Juno doesn't support GIC Redistributor, it's a GICv3 feature */
+#define GICR_BASE 0
+
+/*******************************************************************************
+ * PL011 related constants
+ ******************************************************************************/
+/* SoC UART0 */
+#define PL011_UART2_BASE 0x7ff80000
+#define PL011_UART2_CLK_IN_HZ 7273800
+
+#define PLAT_ARM_UART_BASE PL011_UART2_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ PL011_UART2_CLK_IN_HZ
+
+/*******************************************************************************
+ * Motherboard timer related constants
+ ******************************************************************************/
+#define MB_TIMER1_BASE 0x1C120000
+#define MB_TIMER1_IRQ 198
+#define MB_TIMER1_FREQ 32000
+
+/*******************************************************************************
+ * Ethernet controller related constants
+ ******************************************************************************/
+#define ETHERNET_BASE 0x18000000
+#define ETHERNET_SIZE 0x04000000
+#define ETHERNET_IRQ_CFG_OFFSET 0x54
+#define ETHERNET_IRQ_CFG_VAL 0x11
+
+#endif /* __JUNO_DEF_H__ */
+
diff --git a/plat/arm/board/juno/juno_mem_prot.c b/plat/arm/board/juno/juno_mem_prot.c
new file mode 100644
index 000000000..f0e350673
--- /dev/null
+++ b/plat/arm/board/juno/juno_mem_prot.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#define NS_IMAGE_OFFSET TFTF_BASE
+#define NS_IMAGE_LIMIT (NS_IMAGE_OFFSET + (32 << TWO_MB_SHIFT))
+
+static const mem_region_t juno_ram_ranges[] = {
+ {NS_IMAGE_LIMIT, 128 << TWO_MB_SHIFT},
+#ifdef AARCH64
+ {JUNO_DRAM2_BASE, 1 << ONE_GB_SHIFT},
+#endif
+};
+
+/*
+ * Return the Juno RAM regions listed above and store the number of
+ * entries in *nelem.
+ */
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(juno_ram_ranges);
+ return juno_ram_ranges;
+}
diff --git a/plat/arm/board/juno/juno_pwr_state.c b/plat/arm/board/juno/juno_pwr_state.c
new file mode 100644
index 000000000..1e067c2a6
--- /dev/null
+++ b/plat/arm/board/juno/juno_pwr_state.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+#include <stddef.h>
+
+/*
+ * State IDs for local power states on Juno.
+ */
+#define JUNO_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define JUNO_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define JUNO_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ JUNO_RUN_DEPTH = 0,
+ JUNO_RETENTION_DEPTH,
+ JUNO_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {JUNO_RETENTION_DEPTH, JUNO_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {JUNO_OFF_DEPTH, JUNO_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/*
+ * The state property array with details of idle state possible
+ * for the cluster
+ */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {JUNO_OFF_DEPTH, JUNO_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/*
+ * The state property array with details of idle state possible
+ * for the system. Currently Juno does not support CPU SUSPEND
+ * at system power level.
+ */
+static const plat_state_prop_t system_state_prop[] = {
+ {JUNO_OFF_DEPTH, JUNO_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/*
+ * Return the idle-state property table for the given affinity level:
+ * level 0 -> core states, level 1 -> cluster states, level 2 -> system
+ * states. Returns NULL for any other level.
+ */
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
diff --git a/plat/arm/board/juno/juno_timers.c b/plat/arm/board/juno/juno_timers.c
new file mode 100644
index 000000000..1d1ce4884
--- /dev/null
+++ b/plat/arm/board/juno/juno_timers.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <sp804.h>
+#include <stddef.h>
+#include <timer.h>
+
+static const plat_timer_t plat_timers = {
+ .program = sp804_timer_program,
+ .cancel = sp804_timer_cancel,
+ .handler = sp804_timer_handler,
+ .timer_step_value = 2,
+ .timer_irq = MB_TIMER1_IRQ /* Motherboard SP804 timer1 IRQ */
+};
+
+/*
+ * Provide the framework with the Juno platform timer operations
+ * (motherboard SP804 timer1, declared above) and initialise the timer
+ * hardware. Always returns 0 (success).
+ */
+int plat_initialise_timer_ops(const plat_timer_t **timer_ops)
+{
+ assert(timer_ops != NULL);
+ *timer_ops = &plat_timers;
+
+ /* Initialise the system timer */
+ sp804_timer_init(MB_TIMER1_BASE, MB_TIMER1_FREQ);
+
+ return 0;
+}
diff --git a/plat/arm/board/juno/juno_topology.c b/plat/arm/board/juno/juno_topology.c
new file mode 100644
index 000000000..f7e00440e
--- /dev/null
+++ b/plat/arm/board/juno/juno_topology.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+static const struct {
+ unsigned cluster_id;
+ unsigned cpu_id;
+} juno_cores[] = {
+ /* Cortex-A53 Cluster: 4 cores*/
+ { 1, 0 },
+ { 1, 1 },
+ { 1, 2 },
+ { 1, 3 },
+ /* Cortex-A57 Cluster: 2 cores */
+ { 0, 0 },
+ { 0, 1 },
+};
+
+/*
+ * The Juno power domain tree descriptor. Juno implements a system
+ * power domain at the level 2. The first entry in the power domain descriptor
+ * specifies the number of power domains at the highest power level. For Juno
+ * this is 1 i.e. the number of system power domain.
+ */
+static const unsigned char juno_power_domain_tree_desc[] = {
+ /* Number of root nodes */
+ PLATFORM_SYSTEM_COUNT,
+ /* Number of children of root node */
+ PLATFORM_CLUSTER_COUNT,
+ /* Number of children for the first cluster */
+ PLATFORM_CLUSTER1_CORE_COUNT,
+ /* Number of children for the second cluster */
+ PLATFORM_CLUSTER0_CORE_COUNT
+};
+
+/* Return the Juno power domain tree topology descriptor defined above. */
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return juno_power_domain_tree_desc;
+}
+
+/*
+ * Map a linear core position to its MPIDR using the static Juno core
+ * table: positions 0-3 are the Cortex-A53 cores (cluster 1), positions
+ * 4-5 the Cortex-A57 cores (cluster 0).
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return make_mpid(juno_cores[core_pos].cluster_id,
+ juno_cores[core_pos].cpu_id);
+}
diff --git a/plat/arm/board/juno/plat_setup.c b/plat/arm/board/juno/plat_setup.c
new file mode 100644
index 000000000..8792cb343
--- /dev/null
+++ b/plat/arm/board/juno/plat_setup.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Table of regions to map using the MMU.
+ */
+#if IMAGE_NS_BL1U
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_MEMORY | MT_RO | MT_NS),
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+#elif IMAGE_NS_BL2U
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(IOFPGA_PERIPHERALS_BASE, IOFPGA_PERIPHERALS_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+#elif IMAGE_TFTF
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(ETHERNET_BASE, ETHERNET_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(IOFPGA_PERIPHERALS_BASE, IOFPGA_PERIPHERALS_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+#if USE_NVM
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
+#endif
+ MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+#endif /* IMAGE_NS_BL1U */
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
+
+void tftf_platform_setup(void)
+{
+ arm_platform_setup();
+
+#if !IMAGE_NS_BL2U
+ /*
+ * The Ethernet IRQ line is high by default which prevents Juno
+ * from entering system suspend. Configure it to be low.
+ *
+ * Interrupts are disabled in NS_BL2U so there's no need to fix this
+ * as we are not going to suspend the system.
+ *
+ * TODO: Currently this needs to be done in a loop for the write
+ * to IRQ_CFG register to take effect. Need to find the reason for
+ * this behavior.
+ */
+ int val;
+
+ do {
+ mmio_write_32(ETHERNET_BASE + ETHERNET_IRQ_CFG_OFFSET,
+ ETHERNET_IRQ_CFG_VAL);
+
+ val = mmio_read_8(ETHERNET_BASE + ETHERNET_IRQ_CFG_OFFSET);
+ } while (val != ETHERNET_IRQ_CFG_VAL);
+#endif /* IMAGE_NS_BL2U */
+}
+
+void plat_arm_gic_init(void)
+{
+ arm_gic_init(GICC_BASE, GICD_BASE, GICR_BASE);
+}
diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk
new file mode 100644
index 000000000..97172b5a9
--- /dev/null
+++ b/plat/arm/board/juno/platform.mk
@@ -0,0 +1,42 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/arm/board/juno/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/sp805/sp805.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/sp804.c \
+ plat/arm/board/juno/${ARCH}/plat_helpers.S \
+ plat/arm/board/juno/juno_pwr_state.c \
+ plat/arm/board/juno/juno_timers.c \
+ plat/arm/board/juno/juno_topology.c \
+ plat/arm/board/juno/juno_mem_prot.c \
+ plat/arm/board/juno/plat_setup.c
+
+TESTS_SOURCES += tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c
+
+# Some tests are not supported on Juno AArch32.
+ifeq (${ARCH},aarch32)
+PLAT_TESTS_SKIP_LIST := plat/arm/board/juno/juno32_tests_to_skip.txt
+endif
+
+PLAT_SUPPORTS_NS_RESET := 1
+
+# Process PLAT_SUPPORTS_NS_RESET flag
+$(eval $(call assert_boolean,PLAT_SUPPORTS_NS_RESET))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_SUPPORTS_NS_RESET))
+
+ifeq (${ARCH},aarch32)
+ifeq (${FIRMWARE_UPDATE},1)
+$(error "FIRMWARE_UPDATE is not supported on Juno aarch32")
+endif
+else
+FIRMWARE_UPDATE := 1
+endif
+
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/board/juno/tests.xml b/plat/arm/board/juno/tests.xml
new file mode 100644
index 000000000..8e679c31d
--- /dev/null
+++ b/plat/arm/board/juno/tests.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<!-- External references to all individual tests files. -->
+<!DOCTYPE testsuites [
+ <!ENTITY tests-tftf-validation SYSTEM "../../../../tftf/tests/tests-tftf-validation.xml">
+ <!ENTITY tests-boot-req SYSTEM "../../../../tftf/tests/tests-boot-req.xml">
+ <!ENTITY tests-psci SYSTEM "../../../../tftf/tests/tests-psci.xml">
+ <!ENTITY tests-sdei SYSTEM "../../../../tftf/tests/tests-sdei.xml">
+ <!ENTITY tests-rt-instr SYSTEM "../../../../tftf/tests/tests-runtime-instrumentation.xml">
+ <!ENTITY tests-tsp SYSTEM "../../../../tftf/tests/tests-tsp.xml">
+ <!ENTITY tests-el3-pstate SYSTEM "../../../../tftf/tests/tests-el3-power-state.xml">
+ <!ENTITY tests-state-switch SYSTEM "../../../../tftf/tests/tests-arm-state-switch.xml">
+ <!ENTITY tests-cpu-extensions SYSTEM "../../../../tftf/tests/tests-cpu-extensions.xml">
+ <!ENTITY tests-performance SYSTEM "../../../../tftf/tests/tests-performance.xml">
+]>
+
+<testsuites>
+
+ &tests-tftf-validation;
+ &tests-boot-req;
+ &tests-psci;
+ &tests-sdei;
+ &tests-rt-instr;
+ &tests-tsp;
+ &tests-el3-pstate;
+ &tests-state-switch;
+ &tests-cpu-extensions;
+ &tests-performance;
+
+ <testsuite name="Juno - IRQ support in TSP" description="Test the normal IRQ preemption support in TSP.">
+ <testcase name="Juno - Multicore spurious interrupt test" function="test_juno_multicore_spurious_interrupt" />
+ </testsuite>
+
+</testsuites>
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
new file mode 100644
index 000000000..73b4690b3
--- /dev/null
+++ b/plat/arm/common/arm_common.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES += -Iinclude/plat/arm/common/
+
+PLAT_SOURCES += drivers/arm/gic/gic_common.c \
+ drivers/arm/pl011/${ARCH}/pl011_console.S \
+ plat/arm/common/arm_setup.c \
+ plat/arm/common/arm_timers.c
+
+# Flash driver sources.
+PLAT_SOURCES += drivers/io/io_storage.c \
+ drivers/io/vexpress_nor/io_vexpress_nor_ops.c \
+ drivers/io/vexpress_nor/io_vexpress_nor_hw.c \
+ plat/arm/common/arm_io_storage.c
diff --git a/plat/arm/common/arm_fwu_io_storage.c b/plat/arm/common/arm_fwu_io_storage.c
new file mode 100644
index 000000000..184f2af4d
--- /dev/null
+++ b/plat/arm/common/arm_fwu_io_storage.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <image_loader.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+/* May be overridden in specific ARM standard platform */
+#pragma weak plat_arm_fwu_io_setup
+
+/* IO devices */
+static const io_dev_connector_t *fwu_fip_dev_con;
+static uintptr_t fwu_fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+
+static const io_block_spec_t fwu_fip_block_spec = {
+ .offset = PLAT_ARM_FWU_FIP_BASE,
+ .length = PLAT_ARM_FWU_FIP_SIZE
+};
+static const io_uuid_spec_t fwu_cert_uuid_spec = {
+ .uuid = UUID_FIRMWARE_UPDATE_FWU_CERT,
+};
+static const io_uuid_spec_t scp_bl2u_uuid_spec = {
+ .uuid = UUID_FIRMWARE_UPDATE_SCP_BL2U,
+};
+static const io_uuid_spec_t bl2u_uuid_spec = {
+ .uuid = UUID_FIRMWARE_UPDATE_BL2U,
+};
+static const io_uuid_spec_t ns_bl2u_uuid_spec = {
+ .uuid = UUID_FIRMWARE_UPDATE_NS_BL2U,
+};
+
+static int open_fwu_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+ uintptr_t *dev_handle;
+ uintptr_t image_spec;
+ int (*check)(const uintptr_t spec);
+};
+
+static const struct plat_io_policy policies[] = {
+ [FWU_FIP_IMAGE_ID] = {
+ &memmap_dev_handle,
+ (uintptr_t)&fwu_fip_block_spec,
+ open_memmap
+ },
+ [FWU_CERT_ID] = {
+ &fwu_fip_dev_handle,
+ (uintptr_t)&fwu_cert_uuid_spec,
+ open_fwu_fip
+ },
+ [SCP_BL2U_IMAGE_ID] = {
+ &fwu_fip_dev_handle,
+ (uintptr_t)&scp_bl2u_uuid_spec,
+ open_fwu_fip
+ },
+ [BL2U_IMAGE_ID] = {
+ &fwu_fip_dev_handle,
+ (uintptr_t)&bl2u_uuid_spec,
+ open_fwu_fip
+ },
+ [NS_BL2U_IMAGE_ID] = {
+ &fwu_fip_dev_handle,
+ (uintptr_t)&ns_bl2u_uuid_spec,
+ open_fwu_fip
+ },
+};
+
+
+/* Weak definitions may be overridden in each specific platform */
+#pragma weak plat_get_image_source
+
+static int open_fwu_fip(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ /* See if a Firmware Image Package is available */
+ result = io_dev_init(fwu_fip_dev_handle,
+ (uintptr_t)FWU_FIP_IMAGE_ID);
+ if (result == IO_SUCCESS) {
+ result = io_open(fwu_fip_dev_handle, spec,
+ &local_image_handle);
+ if (result == IO_SUCCESS) {
+ VERBOSE("Using FIP\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+static int open_memmap(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+ if (result == IO_SUCCESS) {
+ result = io_open(memmap_dev_handle, spec, &local_image_handle);
+ if (result == IO_SUCCESS) {
+ VERBOSE("Using Memmap\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+void plat_arm_fwu_io_setup(void)
+{
+ int io_result;
+
+ io_result = register_io_dev_fip(&fwu_fip_dev_con);
+ assert(io_result == IO_SUCCESS);
+
+ io_result = register_io_dev_memmap(&memmap_dev_con);
+ assert(io_result == IO_SUCCESS);
+
+ /* Open connections to devices and cache the handles */
+ io_result = io_dev_open(fwu_fip_dev_con, (uintptr_t)NULL,
+ &fwu_fip_dev_handle);
+ assert(io_result == IO_SUCCESS);
+
+ io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+ &memmap_dev_handle);
+ assert(io_result == IO_SUCCESS);
+
+ /* Ignore improbable errors in release builds */
+ (void)io_result;
+}
+
+
+/* Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ int result = IO_FAIL;
+ const struct plat_io_policy *policy;
+
+ assert(image_id < ARRAY_SIZE(policies));
+
+ policy = &policies[image_id];
+ result = policy->check(policy->image_spec);
+ if (result == IO_SUCCESS) {
+ *image_spec = policy->image_spec;
+ *dev_handle = *(policy->dev_handle);
+ }
+
+ return result;
+}
+
+void plat_fwu_io_setup(void)
+{
+ plat_arm_fwu_io_setup();
+}
diff --git a/plat/arm/common/arm_io_storage.c b/plat/arm/common/arm_io_storage.c
new file mode 100644
index 000000000..d9d8a389c
--- /dev/null
+++ b/plat/arm/common/arm_io_storage.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <io_driver.h>
+#include <io_nor_flash.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <tftf.h>
+#include "platform_def.h"
+
+#pragma weak plat_get_nvm_handle
+
+/* IO devices */
+static const io_dev_connector_t *flash_dev_con;
+static uintptr_t flash_dev_spec;
+static uintptr_t flash_init_params;
+static uintptr_t flash_dev_handle;
+static uintptr_t flash_handle;
+static unsigned int flash_init;
+
+static const io_nor_flash_spec_t flash_main_block_spec = {
+ .device_address = FLASH_BASE,
+ .region_address = FLASH_BASE,
+ .block_size = NOR_FLASH_BLOCK_SIZE,
+ .block_count = FLASH_SIZE / NOR_FLASH_BLOCK_SIZE
+};
+
+int arm_io_setup(void)
+{
+ int io_result;
+
+ io_result = register_io_dev_nor_flash(&flash_dev_con);
+ if (io_result != IO_SUCCESS)
+ return io_result;
+
+ io_result = io_dev_open(flash_dev_con, flash_dev_spec,
+ &flash_dev_handle);
+ if (io_result != IO_SUCCESS)
+ return io_result;
+
+ io_result = io_dev_init(flash_dev_handle, flash_init_params);
+ if (io_result != IO_SUCCESS)
+ return io_result;
+
+ io_result = io_open(flash_dev_handle,
+ (uintptr_t)&flash_main_block_spec,
+ &flash_handle);
+
+ if (io_result == IO_SUCCESS)
+ flash_init = 1;
+ return io_result;
+}
+
+void plat_get_nvm_handle(uintptr_t *handle)
+{
+ assert(handle);
+ assert(flash_init);
+
+ *handle = flash_handle;
+}
+
diff --git a/plat/arm/common/arm_setup.c b/plat/arm/common/arm_setup.c
new file mode 100644
index 000000000..79f463176
--- /dev/null
+++ b/plat/arm/common/arm_setup.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <console.h>
+#include <debug.h>
+#include <io_storage.h>
+#include <pl011.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+#pragma weak tftf_platform_setup
+
+void arm_platform_setup(void)
+{
+#if USE_NVM
+ int ret;
+
+ ret = arm_io_setup();
+ if (ret != IO_SUCCESS)
+ WARN("IO setup failed : 0x%x\n", ret);
+#endif
+
+#if IMAGE_NS_BL2U
+ /* NS_BL2U is not expecting interrupts. */
+ return;
+#endif
+
+ plat_arm_gic_init();
+
+ arm_gic_setup_global();
+ arm_gic_setup_local();
+}
+
+void tftf_platform_setup(void)
+{
+ arm_platform_setup();
+}
+
+void tftf_plat_arch_setup(void)
+{
+ tftf_plat_configure_mmu();
+}
+
+void tftf_early_platform_setup(void)
+{
+ console_init(PLAT_ARM_UART_BASE, PLAT_ARM_UART_CLK_IN_HZ,
+ PL011_BAUDRATE);
+}
diff --git a/plat/arm/common/arm_timers.c b/plat/arm/common/arm_timers.c
new file mode 100644
index 000000000..c84cb845a
--- /dev/null
+++ b/plat/arm/common/arm_timers.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <platform.h>
+#include <stddef.h>
+#include <system_timer.h>
+#include <timer.h>
+
+#pragma weak plat_initialise_timer_ops
+
+static const plat_timer_t plat_timers = {
+ .program = program_systimer,
+ .cancel = cancel_systimer,
+ .handler = handler_systimer,
+ .timer_step_value = 2,
+ .timer_irq = IRQ_CNTPSIRQ1
+};
+
+int plat_initialise_timer_ops(const plat_timer_t **timer_ops)
+{
+ assert(timer_ops != NULL);
+ *timer_ops = &plat_timers;
+
+ /* Initialise the system timer */
+ init_systimer(SYS_CNT_BASE1);
+
+ return 0;
+}
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
new file mode 100644
index 000000000..1a5741868
--- /dev/null
+++ b/plat/common/aarch32/platform_helpers.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+#include <platform_def.h>
+
+ .weak platform_get_core_pos
+ .weak plat_crash_console_init
+ .weak plat_crash_console_putc
+ .weak plat_crash_console_flush
+
+ /* -----------------------------------------------------
+ * int platform_get_core_pos(u_register_t mpidr);
+ * With this function: CorePos = (ClusterId * 4) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func platform_get_core_pos
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ add r0, r1, r0, LSR #6
+ bx lr
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : r0 - r4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ ldr r0, =PLAT_ARM_UART_BASE
+ ldr r1, =PLAT_ARM_UART_CLK_IN_HZ
+ ldr r2, =PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : r1, r2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ ldr r1, =PLAT_ARM_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : r0 - r1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ ldr r1, =PLAT_ARM_UART_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
new file mode 100644
index 000000000..c4ea89584
--- /dev/null
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+ .local pcpu_dv_mem_stack
+ .local platform_normal_stacks
+ .weak platform_set_stack
+ .weak platform_get_stack
+ .weak platform_set_coherent_stack
+
+ /* -----------------------------------------------------
+ * void platform_set_coherent_stack (unsigned long mpidr)
+ *
+ * For a given CPU, this function sets the stack pointer
+ * to a stack allocated in device memory. This stack can
+ * be used by C code which enables/disables the SCTLR.M
+ * SCTLR.C bit e.g. while powering down a cpu
+ * -----------------------------------------------------
+ */
+func platform_set_coherent_stack
+ mov r9, lr
+ get_mp_stack pcpu_dv_mem_stack, PCPU_DV_MEM_STACK_SIZE
+ mov sp, r0
+ bx r9
+endfunc platform_set_coherent_stack
+
+ /* -----------------------------------------------------
+ * uintptr_t platform_get_stack (u_register_t mpidr)
+ *
+ * For a given CPU, this function returns the stack
+ * pointer for a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_get_stack
+ mov r9, lr
+ get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ bx r9
+endfunc platform_get_stack
+
+ /* -----------------------------------------------------
+ * void platform_set_stack (u_register_t mpidr)
+ *
+ * For a given CPU, this function sets the stack
+ * pointer to a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_set_stack
+ mov r9, lr
+ get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, r0
+ bx r9
+endfunc platform_set_stack
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in normal memory.
+ * Used for C code during runtime execution (when coherent
+ * stacks are not required).
+ * Each cpu gets a stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tftf_normal_stacks, \
+ PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in device memory.
+ * Used for C code just before power down or right after
+ * power up when the MMU or caches need to be turned on
+ * or off.
+ * Each cpu gets a stack of PCPU_DV_MEM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack pcpu_dv_mem_stack, tftf_coherent_stacks, \
+ PCPU_DV_MEM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
new file mode 100644
index 000000000..e67ded820
--- /dev/null
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+ .local platform_normal_stacks
+ .globl platform_set_stack
+ .globl platform_get_stack
+
+ /* -----------------------------------------------------
+ * unsigned long platform_get_stack (unsigned long)
+ *
+ * For cold-boot images, only the primary CPU needs a
+ * stack. This function returns the stack pointer for a
+ * stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_get_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ bx lr
+endfunc platform_get_stack
+
+ /* -----------------------------------------------------
+ * void platform_set_stack (unsigned long)
+ *
+ * For cold-boot images, only the primary CPU needs a
+ * stack. This function sets the stack pointer to a stack
+ * allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_set_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, r0
+ bx lr
+endfunc platform_set_stack
+
+ /* -----------------------------------------------------
+ * Single cpu stack in normal memory.
+ * Used for C code during boot, PLATFORM_STACK_SIZE bytes
+ * are allocated
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, ns_bl_normal_stacks, \
+ PLATFORM_STACK_SIZE, 1
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
new file mode 100644
index 000000000..216163872
--- /dev/null
+++ b/plat/common/aarch64/platform_helpers.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+#include <platform_def.h>
+
+ .weak platform_get_core_pos
+ .weak plat_crash_console_init
+ .weak plat_crash_console_putc
+ .weak plat_crash_console_flush
+
+ /* -----------------------------------------------------
+ * int platform_get_core_pos(int mpidr);
+ * With this function: CorePos = (ClusterId * 4) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func platform_get_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_ARM_UART_BASE
+ mov_imm x1, PLAT_ARM_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
new file mode 100644
index 000000000..fe167cc9b
--- /dev/null
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+ .local pcpu_dv_mem_stack
+ .local platform_normal_stacks
+ .weak platform_set_stack
+ .weak platform_get_stack
+ .weak platform_set_coherent_stack
+
+
+ /* -----------------------------------------------------
+ * void platform_set_coherent_stack (unsigned long mpidr)
+ *
+ * For a given CPU, this function sets the stack pointer
+ * to a stack allocated in device memory. This stack can
+ * be used by C code which enables/disables the SCTLR.M
+ * SCTLR.C bit e.g. while powering down a cpu
+ * -----------------------------------------------------
+ */
+func platform_set_coherent_stack
+ mov x9, x30 // lr
+ get_mp_stack pcpu_dv_mem_stack, PCPU_DV_MEM_STACK_SIZE
+ mov sp, x0
+ ret x9
+endfunc platform_set_coherent_stack
+
+ /* -----------------------------------------------------
+ * unsigned long platform_get_stack (unsigned long mpidr)
+ *
+ * For a given CPU, this function returns the stack
+ * pointer for a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_get_stack
+ mov x10, x30 // lr
+ get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ ret x10
+endfunc platform_get_stack
+
+ /* -----------------------------------------------------
+ * void platform_set_stack (unsigned long mpidr)
+ *
+ * For a given CPU, this function sets the stack pointer
+ * to a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_set_stack
+ mov x9, x30 // lr
+ bl platform_get_stack
+ mov sp, x0
+ ret x9
+endfunc platform_set_stack
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in normal memory.
+ * Used for C code during runtime execution (when coherent
+ * stacks are not required).
+ * Each cpu gets a stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tftf_normal_stacks, \
+ PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in device memory.
+ * Used for C code just before power down or right after
+ * power up when the MMU or caches need to be turned on
+ * or off.
+ * Each cpu gets a stack of PCPU_DV_MEM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack pcpu_dv_mem_stack, tftf_coherent_stacks, \
+ PCPU_DV_MEM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
new file mode 100644
index 000000000..c61b47226
--- /dev/null
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+ .local platform_normal_stacks
+ .globl platform_set_stack
+ .globl platform_get_stack
+
+ /* -----------------------------------------------------
+ * unsigned long platform_get_stack (unsigned long)
+ *
+ * For cold-boot images, only the primary CPU needs a
+ * stack. This function returns the stack pointer for a
+ * stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_get_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ ret
+endfunc platform_get_stack
+
+ /* -----------------------------------------------------
+ * void platform_set_stack (unsigned long)
+ *
+ * For cold-boot images, only the primary CPU needs a
+ * stack. This function sets the stack pointer to a stack
+ * allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func platform_set_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, x0
+ ret
+endfunc platform_set_stack
+
+ /* -----------------------------------------------------
+ * Single cpu stack in normal memory.
+ * Used for C code during boot, PLATFORM_STACK_SIZE bytes
+ * are allocated
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, ns_bl_normal_stacks, \
+ PLATFORM_STACK_SIZE, 1
diff --git a/plat/common/fwu_nvm_accessors.c b/plat/common/fwu_nvm_accessors.c
new file mode 100644
index 000000000..5b32151d4
--- /dev/null
+++ b/plat/common/fwu_nvm_accessors.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <fwu_nvm.h>
+#include <io_fip.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <status.h>
+#include <string.h>
+#include <uuid_utils.h>
+
+
+STATUS fwu_nvm_write(unsigned long long offset, const void *buffer, size_t size)
+{
+ uintptr_t nvm_handle;
+ int ret;
+ size_t length_write;
+
+ if (offset + size > FLASH_SIZE)
+ return STATUS_OUT_OF_RESOURCES;
+
+	/* Obtain a handle to the NVM by querying the platform layer */
+ plat_get_nvm_handle(&nvm_handle);
+
+ /* Seek to the given offset. */
+ ret = io_seek(nvm_handle, IO_SEEK_SET, offset);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ /* Write to the given offset. */
+ ret = io_write(nvm_handle, (const uintptr_t)buffer,
+ size, &length_write);
+ if ((ret != IO_SUCCESS) || (size != length_write))
+ return STATUS_FAIL;
+
+ return STATUS_SUCCESS;
+}
+
+STATUS fwu_nvm_read(unsigned long long offset, void *buffer, size_t size)
+{
+ uintptr_t nvm_handle;
+ int ret;
+ size_t length_read;
+
+ if (offset + size > FLASH_SIZE)
+ return STATUS_OUT_OF_RESOURCES;
+
+ /* Obtain a handle to the NVM by querying the platform layer */
+ plat_get_nvm_handle(&nvm_handle);
+
+ /* Seek to the given offset. */
+ ret = io_seek(nvm_handle, IO_SEEK_SET, offset);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ /* Read from the given offset. */
+ ret = io_read(nvm_handle, (const uintptr_t)buffer,
+ size, &length_read);
+ if ((ret != IO_SUCCESS) || (size != length_read))
+ return STATUS_FAIL;
+
+ return STATUS_SUCCESS;
+}
+
+
+STATUS fwu_update_fip(unsigned long fip_addr)
+{
+ uintptr_t nvm_handle;
+ int ret;
+ size_t bytes;
+ int fip_size;
+ unsigned int fip_read;
+ fip_toc_header_t *toc_header;
+ fip_toc_entry_t *toc_entry;
+
+ /* Obtain a handle to the NVM by querying the platform layer */
+ plat_get_nvm_handle(&nvm_handle);
+
+#if FWU_BL_TEST
+ /* Read the address of backup fip.bin for Firmware Update. */
+ ret = io_seek(nvm_handle, IO_SEEK_SET,
+ FWU_TFTF_TESTCASE_BUFFER_OFFSET);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ ret = io_read(nvm_handle, (const uintptr_t)&fip_addr,
+ sizeof(bytes), &bytes);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+#endif /* FWU_BL_TEST */
+
+ /* If the new FIP address is 0 it means no update. */
+ if (fip_addr == 0)
+ return STATUS_SUCCESS;
+
+ /* Set the ToC Header at the base of the buffer */
+ toc_header = (fip_toc_header_t *)fip_addr;
+
+ /* Check if this FIP is Valid */
+ if ((toc_header->name != TOC_HEADER_NAME) ||
+ (toc_header->serial_number == 0))
+ return STATUS_LOAD_ERROR;
+
+ /* Get to the last NULL TOC entry */
+ toc_entry = (fip_toc_entry_t *)(toc_header + 1);
+ while (!is_uuid_null(&toc_entry->uuid))
+ toc_entry++;
+
+ /* get the total size of this FIP */
+ fip_size = (int)toc_entry->offset_address;
+
+ /* Copy the new FIP in DDR. */
+ memcpy((void *)FIP_IMAGE_TMP_DDR_ADDRESS, (void *)fip_addr, fip_size);
+
+ /* Update the FIP */
+ ret = io_seek(nvm_handle, IO_SEEK_SET, 0);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ ret = io_write(nvm_handle, (const uintptr_t)FIP_IMAGE_TMP_DDR_ADDRESS,
+ fip_size, &bytes);
+ if ((ret != IO_SUCCESS) || fip_size != bytes)
+ return STATUS_LOAD_ERROR;
+
+ /* Read the TOC header after update. */
+ ret = io_seek(nvm_handle, IO_SEEK_SET, 0);
+ if (ret != IO_SUCCESS)
+ return STATUS_LOAD_ERROR;
+
+ ret = io_read(nvm_handle, (const uintptr_t)&fip_read,
+ sizeof(bytes), &bytes);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ /* Check if this FIP is Valid */
+ if (fip_read != TOC_HEADER_NAME)
+ return STATUS_LOAD_ERROR;
+
+#if FWU_BL_TEST
+ unsigned int done_flag = FIP_IMAGE_UPDATE_DONE_FLAG;
+ /* Update the TFTF test case buffer with DONE flag */
+ ret = io_seek(nvm_handle, IO_SEEK_SET,
+ FWU_TFTF_TESTCASE_BUFFER_OFFSET);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+
+ ret = io_write(nvm_handle, (const uintptr_t)&done_flag,
+ 4, &bytes);
+ if (ret != IO_SUCCESS)
+ return STATUS_FAIL;
+#endif /* FWU_BL_TEST */
+
+ INFO("FWU Image update success\n");
+
+ return STATUS_SUCCESS;
+}
+
diff --git a/plat/common/image_loader.c b/plat/common/image_loader.c
new file mode 100644
index 000000000..9b27f6f45
--- /dev/null
+++ b/plat/common/image_loader.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <image_loader.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+/*
+ * Return the absolute address of the image identified by 'image_id' inside
+ * the FWU FIP (PLAT_ARM_FWU_FIP_BASE + the image's offset within the FIP),
+ * or 0 if the image cannot be found or opened.
+ *
+ * NOTE(review): the opened io_entity's 'info' field is cast straight to
+ * fip_file_state_t, so this assumes plat_get_image_source() always returns
+ * a FIP-backed device for these image ids — confirm.
+ */
+unsigned long get_image_offset(unsigned int image_id)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	unsigned long img_offset;
+	int io_result;
+	io_entity_t *entity;
+	fip_file_state_t *fp;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	entity = (io_entity_t *)image_handle;
+
+	/* The FIP driver keeps its per-file state in entity->info. */
+	fp = (fip_file_state_t *)entity->info;
+	/* entry.offset_address is relative to the start of the FWU FIP. */
+	img_offset = PLAT_ARM_FWU_FIP_BASE + fp->entry.offset_address;
+
+	/* Close failures are deliberately ignored on this query-only path. */
+	(void)io_close(image_handle);
+	(void)io_dev_close(dev_handle);
+
+	return img_offset;
+}
+
+
+/*
+ * Return the size in bytes of the image identified by 'image_id', as
+ * reported by the IO layer, or 0 if the image cannot be found, opened or
+ * sized. A return value of 0 always means failure.
+ */
+unsigned long get_image_size(unsigned int image_id)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t image_size = 0;
+	int io_result;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return 0;
+	}
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+		/*
+		 * io_size() may have left 'image_size' untouched on failure;
+		 * never return an indeterminate value to the caller. Report
+		 * failure the same way as the other error paths do.
+		 */
+		image_size = 0;
+	}
+
+	/* Close failures are deliberately ignored on this query-only path. */
+	(void)io_close(image_handle);
+	(void)io_dev_close(dev_handle);
+
+	return image_size;
+}
+
+
+/*
+ * Load the whole image identified by 'image_id' into memory at 'image_base'
+ * and clean the data cache over it so that other observers (e.g. a lower EL)
+ * see the loaded bytes.
+ *
+ * Returns IO_SUCCESS on success, otherwise the first failing IO-layer error
+ * code.
+ *
+ * NOTE(review): if io_size() succeeds but reports a size of 0, this returns
+ * io_result == IO_SUCCESS without having loaded anything — confirm callers
+ * tolerate that, or that the IO layer never reports a zero size on success.
+ */
+int load_image(unsigned int image_id, uintptr_t image_base)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t image_size;
+	size_t bytes_read;
+	int io_result;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != IO_SUCCESS) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id, (void *)image_base);
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+		goto exit;
+	}
+
+	/* Load the image now */
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		goto exit;
+	}
+
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image so that the next EL can see it.
+	 */
+	flush_dcache_range(image_base, image_size);
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *)image_base,
+		(void *)(image_base + image_size - 1));
+
+exit:
+	/* Close errors are ignored; 'io_result' keeps the first failure. */
+	io_close(image_handle);
+	io_dev_close(dev_handle);
+	return io_result;
+}
+
+
+/*
+ * Load one chunk ('image_size' bytes at 'image_base') of the image
+ * identified by 'image_id'.
+ *
+ * The device and image handles are kept in function-local static storage so
+ * that successive calls continue reading from the current file offset. They
+ * are opened lazily on the first call and released when 'is_last_block' is
+ * non-zero, or when a read fails (the failure path forces the close).
+ *
+ * NOTE(review): the static handles mean only one image can be streamed at a
+ * time and the function is not reentrant — confirm callers serialise usage.
+ *
+ * Returns IO_SUCCESS on success, otherwise an IO-layer error code.
+ */
+int load_partial_image(unsigned int image_id,
+		uintptr_t image_base,
+		size_t image_size,
+		unsigned int is_last_block)
+{
+	/* Persist across calls so the file offset is preserved per chunk. */
+	static uintptr_t dev_handle;
+	static uintptr_t image_handle;
+	uintptr_t image_spec;
+	size_t bytes_read;
+	int io_result;
+
+	if (!image_handle) {
+		/* Obtain a reference to the image by querying the platform layer */
+		io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+		if (io_result != IO_SUCCESS) {
+			WARN("Failed to obtain reference to image id=%u (%i)\n",
+				image_id, io_result);
+			return io_result;
+		}
+
+		/* Attempt to access the image */
+		io_result = io_open(dev_handle, image_spec, &image_handle);
+		if (io_result != IO_SUCCESS) {
+			WARN("Failed to access image id=%u (%i)\n",
+				image_id, io_result);
+			return io_result;
+		}
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id, (void *)image_base);
+
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		/* Force the close path below so the handles are not leaked. */
+		is_last_block = 0;
+		goto exit;
+	}
+
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image so that the next EL can see it.
+	 */
+	flush_dcache_range(image_base, image_size);
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *)image_base,
+		(void *)(image_base + image_size - 1));
+
+exit:
+
+	/* Release and reset the cached handles once the image is complete. */
+	if (is_last_block == 0) {
+		io_close(image_handle);
+		io_dev_close(dev_handle);
+		image_handle = 0;
+	}
+	return io_result;
+}
+
diff --git a/plat/common/plat_common.c b/plat/common/plat_common.c
new file mode 100644
index 000000000..571387111
--- /dev/null
+++ b/plat/common/plat_common.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <sp805.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * The following platform functions are all weakly defined. They provide typical
+ * implementations that may be re-used by multiple platforms but may also be
+ * overridden by a platform if required.
+ */
+
+#pragma weak tftf_platform_end
+#pragma weak tftf_platform_watchdog_set
+#pragma weak tftf_platform_watchdog_reset
+#pragma weak tftf_plat_configure_mmu
+#pragma weak tftf_plat_enable_mmu
+#pragma weak tftf_plat_reset
+#pragma weak plat_get_prot_regions
+
+#if IMAGE_TFTF
+
+/*
+ * TFTF image layout: RO (code + rodata) runs from TFTF_BASE to the
+ * linker-provided __RO_END__; RW (data + BSS) follows, up to __TFTF_END__.
+ * The coherent RAM region is mapped separately (as Device memory) by
+ * tftf_plat_configure_mmu() below.
+ */
+#define IMAGE_RO_BASE TFTF_BASE
+IMPORT_SYM(uintptr_t, __RO_END__, IMAGE_RO_END);
+
+#define IMAGE_RW_BASE IMAGE_RO_END
+IMPORT_SYM(uintptr_t, __TFTF_END__, IMAGE_RW_END);
+
+IMPORT_SYM(uintptr_t, __COHERENT_RAM_START__, COHERENT_RAM_START);
+IMPORT_SYM(uintptr_t, __COHERENT_RAM_END__, COHERENT_RAM_END);
+
+#elif IMAGE_NS_BL1U
+
+/*
+ * NS_BL1U layout: RO and RW regions have distinct platform-defined bases;
+ * the RO end is rounded up to a page so the two MMU mappings do not overlap.
+ */
+IMPORT_SYM(uintptr_t, __RO_END__, IMAGE_RO_END_UNALIGNED);
+#define IMAGE_RO_BASE NS_BL1U_RO_BASE
+#define IMAGE_RO_END round_up(IMAGE_RO_END_UNALIGNED, PAGE_SIZE)
+
+#define IMAGE_RW_BASE NS_BL1U_RW_BASE
+IMPORT_SYM(uintptr_t, __NS_BL1U_RAM_END__, IMAGE_RW_END);
+
+#elif IMAGE_NS_BL2U
+
+/* NS_BL2U layout: contiguous image; RW end rounded up to a page boundary. */
+#define IMAGE_RO_BASE NS_BL2U_BASE
+IMPORT_SYM(uintptr_t, __RO_END__, IMAGE_RO_END);
+
+#define IMAGE_RW_BASE IMAGE_RO_END
+IMPORT_SYM(uintptr_t, __NS_BL2U_END__, IMAGE_RW_END_UNALIGNED);
+#define IMAGE_RW_END round_up(IMAGE_RW_END_UNALIGNED, PAGE_SIZE)
+
+#endif
+
+/*
+ * Default (weak) end-of-run hook: emit an ASCII EOT (End Of Transmission,
+ * 0x04) character on the UART. A software model can be configured to shut
+ * itself down when it sees this character on the serial output.
+ */
+void tftf_platform_end(void)
+{
+	console_putc(4);
+}
+
+void tftf_platform_watchdog_set(void)
+{
+	/*
+	 * Weak placeholder: does nothing. Platforms with a watchdog override
+	 * this (see the '#pragma weak' declarations above) to arm it.
+	 */
+}
+
+void tftf_platform_watchdog_reset(void)
+{
+	/*
+	 * Weak placeholder: does nothing. Platforms with a watchdog override
+	 * this (see the '#pragma weak' declarations above) to kick/disarm it.
+	 */
+}
+
+/*
+ * Build the translation tables for the running image and enable the MMU:
+ * - code/RO data mapped as MT_CODE,
+ * - data/BSS mapped as MT_RW_DATA,
+ * - (TFTF image only) the coherent RAM region mapped as RW non-secure
+ *   Device memory,
+ * - plus any extra regions supplied by tftf_platform_get_mmap().
+ */
+void tftf_plat_configure_mmu(void)
+{
+	/* RO data + Code */
+	mmap_add_region(IMAGE_RO_BASE, IMAGE_RO_BASE,
+			IMAGE_RO_END - IMAGE_RO_BASE, MT_CODE);
+
+	/* Data + BSS */
+	mmap_add_region(IMAGE_RW_BASE, IMAGE_RW_BASE,
+			IMAGE_RW_END - IMAGE_RW_BASE, MT_RW_DATA);
+
+#if IMAGE_TFTF
+	mmap_add_region(COHERENT_RAM_START, COHERENT_RAM_START,
+			COHERENT_RAM_END - COHERENT_RAM_START,
+			MT_DEVICE | MT_RW | MT_NS);
+#endif
+
+	/* Platform-specific regions (UART, GIC, flash, ...). */
+	mmap_add(tftf_platform_get_mmap());
+	init_xlat_tables();
+
+	tftf_plat_enable_mmu();
+}
+
+/*
+ * Enable the MMU at the current exception level/mode.
+ * AArch64: EL1 and EL2 are supported; any other EL panics.
+ * AArch32: Hyp mode uses enable_mmu_hyp(), otherwise enable_mmu_svc_mon().
+ */
+void tftf_plat_enable_mmu(void)
+{
+#ifndef AARCH32
+	if (IS_IN_EL1())
+		enable_mmu_el1(0);
+	else if (IS_IN_EL2())
+		enable_mmu_el2(0);
+	else
+		panic();
+#else
+	if (IS_IN_HYP())
+		enable_mmu_hyp(0);
+	else
+		enable_mmu_svc_mon(0);
+#endif
+}
+
+/*
+ * Default (weak) platform reset: program the SP805 watchdog with the
+ * smallest load value so its reset signal fires almost immediately.
+ * This function never returns.
+ */
+void tftf_plat_reset(void)
+{
+	/*
+	 * SP805 peripheral interrupt is not serviced in TFTF. The reset signal
+	 * generated by it is used to reset the platform.
+	 */
+	sp805_wdog_start(1);
+
+	/*
+	 * The reset might take some execution cycles to take effect,
+	 * depending on the ratio between the CPU clock frequency and the
+	 * watchdog clock frequency. Spin until it does.
+	 */
+	while (1)
+		;
+}
+
+/*
+ * Default (weak) implementation: the platform declares no protected memory
+ * regions. Sets *nelem to 0 and returns NULL.
+ */
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+	*nelem = 0;
+	return NULL;
+}
diff --git a/plat/common/plat_state_id.c b/plat/common/plat_state_id.c
new file mode 100644
index 000000000..ce43ba4a2
--- /dev/null
+++ b/plat/common/plat_state_id.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <tftf.h>
+
+static unsigned int pstate_initialized;
+
+/*
+ * Stores pointer to the state_prop_t for all implemented levels
+ */
+static const plat_state_prop_t *plat_state_ptr[PLAT_MAX_PWR_LEVEL + 1];
+
+/*
+ * Saves number of implemented power states per level
+ */
+static unsigned int power_states_per_level[PLAT_MAX_PWR_LEVEL + 1];
+
+/*
+ * One-time initialisation of the power-state framework: caches, for every
+ * power level, the platform's state-property table and the number of local
+ * power states it defines. Idempotent: subsequent calls return immediately.
+ *
+ * NOTE(review): uses assert() but <assert.h> is not among this file's
+ * visible includes — confirm it is pulled in transitively.
+ */
+void tftf_init_pstate_framework(void)
+{
+	int i, j;
+
+	if (pstate_initialized)
+		return;
+
+	/* Detect the PSCI power state format used. */
+	tftf_detect_psci_pstate_format();
+
+	/*
+	 * Get and save the pointers to plat_state_prop_t values for all
+	 * levels. Also, store the max number of local states possible for
+	 * each level in power_states_per_level.
+	 */
+	for (i = 0; i <= PLAT_MAX_PWR_LEVEL; i++) {
+		plat_state_ptr[i] = plat_get_state_prop(i);
+		assert(plat_state_ptr[i]);
+
+		/* The table is terminated by an entry with state_ID == 0. */
+		for (j = 0; (plat_state_ptr[i]+j)->state_ID != 0; j++)
+			;
+
+		power_states_per_level[i] = j;
+	}
+
+	pstate_initialized = 1;
+}
+
+/*
+ * Advance pstate_id_idx[] to the next composite power-state combination for
+ * 'power_level'. The array behaves like a multi-radix counter: level 0 is
+ * the least significant digit and each level i has radix
+ * power_states_per_level[i]. When every combination has been produced, all
+ * entries up to 'power_level' are reset to PWR_STATE_INIT_INDEX.
+ */
+void tftf_set_next_state_id_idx(unsigned int power_level,
+		unsigned int pstate_id_idx[])
+{
+	unsigned int i;
+#if ENABLE_ASSERTIONS
+	/* Verify that this is a valid power level. */
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	/*
+	 * Verify if a level has PWR_STATE_INIT_INDEX index, all higher levels
+	 * have to be in PWR_STATE_INIT_INDEX. Not needed to check the top
+	 * power level in the outer loop.
+	 */
+	for (i = 0; i < power_level; i++) {
+		if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX) {
+			for ( ; i <= power_level; i++)
+				assert(pstate_id_idx[i] == PWR_STATE_INIT_INDEX);
+		}
+	}
+#endif
+
+	/* Increment the pstate_id_idx starting from the lowest power level */
+	for (i = 0; i <= power_level; i++) {
+		pstate_id_idx[i]++;
+
+		/*
+		 * Wraparound index if the maximum power states available for
+		 * that level is reached and proceed to next level.
+		 */
+		if (pstate_id_idx[i] == power_states_per_level[i])
+			pstate_id_idx[i] = 0;
+		else
+			break;
+	}
+
+	/*
+	 * Check if the requested power level has wrapped around. If it has,
+	 * reset pstate_id_idx.
+	 */
+	if (i > power_level) {
+		for (i = 0; i <= power_level; i++)
+			pstate_id_idx[i] = PWR_STATE_INIT_INDEX;
+	}
+}
+
+/*
+ * Fill pstate_id_idx[0 .. power_level] with the index of the last (deepest)
+ * power state defined at each level.
+ */
+void tftf_set_deepest_pstate_idx(unsigned int power_level,
+		unsigned int pstate_id_idx[])
+{
+	int i;
+
+	/* Verify that this is a valid power level. */
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	/*
+	 * Assign the highest pstate_id_idx starting from the lowest power
+	 * level
+	 */
+	for (i = 0; i <= power_level; i++)
+		pstate_id_idx[i] = power_states_per_level[i] - 1;
+}
+
+
+/*
+ * Compose the PSCI suspend parameters for the power-state combination in
+ * pstate_id_idx[]:
+ * - *suspend_state_id: the per-level state IDs packed at
+ *   PLAT_LOCAL_PSTATE_WIDTH bits per level,
+ * - *test_suspend_type: the deepest is_pwrdown value across the levels,
+ * - *test_power_level: the highest level with a valid (non-INIT) index.
+ *
+ * Returns PSCI_E_SUCCESS, or PSCI_E_INVALID_PARAMS if a higher level
+ * requests a shallower suspend depth than a lower level (an inconsistent
+ * combination).
+ *
+ * NOTE(review): the asserts below use '<=' against power_states_per_level[]
+ * although valid indices are 0 .. count-1; '<' looks intended — confirm.
+ */
+int tftf_get_pstate_vars(unsigned int *test_power_level,
+		unsigned int *test_suspend_type,
+		unsigned int *suspend_state_id,
+		unsigned int pstate_id_idx[])
+{
+	unsigned int i;
+	int state_id = 0;
+	int suspend_type;
+	int suspend_depth;
+	int psci_ret = PSCI_E_SUCCESS;
+	const plat_state_prop_t *local_state;
+
+	/* At least one entry should be valid to generate correct power state params */
+	assert(pstate_id_idx[0] != PWR_STATE_INIT_INDEX &&
+			pstate_id_idx[0] <= power_states_per_level[0]);
+
+	/* Seed depth/type from level 0; higher levels may only deepen them. */
+	suspend_depth = (plat_state_ptr[0] + pstate_id_idx[0])->suspend_depth;
+	suspend_type = (plat_state_ptr[0] + pstate_id_idx[0])->is_pwrdown;
+
+	for (i = 0; i <= PLAT_MAX_PWR_LEVEL; i++) {
+
+		/* Reached all levels with the valid power index values */
+		if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX)
+			break;
+
+		assert(pstate_id_idx[i] <= power_states_per_level[i]);
+
+		local_state = plat_state_ptr[i] + pstate_id_idx[i];
+		state_id |= (local_state->state_ID << i * PLAT_LOCAL_PSTATE_WIDTH);
+
+		if (local_state->is_pwrdown > suspend_type)
+			suspend_type = local_state->is_pwrdown;
+
+		if (local_state->suspend_depth > suspend_depth)
+			psci_ret = PSCI_E_INVALID_PARAMS;
+		else
+			suspend_depth = local_state->suspend_depth;
+	}
+
+	*test_suspend_type = suspend_type;
+	*suspend_state_id = state_id;
+	/* 'i' stopped one past the last valid level; report that level. */
+	*test_power_level = --i;
+
+	return psci_ret;
+}
+
+/*
+ * Advance only the index at 'power_level' to the next local power state,
+ * wrapping to PWR_STATE_INIT_INDEX after the last defined state.
+ */
+void tftf_set_next_local_state_id_idx(unsigned int power_level,
+		unsigned int pstate_id_idx[])
+{
+	assert(power_level <= PLAT_MAX_PWR_LEVEL);
+
+	if (pstate_id_idx[power_level] + 1 >= power_states_per_level[power_level]) {
+		pstate_id_idx[power_level] = PWR_STATE_INIT_INDEX;
+		return;
+	}
+
+	pstate_id_idx[power_level]++;
+}
diff --git a/plat/common/plat_topology.c b/plat/common/plat_topology.c
new file mode 100644
index 000000000..a7920c37b
--- /dev/null
+++ b/plat/common/plat_topology.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <stdlib.h>
+
+/* True iff '_cpu_idx' falls within the range of CPU (level 0) node indices. */
+#define CPU_INDEX_IS_VALID(_cpu_idx)	\
+	(((_cpu_idx) - tftf_pwr_domain_start_idx[0]) < PLATFORM_CORE_COUNT)
+
+/* True iff the node at '_cpu_idx' is a CPU (power level 0) node. */
+#define IS_A_CPU_NODE(_cpu_idx)		(tftf_pd_nodes[(_cpu_idx)].level == 0)
+
+/* Combined sanity check used in assertions on CPU node arguments. */
+#define CPU_NODE_IS_VALID(_cpu_node)	\
+	(CPU_INDEX_IS_VALID(_cpu_node) && IS_A_CPU_NODE(_cpu_node))
+
+/*
+ * Global variable to check that the platform topology is not queried until it
+ * has been setup.
+ */
+static unsigned int topology_setup_done;
+
+/*
+ * Store the start indices of power domains at various levels. This array makes it
+ * easier to traverse the topology tree if the power domain level is known.
+ */
+unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];
+
+/* The grand array to store the platform power domain topology */
+tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
+
+#if DEBUG
+/*
+ * Debug function to display the platform topology.
+ * Does not print absent affinity instances.
+ */
+static void dump_topology(void)
+{
+	unsigned int cluster_idx, cpu_idx, count;
+
+	NOTICE("Platform topology:\n");
+
+	NOTICE("  %u cluster(s)\n", tftf_get_total_clusters_count());
+	NOTICE("  %u CPU(s) (total)\n\n", tftf_get_total_cpus_count());
+
+	/*
+	 * Iterate over the level-1 (cluster) domains. The comma expression
+	 * fetches the next cluster *before* the loop test, so the body never
+	 * sees the PWR_DOMAIN_INIT sentinel.
+	 */
+	for (cluster_idx = PWR_DOMAIN_INIT;
+	     cluster_idx = tftf_get_next_peer_domain(cluster_idx, 1),
+	     cluster_idx != PWR_DOMAIN_INIT;) {
+		/* Count the present CPUs in this cluster. */
+		count = 0;
+		for (cpu_idx = tftf_pd_nodes[cluster_idx].cpu_start_node;
+		     cpu_idx < (tftf_pd_nodes[cluster_idx].cpu_start_node +
+				     tftf_pd_nodes[cluster_idx].ncpus);
+		     cpu_idx++) {
+			if (tftf_pd_nodes[cpu_idx].is_present)
+				count++;
+		}
+		NOTICE("  Cluster #%u [%u CPUs]\n",
+		       cluster_idx - tftf_pwr_domain_start_idx[1],
+		       count);
+		/* Same comma-expression idiom, iterating this cluster's CPUs. */
+		for (cpu_idx = PWR_DOMAIN_INIT;
+		     cpu_idx = tftf_get_next_cpu_in_pwr_domain(cluster_idx, cpu_idx),
+		     cpu_idx != PWR_DOMAIN_INIT;) {
+			NOTICE("    CPU #%u [MPID: 0x%x]\n",
+			       cpu_idx - tftf_pwr_domain_start_idx[0],
+			       tftf_get_mpidr_from_node(cpu_idx));
+		}
+	}
+	NOTICE("\n");
+}
+#endif
+
+/*
+ * Return the number of *present* power domains at affinity level 'aff_lvl',
+ * or 0 if the level is out of range. Must only be called after
+ * tftf_init_topology().
+ */
+unsigned int tftf_get_total_aff_count(unsigned int aff_lvl)
+{
+	unsigned int count = 0;
+	unsigned int node_idx;
+
+	assert(topology_setup_done == 1);
+
+	if (aff_lvl > PLATFORM_MAX_AFFLVL)
+		return count;
+
+	/* Nodes of one level occupy a contiguous slice of tftf_pd_nodes[]. */
+	node_idx = tftf_pwr_domain_start_idx[aff_lvl];
+
+	while (tftf_pd_nodes[node_idx].level == aff_lvl) {
+		if (tftf_pd_nodes[node_idx].is_present)
+			count++;
+		node_idx++;
+	}
+
+	return count;
+}
+
+/*
+ * Return the next *present* power domain node at level 'pwr_lvl' after
+ * 'pwr_domain_idx'. Pass PWR_DOMAIN_INIT to start the iteration; returns
+ * PWR_DOMAIN_INIT when the level is exhausted.
+ */
+unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
+		unsigned int pwr_lvl)
+{
+	assert(topology_setup_done == 1);
+
+	assert(pwr_lvl <= PLATFORM_MAX_AFFLVL);
+
+	/* Start of iteration: begin at the level's first node. */
+	if (pwr_domain_idx == PWR_DOMAIN_INIT) {
+		pwr_domain_idx = tftf_pwr_domain_start_idx[pwr_lvl];
+		if (tftf_pd_nodes[pwr_domain_idx].is_present)
+			return pwr_domain_idx;
+	}
+
+	assert(pwr_domain_idx < PLATFORM_NUM_AFFS &&
+			tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
+
+	/* Scan forward while still inside this level's contiguous slice. */
+	for (++pwr_domain_idx; (pwr_domain_idx < PLATFORM_NUM_AFFS)
+			&& (tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
+			pwr_domain_idx++) {
+		if (tftf_pd_nodes[pwr_domain_idx].is_present)
+			return pwr_domain_idx;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+/*
+ * Return the next *present* CPU node belonging to the power domain
+ * 'pwr_domain_idx', after 'cpu_node'. Pass PWR_DOMAIN_INIT as 'cpu_node' to
+ * start the iteration; returns PWR_DOMAIN_INIT when exhausted.
+ */
+unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
+		unsigned int cpu_node)
+{
+	unsigned int cpu_end_node;
+
+	assert(topology_setup_done == 1);
+	assert(pwr_domain_idx != PWR_DOMAIN_INIT
+			&& pwr_domain_idx < PLATFORM_NUM_AFFS);
+
+	/* Start of iteration: begin at the domain's first CPU node. */
+	if (cpu_node == PWR_DOMAIN_INIT) {
+		cpu_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node;
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	/* CPUs of a domain occupy [cpu_start_node, cpu_start_node + ncpus). */
+	cpu_end_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node
+			+ tftf_pd_nodes[pwr_domain_idx].ncpus - 1;
+
+	assert(cpu_end_node < PLATFORM_NUM_AFFS);
+
+	for (++cpu_node; cpu_node <= cpu_end_node; cpu_node++) {
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+/*
+ * Helper function to get the parent nodes of a particular CPU power
+ * domain.
+ */
+static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
+		unsigned int end_lvl,
+		unsigned int node_index[])
+{
+	unsigned int parent_node = tftf_pd_nodes[cpu_node].parent_node;
+	unsigned int i;
+
+	/*
+	 * node_index[] must hold at least 'end_lvl' entries; entry i-1
+	 * receives the ancestor at power level i (level 1 first).
+	 */
+	for (i = 1; i <= end_lvl; i++) {
+		node_index[i - 1] = parent_node;
+		parent_node = tftf_pd_nodes[parent_node].parent_node;
+	}
+}
+
+/*******************************************************************************
+ * This function updates cpu_start_node and ncpus field for each of the nodes
+ * in tftf_pd_nodes[]. It does so by comparing the parent nodes of each of
+ * the CPUs and check whether they match with the parent of the previous
+ * CPU. The basic assumption for this work is that children of the same parent
+ * are allocated adjacent indices. The platform should ensure this through
+ * proper mapping of the CPUs to indices via platform_get_core_pos() API.
+ *
+ * It also updates the 'is_present' field for non-cpu power domains. It does
+ * this by checking the 'is_present' field of the child cpu nodes and updates
+ * it if any of the child cpu nodes are present.
+ *******************************************************************************/
+static void update_pwrlvl_limits(void)
+{
+	int cpu_id, j, is_present;
+	unsigned int nodes_idx[PLATFORM_MAX_AFFLVL];
+	unsigned int temp_index[PLATFORM_MAX_AFFLVL];
+
+	unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];
+
+	/*
+	 * Seed the "previous parent" cache with an index no real node can
+	 * have. The previous '= {-1}' initialiser only set element 0; the
+	 * remaining elements were zero-initialised, and 0 is a valid node
+	 * index (a top-level node), so that node's cpu_start_node/is_present
+	 * could never be set when PLATFORM_MAX_AFFLVL >= 2.
+	 */
+	for (j = 0; j < PLATFORM_MAX_AFFLVL; j++)
+		nodes_idx[j] = (unsigned int)-1;
+
+	for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
+		/* Collect this CPU's ancestor node, one per power level. */
+		get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
+				PLATFORM_MAX_AFFLVL,
+				temp_index);
+		is_present = tftf_pd_nodes[cpu_id + cpu_node_offset].is_present;
+
+		for (j = PLATFORM_MAX_AFFLVL - 1; j >= 0; j--) {
+			if (temp_index[j] != nodes_idx[j]) {
+				/*
+				 * First CPU seen under this ancestor: record
+				 * the start of its CPU range.
+				 *
+				 * NOTE(review): is_present is only taken from
+				 * this first child, although the comment above
+				 * this function says "any" child should mark
+				 * the parent present — confirm intent.
+				 */
+				nodes_idx[j] = temp_index[j];
+				tftf_pd_nodes[nodes_idx[j]].cpu_start_node
+					= cpu_id + cpu_node_offset;
+				if (!tftf_pd_nodes[nodes_idx[j]].is_present)
+					tftf_pd_nodes[nodes_idx[j]].is_present = is_present;
+			}
+			/* Every CPU contributes to each ancestor's count. */
+			tftf_pd_nodes[nodes_idx[j]].ncpus++;
+		}
+	}
+}
+
+/******************************************************************************
+ * This function populates the power domain topology array 'tftf_pd_nodes[]'
+ * based on the power domain description retrieved from the platform layer.
+ * It also updates the start index of each power domain level in
+ * tftf_pwr_domain_start_idx[]. The uninitialized fields of 'tftf_pd_nodes[]'
+ * for the non CPU power domain will be initialized in update_pwrlvl_limits().
+ *****************************************************************************/
+static void populate_power_domain_tree(void)
+{
+	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl,
+			node_index = 0, parent_idx = 0, num_children;
+	int num_level = PLATFORM_MAX_AFFLVL;
+	const unsigned char *plat_array;
+
+	plat_array = tftf_plat_get_pwr_domain_tree_desc();
+
+	/*
+	 * Breadth-first fill of tftf_pd_nodes[], from the top power level
+	 * down to the CPU level.
+	 *
+	 * For each level the inputs are:
+	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
+	 *   This is the sum of values of nodes at the parent level.
+	 * - Index of first entry at this level in the plat_array i.e.
+	 *   parent_idx.
+	 * - Index of first free entry in tftf_pd_nodes[].
+	 */
+	while (num_level >= 0) {
+		num_nodes_at_next_lvl = 0;
+
+		/* Store the start index for every level */
+		tftf_pwr_domain_start_idx[num_level] = node_index;
+
+		/*
+		 * For each entry (parent node) at this level in the plat_array:
+		 * - Find the number of children
+		 * - Allocate a node in a power domain array for each child
+		 * - Set the parent of the child to the parent_node_index - 1
+		 * - Increment parent_node_index to point to the next parent
+		 * - Accumulate the number of children at next level.
+		 */
+		for (i = 0; i < num_nodes_at_lvl; i++) {
+			assert(parent_idx <=
+				PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT);
+			num_children = plat_array[parent_idx];
+
+			for (j = node_index;
+				j < node_index + num_children; j++) {
+				/* Initialize the power domain node */
+				tftf_pd_nodes[j].parent_node = parent_idx - 1;
+				tftf_pd_nodes[j].level = num_level;
+
+				/* Additional initializations for CPU power domains */
+				if (num_level == 0) {
+					/* Calculate the cpu id from node index */
+					int cpu_id = j - tftf_pwr_domain_start_idx[0];
+
+					assert(cpu_id < PLATFORM_CORE_COUNT);
+
+					/* Set the mpidr of cpu node */
+					tftf_pd_nodes[j].mpidr =
+						tftf_plat_get_mpidr(cpu_id);
+					/* INVALID_MPID marks a non-populated core. */
+					if (tftf_pd_nodes[j].mpidr != INVALID_MPID)
+						tftf_pd_nodes[j].is_present = 1;
+
+					tftf_pd_nodes[j].cpu_start_node = j;
+					tftf_pd_nodes[j].ncpus = 1;
+				}
+			}
+			node_index = j;
+			num_nodes_at_next_lvl += num_children;
+			parent_idx++;
+		}
+
+		num_nodes_at_lvl = num_nodes_at_next_lvl;
+		num_level--;
+	}
+
+	/* Validate the sanity of array exported by the platform */
+	assert(j == PLATFORM_NUM_AFFS);
+}
+
+
+/*
+ * Build the platform power-domain topology from the platform descriptor,
+ * derive per-node CPU ranges and presence, and mark the topology as ready
+ * for the query functions below. Debug builds also dump the topology.
+ */
+void tftf_init_topology(void)
+{
+	populate_power_domain_tree();
+	update_pwrlvl_limits();
+	topology_setup_done = 1;
+#if DEBUG
+	dump_topology();
+#endif
+}
+
+/*
+ * Return the next *present* CPU node after 'cpu_node'. Pass PWR_DOMAIN_INIT
+ * to start the iteration; returns PWR_DOMAIN_INIT when all CPUs have been
+ * visited.
+ */
+unsigned int tftf_topology_next_cpu(unsigned int cpu_node)
+{
+	assert(topology_setup_done == 1);
+
+	/* Start of iteration: begin at the first CPU (level 0) node. */
+	if (cpu_node == PWR_DOMAIN_INIT) {
+		cpu_node = tftf_pwr_domain_start_idx[0];
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	/* CPU nodes occupy the tail of tftf_pd_nodes[]. */
+	for (++cpu_node; cpu_node < PLATFORM_NUM_AFFS; cpu_node++) {
+		if (tftf_pd_nodes[cpu_node].is_present)
+			return cpu_node;
+	}
+
+	return PWR_DOMAIN_INIT;
+}
+
+
+/*
+ * Return the MPIDR of the CPU at node index 'cpu_node', or INVALID_MPID if
+ * that CPU is not present on this platform.
+ */
+unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node)
+{
+	assert(topology_setup_done == 1);
+	assert(CPU_NODE_IS_VALID(cpu_node));
+
+	return tftf_pd_nodes[cpu_node].is_present ?
+		tftf_pd_nodes[cpu_node].mpidr : INVALID_MPID;
+}
+
+/*
+ * Return the MPID of the first CPU whose MPID differs from 'exclude_mpid',
+ * or INVALID_MPID if no such CPU exists.
+ */
+unsigned int tftf_find_any_cpu_other_than(unsigned exclude_mpid)
+{
+	unsigned int node;
+
+	for_each_cpu(node) {
+		unsigned int mpid = tftf_get_mpidr_from_node(node);
+
+		if (mpid == exclude_mpid)
+			continue;
+		return mpid;
+	}
+
+	return INVALID_MPID;
+}
+
+/*
+ * Return the MPID of a randomly chosen CPU whose MPID differs from
+ * 'exclude_mpid', or INVALID_MPID if no such CPU exists. Randomness comes
+ * from rand(); the result is only as random as the current libc seed.
+ */
+unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid)
+{
+	unsigned int cpu_node, mpidr;
+	unsigned int possible_cpus_cnt = 0;
+	unsigned int possible_cpus[PLATFORM_CORE_COUNT];
+
+	/* Gather every candidate MPID first, then pick one at random. */
+	for_each_cpu(cpu_node) {
+		mpidr = tftf_get_mpidr_from_node(cpu_node);
+		if (mpidr != exclude_mpid)
+			possible_cpus[possible_cpus_cnt++] = mpidr;
+	}
+
+	if (possible_cpus_cnt == 0)
+		return INVALID_MPID;
+
+	return possible_cpus[rand() % possible_cpus_cnt];
+}
diff --git a/plat/common/tftf_nvm_accessors.c b/plat/common/tftf_nvm_accessors.c
new file mode 100644
index 000000000..f6d003133
--- /dev/null
+++ b/plat/common/tftf_nvm_accessors.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <spinlock.h>
+#include <status.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#if USE_NVM
+/* Used to serialize write operations from different CPU's */
+static spinlock_t flash_access_lock;
+#endif
+
+/*
+ * Write 'size' bytes from 'buffer' into the TFTF non-volatile storage area
+ * at 'offset' (relative to TFTF_NVM_OFFSET).
+ *
+ * Returns STATUS_OUT_OF_RESOURCES if the access would overrun TFTF_NVM_SIZE,
+ * STATUS_FAIL on an IO error, STATUS_SUCCESS otherwise. When USE_NVM is 0
+ * the storage is emulated in DRAM and the copy cannot fail.
+ */
+STATUS tftf_nvm_write(unsigned long long offset, const void *buffer, size_t size)
+{
+#if USE_NVM
+	int ret;
+	uintptr_t nvm_handle;
+	size_t length_written;
+#endif
+
+	if (offset + size > TFTF_NVM_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+#if USE_NVM
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	/* Serialise flash writes issued from different CPUs. */
+	spin_lock(&flash_access_lock);
+
+	ret = io_seek(nvm_handle, IO_SEEK_SET,
+		offset + TFTF_NVM_OFFSET);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	ret = io_write(nvm_handle, (const uintptr_t)buffer, size,
+		&length_written);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	assert(length_written == size);
+fail:
+	spin_unlock(&flash_access_lock);
+
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+
+#else
+	/* NVM emulated in memory: copy into the DRAM shadow area. */
+	uintptr_t addr = DRAM_BASE + TFTF_NVM_OFFSET + offset;
+	memcpy((void *)addr, buffer, size);
+#endif
+
+	return STATUS_SUCCESS;
+}
+
+/*
+ * Read 'size' bytes from the TFTF non-volatile storage area at 'offset'
+ * (relative to TFTF_NVM_OFFSET) into 'buffer'.
+ *
+ * Returns STATUS_OUT_OF_RESOURCES if the access would overrun TFTF_NVM_SIZE,
+ * STATUS_FAIL on an IO error, STATUS_SUCCESS otherwise. When USE_NVM is 0
+ * the storage is emulated in DRAM and the copy cannot fail.
+ */
+STATUS tftf_nvm_read(unsigned long long offset, void *buffer, size_t size)
+{
+#if USE_NVM
+	int ret;
+	uintptr_t nvm_handle;
+	size_t length_read;
+#endif
+
+	if (offset + size > TFTF_NVM_SIZE)
+		return STATUS_OUT_OF_RESOURCES;
+
+#if USE_NVM
+	/* Obtain a handle to the NVM by querying the platform layer */
+	plat_get_nvm_handle(&nvm_handle);
+
+	/* Serialise flash accesses issued from different CPUs. */
+	spin_lock(&flash_access_lock);
+
+	ret = io_seek(nvm_handle, IO_SEEK_SET, TFTF_NVM_OFFSET + offset);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	ret = io_read(nvm_handle, (uintptr_t)buffer, size, &length_read);
+	if (ret != IO_SUCCESS)
+		goto fail;
+
+	assert(length_read == size);
+fail:
+	spin_unlock(&flash_access_lock);
+
+	if (ret != IO_SUCCESS)
+		return STATUS_FAIL;
+#else
+	/* NVM emulated in memory: copy from the DRAM shadow area. */
+	uintptr_t addr = DRAM_BASE + TFTF_NVM_OFFSET + offset;
+	memcpy(buffer, (void *)addr, size);
+#endif
+
+	return STATUS_SUCCESS;
+}
+
diff --git a/readme.rst b/readme.rst
new file mode 100644
index 000000000..359ff5b62
--- /dev/null
+++ b/readme.rst
@@ -0,0 +1,163 @@
+Trusted Firmware-A Tests - version 2.0
+======================================
+
+The Trusted Firmware-A Tests (TF-A-Tests) is a suite of baremetal tests to
+exercise the `Trusted Firmware-A (TF-A)`_ features from the Normal World. It
+enables strong TF-A functional testing without dependency on a Rich OS. It
+mainly interacts with TF-A through its SMC interface.
+
+It provides a basis for TF-A developers to validate their own platform ports and
+add their own test cases.
+
+The following TF-A features are currently tested to some extent (this list is
+not exhaustive):
+
+- `SMC Calling Convention`_
+- `Power State Coordination Interface (PSCI)`_
+- `Software Delegated Exception Interface (SDEI)`_
+- `Performance Measurement Framework (PMF)`_
+- Communication and interaction with the `Test Secure Payload (TSP)`_
+- `Firmware update`_ (or recovery mode)
+- `EL3 payload`_ boot flow
+- `Secure partition`_ support
+
+These tests are not a compliance test suite for the Arm interface standards used
+in TF-A (such as PSCI).
+
+They do not cover 100% of the TF-A code. The fact that all tests pass does not
+mean that TF-A is free of bugs.
+
+They are not reference code. They should not be considered as the official way
+to test hardware/firmware features. Instead, they are provided as example code
+to experiment with and improve on.
+
+License
+-------
+
+The software is provided under a BSD-3-Clause `license`_. Contributions to this
+project are accepted under the same license with developer sign-off as
+described in the `Contributing Guidelines`_.
+
+This project contains code from other projects as listed below. The original
+license text is included in those source files.
+
+- The libc source code is derived from `FreeBSD`_ code, which uses various BSD
+ licenses, including BSD-3-Clause and BSD-2-Clause.
+
+- The `LLVM compiler-rt`_ source code is disjunctively dual licensed
+ (NCSA OR MIT). It is used by this project under the terms of the NCSA
+ license (also known as the University of Illinois/NCSA Open Source License),
+ which is a permissive license compatible with BSD-3-Clause. Any
+ contributions to this code must be made under the terms of both licenses.
+
+This release
+------------
+
+This is the first public release of the TF-A Tests code. It provides a starting
+point for exercising some of the TF-A features on the Arm FVP and Juno
+platforms, porting the tests to new platforms, enhancing existing tests or
+implementing new ones.
+
+Please note that this code is not mature yet and suffers from some stability
+issues. Refer to the known issues in the `change log`_ for more details.
+
+
+Platforms
+`````````
+
+Juno Arm Development Platform
+'''''''''''''''''''''''''''''
+
+The AArch64 build of this release has been tested on variants r0, r1 and r2 of
+the `Juno Arm Development Platform`_. The AArch32 build has only been tested on
+variant r0.
+
+Arm Fixed Virtual Platforms
+'''''''''''''''''''''''''''
+
+The AArch64 build has been tested on the following Arm Fixed Virtual Platforms
+(`FVP`_):
+
+- ``FVP_Base_AEMv8A-AEMv8A``
+- ``FVP_Base_Cortex-A35x4``
+- ``FVP_Base_Cortex-A57x4-A53x4``
+- ``FVP_Base_RevC-2xAEMv8A``
+- ``Foundation_Platform``
+
+The AArch32 build has been tested on the following `FVP`_\ s:
+
+- ``FVP_Base_Cortex-A32x4``
+- ``FVP_Base_RevC-2xAEMv8A``
+
+NOTE: Unless otherwise stated, the model version is version 11.4, build 37.
+
+Still to come
+`````````````
+
+- More tests.
+- Support for new platforms.
+- Design improvements.
+- Stability improvements.
+- Enhance test framework to make it easier to implement tests.
+- Fixing known issues (see the `change log`_ for more details).
+
+
+Getting started
+---------------
+
+Get the TF-A Tests source code from `trustedfirmware.org`_.
+
+See the `User Guide`_ for instructions on how to install, build and use the TF-A
+Tests.
+
+See the `Design Guide`_ for information on how the TF-A Tests internally work.
+
+See the `Porting Guide`_ for information about how to use this software on
+another Armv8-A platform.
+
+See the `Contributing Guidelines`_ for information on how to contribute to this
+project.
+
+
+IRC channel
+-----------
+
+Development discussion takes place on the same channel as for TF-A, i.e. the
+``#trusted-firmware-a`` channel on the Freenode IRC network. This is not an
+official support channel. If you have an issue to raise, please use the issues
+tracker <TODO: insert link>.
+
+--------------
+
+*Copyright (c) 2018, Arm Limited. All rights reserved.*
+
+.. _Contributing Guidelines: contributing.rst
+.. _license: license.rst
+.. _change log: docs/change-log.rst
+.. _Design Guide: docs/design.rst
+.. _Porting Guide: docs/porting-guide.rst
+.. _User Guide: docs/user-guide.rst
+
+.. _FVP: https://developer.arm.com/products/system-design/fixed-virtual-platforms
+.. _Juno Arm Development Platform: https://developer.arm.com/products/system-design/development-boards/juno-development-board
+
+.. _FreeBSD: http://www.freebsd.org
+.. _LLVM compiler-rt: https://compiler-rt.llvm.org/
+
+.. _Power State Coordination Interface (PSCI): PSCI_
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
+.. _Software Delegated Exception Interface (SDEI): SDEI_
+.. _SDEI: http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
+.. _SMC Calling Convention: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+
+.. _trustedfirmware.org: https://git.trustedfirmware.org/tf-a-tests.git
+
+.. _Trusted Firmware-A (TF-A): TF-A_
+.. _TF-A: https://www.github.com/ARM-software/arm-trusted-firmware
+.. _Test Secure Payload (TSP): TSP_
+.. _TSP: https://github.com/ARM-software/arm-trusted-firmware/tree/master/bl32/tsp
+.. _Performance Measurement Framework (PMF): PMF_
+.. _PMF: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/firmware-design.rst#performance-measurement-framework
+.. _Firmware update: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/firmware-update.rst
+.. _EL3 payload: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/user-guide.rst#el3-payloads-alternative-boot-flow
+.. _Secure partition: https://github.com/ARM-software/arm-trusted-firmware/blob/master/docs/secure-partition-manager-design.rst
diff --git a/scripts/run_fwu_fvp.sh b/scripts/run_fwu_fvp.sh
new file mode 100755
index 000000000..a47365efc
--- /dev/null
+++ b/scripts/run_fwu_fvp.sh
@@ -0,0 +1,34 @@
+#! /bin/bash
+
+set -e
+
+function check_file {
+ if [ -e "$1" ]; then
+ return 0
+ else
+ echo "File $1 does not exist."
+ return 1
+ fi
+}
+
+# Check the presence of all required files.
+check_file bl1.bin
+check_file ns_bl1u.bin
+check_file fip.bin
+check_file fwu_fip.bin
+check_file backup_fip.bin ||
+ (echo "Creating backup_fip.bin as a copy of fip.bin." ;
+ cp fip.bin backup_fip.bin)
+
+# Chosen topology: 2 clusters of 4 cores each.
+# Power on core 0 only.
+# Load all binaries at the right addresses.
+FVP_Base_RevC-2xAEMv8A \
+ -C cluster0.NUM_CORES=4 \
+ -C cluster1.NUM_CORES=4 \
+ -C pctl.startup=0.0.0.0 \
+ -C bp.secureflashloader.fname=bl1.bin \
+ -C bp.flashloader0.fname=fip.bin \
+ --data cluster0.cpu0=backup_fip.bin@0x09000000 \
+ --data cluster0.cpu0=fwu_fip.bin@0x08400000 \
+ --data cluster0.cpu0=ns_bl1u.bin@0x0beb8000
diff --git a/spm/cactus/aarch64/cactus_entrypoint.S b/spm/cactus/aarch64/cactus_entrypoint.S
new file mode 100644
index 000000000..704905ee7
--- /dev/null
+++ b/spm/cactus/aarch64/cactus_entrypoint.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <secure_partition.h>
+#include <spm_svc.h>
+#include <xlat_tables_defs.h>
+
+ .globl cactus_entrypoint
+
+func cactus_entrypoint
+
+ /*
+ * All the information needed to remap the memory of the Secure
+ * Partition is in the buffer whose pointer is passed on X0 and size on
+ * X1. If the size is 0, return with an error.
+ */
+ cmp x1, #0
+ beq .return_error
+
+ /* Save the base address and size of the buffer. */
+ mov x20, x0
+ mov x21, x1
+ /* Size of the Secure Partition image. */
+ ldr x22, [x20, SP_BOOT_INFO_IMAGE_SIZE_OFFSET]
+
+ /*
+ * Remap all sections of the image before doing anything else.
+ *
+ * Not even the console can be initialized before because it needs to
+ * initialize variables (that can only be modified after remapping that
+ * region as RW).
+ *
+ * If any of the calls fails, loop, as there is no console to print an
+ * error message to.
+ */
+ .macro set_sp_mem_attributes
+ cmp x2, #0 /* If size is 0, skip the call. */
+ beq 1f
+ mov_imm x0, SP_MEMORY_ATTRIBUTES_SET_AARCH64
+ svc #0
+ cmp x0, #0
+ bne .return_error
+1:
+ .endm
+
+ adr x1, __TEXT_START__
+ adr x2, __TEXT_END__
+ sub x2, x2, x1 /* __TEXT_SIZE__ */
+ lsr x2, x2, PAGE_SIZE_SHIFT /* __TEXT_SIZE__ in pages */
+ mov x3, SP_MEMORY_ATTRIBUTES_ACCESS_RO | SP_MEMORY_ATTRIBUTES_EXEC
+ set_sp_mem_attributes
+
+ adr x1, __RODATA_START__
+ adr x2, __RODATA_END__
+ sub x2, x2, x1 /* __RODATA_SIZE__ */
+ lsr x2, x2, PAGE_SIZE_SHIFT /* __RODATA_SIZE__ in pages */
+ mov x3, SP_MEMORY_ATTRIBUTES_ACCESS_RO | SP_MEMORY_ATTRIBUTES_NON_EXEC
+ set_sp_mem_attributes
+
+ adr x1, __RWDATA_START__
+ adr x2, __RWDATA_END__
+ sub x2, x2, x1 /* __RWDATA_SIZE__ */
+ lsr x2, x2, PAGE_SIZE_SHIFT /* __RWDATA_SIZE__ in pages */
+ mov x3, SP_MEMORY_ATTRIBUTES_ACCESS_RW | SP_MEMORY_ATTRIBUTES_NON_EXEC
+ set_sp_mem_attributes
+
+ /*
+ * To avoid accessing it by mistake, prevent EL0 from accessing the rest
+ * of the memory reserved for the Secure Partition.
+ *
+ * Unused size = Total size - Used size
+ * = Total size - (__RWDATA_END__ - __TEXT_START__)
+ */
+ adr x1, __RWDATA_END__
+ adr x2, __TEXT_START__
+ sub x2, x1, x2 /* x2 = Used size, x22 = Total size */
+ sub x2, x22, x2 /* x2 = Unused size */
+ lsr x2, x2, PAGE_SIZE_SHIFT /* Unused size in pages */
+ mov x3, SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS | SP_MEMORY_ATTRIBUTES_NON_EXEC
+ set_sp_mem_attributes
+
+ adr x0, __BSS_START__
+ adr x1, __BSS_END__
+ sub x1, x1, x0
+ bl zeromem16
+
+ /* Setup the stack pointer. */
+ ldr x0, [x20, SP_BOOT_INFO_STACK_BASE_OFFSET]
+ ldr x1, [x20, SP_BOOT_INFO_PCPU_STACK_SIZE_OFFSET]
+ add x0, x0, x1
+ mov sp, x0
+
+ /* And do the rest in C code */
+ mov x0, x20
+ mov x1, x21
+ b cactus_main
+
+.return_error:
+ /* Tell SPM that the initialization failed. */
+ mov_imm x0, SP_EVENT_COMPLETE_AARCH64
+ mov x1, #-1
+ svc #0
+
+ /* Loop forever */
+ b .
+
+endfunc cactus_entrypoint
diff --git a/spm/cactus/cactus.h b/spm/cactus/cactus.h
new file mode 100644
index 000000000..966b154d8
--- /dev/null
+++ b/spm/cactus/cactus.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CACTUS_H__
+#define __CACTUS_H__
+
+#include <secure_partition.h>
+#include <types.h>
+
+/* Linker symbols used to figure out the memory layout of Cactus. */
+extern uintptr_t __TEXT_START__, __TEXT_END__;
+#define CACTUS_TEXT_START ((uintptr_t)&__TEXT_START__)
+#define CACTUS_TEXT_END ((uintptr_t)&__TEXT_END__)
+
+extern uintptr_t __RODATA_START__, __RODATA_END__;
+#define CACTUS_RODATA_START ((uintptr_t)&__RODATA_START__)
+#define CACTUS_RODATA_END ((uintptr_t)&__RODATA_END__)
+
+extern uintptr_t __RWDATA_START__, __RWDATA_END__;
+#define CACTUS_RWDATA_START ((uintptr_t)&__RWDATA_START__)
+#define CACTUS_RWDATA_END ((uintptr_t)&__RWDATA_END__)
+
+extern uintptr_t __BSS_START__, __BSS_END__;
+#define CACTUS_BSS_START ((uintptr_t)&__BSS_START__)
+#define CACTUS_BSS_END ((uintptr_t)&__BSS_END__)
+
+/*
+ * Once Cactus has finished its initialisation, this is the function it will
+ * jump to to handle runtime services for the rest of its lifetime.
+ */
+__dead2 void secure_services_loop(void);
+
+#endif /* __CACTUS_H__ */
diff --git a/spm/cactus/cactus.ld.S b/spm/cactus/cactus.ld.S
new file mode 100644
index 000000000..b33f591f5
--- /dev/null
+++ b/spm/cactus/cactus.ld.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(cactus_entrypoint)
+
+SECTIONS
+{
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "TEXT_START address is not aligned to PAGE_SIZE.")
+
+ .text : {
+ __TEXT_START__ = .;
+ *cactus_entrypoint.o(.text*)
+ *(.text*)
+ *(.vectors)
+ . = NEXT(PAGE_SIZE);
+ __TEXT_END__ = .;
+ }
+
+ .rodata : {
+ . = ALIGN(PAGE_SIZE);
+ __RODATA_START__ = .;
+ *(.rodata*)
+ . = NEXT(PAGE_SIZE);
+ __RODATA_END__ = .;
+ }
+
+
+ .data : {
+ . = ALIGN(PAGE_SIZE);
+ __RWDATA_START__ = .;
+ *(.data*)
+ }
+
+ .bss : {
+ . = ALIGN(16);
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ . = NEXT(PAGE_SIZE);
+ __BSS_END__ = .;
+ __RWDATA_END__ = .;
+ }
+}
diff --git a/spm/cactus/cactus.mk b/spm/cactus/cactus.mk
new file mode 100644
index 000000000..f7794db7d
--- /dev/null
+++ b/spm/cactus/cactus.mk
@@ -0,0 +1,73 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+CACTUS_INCLUDES := \
+ -Iinclude \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/drivers \
+ -Iinclude/drivers/arm \
+ -Iinclude/lib \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/stdlib \
+ -Iinclude/lib/stdlib/sys \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/runtime_services \
+ -Iinclude/runtime_services/secure_el0_payloads \
+ -Ispm/cactus \
+ -Ispm/common \
+
+CACTUS_SOURCES := \
+ $(addprefix spm/cactus/, \
+ aarch64/cactus_entrypoint.S \
+ cactus_main.c \
+ cactus_service_loop.c \
+ cactus_tests_memory_attributes.c \
+ cactus_tests_misc.c \
+ cactus_tests_system_setup.c \
+ ) \
+ $(addprefix spm/common/, \
+ aarch64/sp_arch_helpers.S \
+ sp_helpers.c \
+ ) \
+
+STDLIB_SOURCES := $(addprefix lib/stdlib/, \
+ assert.c \
+ mem.c \
+ putchar.c \
+ printf.c \
+ rand.c \
+ strlen.c \
+ subr_prf.c \
+)
+
+# TODO: Remove dependency on TFTF files.
+CACTUS_SOURCES += \
+ tftf/framework/debug.c \
+ tftf/framework/${ARCH}/asm_debug.S
+
+CACTUS_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \
+ lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/misc_helpers.S \
+ plat/common/${ARCH}/platform_helpers.S \
+ ${STDLIB_SOURCES}
+
+CACTUS_LINKERFILE := spm/cactus/cactus.ld.S
+
+CACTUS_DEFINES :=
+
+$(eval $(call add_define,CACTUS_DEFINES,DEBUG))
+$(eval $(call add_define,CACTUS_DEFINES,ENABLE_ASSERTIONS))
+$(eval $(call add_define,CACTUS_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,CACTUS_DEFINES,PLAT_${PLAT}))
+ifeq (${ARCH},aarch32)
+ $(eval $(call add_define,CACTUS_DEFINES,AARCH32))
+else
+ $(eval $(call add_define,CACTUS_DEFINES,AARCH64))
+endif
+
+cactus: ${AUTOGEN_DIR}/tests_list.h
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
new file mode 100644
index 000000000..45197d354
--- /dev/null
+++ b/spm/cactus/cactus_main.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <pl011.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <secure_partition.h>
+#include <sp_helpers.h>
+#include <spm_svc.h>
+#include <std_svc.h>
+
+#include "cactus.h"
+#include "cactus_tests.h"
+
+
+/* Host machine information injected by the build system in the ELF file. */
+extern const char build_message[];
+extern const char version_string[];
+
+/*
+ * The ARM Trusted Firmware passes a description of the memory resources
+ * allocated to the secure partition through the x0 register. This maps to
+ * a secure_partition_boot_info_t structure type.
+ *
+ * This function prints the information stored in this structure.
+ */
+static void cactus_print_memory_layout(const secure_partition_boot_info_t *boot_info)
+{
+ NOTICE("Secure Partition memory layout:\n");
+
+ NOTICE(" Secure Partition image : %p - %p\n",
+ (void *) boot_info->sp_image_base,
+ (void *)(boot_info->sp_image_base + boot_info->sp_image_size));
+ NOTICE(" Text region : %p - %p\n",
+ (void *) CACTUS_TEXT_START, (void *) CACTUS_TEXT_END);
+ NOTICE(" Read-only data region : %p - %p\n",
+ (void *) CACTUS_RODATA_START, (void *) CACTUS_RODATA_END);
+ NOTICE(" Read-write data region : %p - %p\n",
+ (void *) CACTUS_RWDATA_START, (void *) CACTUS_RWDATA_END);
+ NOTICE(" BSS region : %p - %p\n",
+ (void *) CACTUS_BSS_START, (void *) CACTUS_BSS_END);
+ NOTICE(" Unused SP image space : %p - %p\n",
+ (void *) CACTUS_BSS_END,
+ (void *)(boot_info->sp_image_base + boot_info->sp_image_size));
+
+ NOTICE(" EL3-EL0 shared buffer : %p - %p\n",
+ (void *) boot_info->sp_shared_buf_base,
+ (void *)(boot_info->sp_shared_buf_base + boot_info->sp_shared_buf_size));
+
+ NOTICE(" S-NS shared buffer : %p - %p\n",
+ (void *) boot_info->sp_ns_comm_buf_base,
+ (void *)(boot_info->sp_ns_comm_buf_base + boot_info->sp_ns_comm_buf_size));
+
+ assert(boot_info->sp_ns_comm_buf_base == ARM_SECURE_SERVICE_BUFFER_BASE);
+ assert(boot_info->sp_ns_comm_buf_size == ARM_SECURE_SERVICE_BUFFER_SIZE);
+
+ NOTICE(" Stacks region (%u CPUS) : %p - %p\n",
+ boot_info->num_cpus,
+ (void *) boot_info->sp_stack_base,
+ (void *)(boot_info->sp_stack_base +
+ (boot_info->sp_pcpu_stack_size * boot_info->num_cpus)));
+
+ NOTICE(" Heap region : %p - %p\n",
+ (void *) boot_info->sp_heap_base,
+ (void *)(boot_info->sp_heap_base + boot_info->sp_heap_size));
+
+ NOTICE("Total memory : %p - %p\n",
+ (void *) boot_info->sp_mem_base, (void *) boot_info->sp_mem_limit);
+}
+
+
+void __dead2 cactus_main(void *el3_el0_buffer, size_t el3_el0_buffer_size)
+{
+ console_init(PLAT_ARM_UART_BASE,
+ PLAT_ARM_UART_CLK_IN_HZ,
+ PL011_BAUDRATE);
+
+ NOTICE("Booting test Secure Partition Cactus\n");
+ NOTICE("%s\n", build_message);
+ NOTICE("%s\n", version_string);
+ NOTICE("Running at S-EL0\n");
+
+ const secure_partition_boot_info_t *boot_info =
+ (const secure_partition_boot_info_t *) el3_el0_buffer;
+
+ if (el3_el0_buffer_size < sizeof(secure_partition_boot_info_t)) {
+ ERROR("The memory buffer shared between EL3/S-EL0 is too small\n");
+ ERROR("It is %lu bytes, it should be at least %lu bytes\n",
+ el3_el0_buffer_size,
+ sizeof(secure_partition_boot_info_t));
+ panic();
+ }
+
+ if ((CACTUS_TEXT_START != boot_info->sp_image_base) ||
+ (CACTUS_RWDATA_END > boot_info->sp_image_base
+ + boot_info->sp_image_size)) {
+ ERROR("Cactus does not fit in the buffer allocated for the secure partition\n");
+ panic();
+ }
+
+ cactus_print_memory_layout(boot_info);
+
+
+ /*
+ * Run some initial tests.
+ *
+ * These are executed when the system is still booting, just after SPM
+ * has handed over to Cactus.
+ */
+ misc_tests();
+ system_setup_tests();
+ mem_attr_changes_tests(boot_info);
+
+ /*
+ * Handle secure service requests.
+ */
+ secure_services_loop();
+}
diff --git a/spm/cactus/cactus_service_loop.c b/spm/cactus/cactus_service_loop.c
new file mode 100644
index 000000000..9f542b548
--- /dev/null
+++ b/spm/cactus/cactus_service_loop.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mm_svc.h>
+#include <secure_partition.h>
+#include <sp_helpers.h>
+#include <spm_svc.h>
+#include <string.h>
+
+
+/*
+ * Handle a fast secure service request, i.e. one made through an MM_COMMUNICATE
+ * call.
+ *
+ * cc
+ * Calling convention. If MM_COMMUNICATE has been invoked using the SMC32
+ * calling convention, this argument must be 32, else 64.
+ *
+ * sps
+ * Communication buffer attached to the secure partition service request.
+ */
+static int32_t cactus_handle_fast_request(int cc,
+ secure_partition_request_info_t *sps)
+{
+ assert(cc == 32 || cc == 64);
+
+ /* No SMC32 is supported at the moment. Just ignore them. */
+ if (cc == 32) {
+ INFO("Ignoring MM_COMMUNICATE_AARCH32 call\n");
+ return SPM_SUCCESS;
+ }
+
+ /* See secure_partition.h for possible ID values. */
+ switch (sps->id) {
+ case SPS_TIMER_SLEEP: {
+ if (sps->data_size != 1) {
+ ERROR("Invalid payload size for SPM_SPS_TIMER_SLEEP request (%llu)\n",
+ sps->data_size);
+ return SPM_INVALID_PARAMETER;
+ }
+ int duration_sec = sps->data[0];
+ sp_sleep(duration_sec);
+
+ /*
+ * Write back to the communication buffer to acknowledge the
+ * request has been successfully handled.
+ */
+ uint32_t response = CACTUS_FAST_REQUEST_SUCCESS;
+ memcpy(sps->data, &response, sizeof(response));
+ return SPM_SUCCESS;
+ }
+
+ case SPS_CHECK_ALIVE:
+ return SPM_SUCCESS;
+
+ default:
+ INFO("Unsupported MM_COMMUNICATE_AARCH64 call with service ID 0x%x, ignoring it\n",
+ sps->id);
+ return SPM_INVALID_PARAMETER;
+ }
+}
+
+__dead2 void secure_services_loop(void)
+{
+ int32_t event_status_code;
+ svc_args svc_values = { 0 };
+
+ /*
+ * The first time this loop is executed corresponds to when Cactus has
+ * finished initialising its run time environment and is ready to handle
+ * secure service requests.
+ */
+ NOTICE("Cactus: Signal end of init to SPM\n");
+ event_status_code = SPM_SUCCESS;
+
+ while (1) {
+ svc_values.arg0 = SP_EVENT_COMPLETE_AARCH64;
+ svc_values.arg1 = event_status_code;
+ int32_t event_id = sp_svc(&svc_values);
+
+ switch (event_id) {
+ case MM_COMMUNICATE_AARCH64:
+ {
+ uint64_t ctx_addr = svc_values.arg1;
+ uint32_t ctx_size = svc_values.arg2;
+ uint64_t cookie = svc_values.arg3;
+
+ NOTICE("Cactus: Received MM_COMMUNICATE_AARCH64 call\n");
+ NOTICE("Cactus: Context address: 0x%llx\n", ctx_addr);
+ NOTICE("Cactus: Context size : %u\n", ctx_size);
+ NOTICE("Cactus: Cookie : 0x%llx\n", cookie);
+
+ if (ctx_addr == 0) {
+ ERROR("Context address is invalid\n");
+ event_status_code = SPM_INVALID_PARAMETER;
+ continue;
+ }
+
+ secure_partition_request_info_t *sps = (void *)(uintptr_t) ctx_addr;
+ NOTICE("Received fast secure service request with ID #%u\n",
+ sps->id);
+ event_status_code = cactus_handle_fast_request(64, sps);
+ break;
+ }
+
+ case MM_COMMUNICATE_AARCH32:
+ {
+ uint32_t ctx_addr = svc_values.arg1;
+ uint32_t ctx_size = svc_values.arg2;
+ uint32_t cookie = svc_values.arg3;
+
+ NOTICE("Cactus: Received MM_COMMUNICATE_AARCH32 call\n");
+ NOTICE("Cactus: Context address: 0x%x\n", ctx_addr);
+ NOTICE("Cactus: Context size : %u\n", ctx_size);
+ NOTICE("Cactus: Cookie : 0x%x\n", cookie);
+
+ if (ctx_addr == 0) {
+ ERROR("Context address is invalid\n");
+ event_status_code = SPM_INVALID_PARAMETER;
+ continue;
+ }
+
+ secure_partition_request_info_t *sps = (void *)(uintptr_t) ctx_addr;
+ NOTICE("Received fast secure service request with ID #%u\n",
+ sps->id);
+ event_status_code = cactus_handle_fast_request(32, sps);
+ break;
+ }
+
+ default:
+ NOTICE("Unhandled Service ID 0x%x\n", event_id);
+ event_status_code = SPM_NOT_SUPPORTED;
+ break;
+ }
+ }
+}
diff --git a/spm/cactus/cactus_tests.h b/spm/cactus/cactus_tests.h
new file mode 100644
index 000000000..d0e11dcba
--- /dev/null
+++ b/spm/cactus/cactus_tests.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CACTUS_TESTS_H__
+#define __CACTUS_TESTS_H__
+
+#include <secure_partition.h>
+
+/*
+ * Test functions
+ */
+
+/*
+ * Test other things like the version number returned by SPM.
+ */
+void misc_tests(void);
+
+/*
+ * The Arm TF is responsible for setting up system registers on behalf of the
+ * Secure Partition. For example, TF is supposed to allow Secure Partitions to
+ * perform cache maintenance operations (by setting the SCTLR_EL1.UCI bit).
+ *
+ * This function attempts to verify that we indeed have access to these system
+ * features from S-EL0. These tests report their results on the UART. They do
+ * not recover from a failure: when an error is encountered they will most
+ * likely trigger an exception into S-EL1.
+ */
+void system_setup_tests(void);
+
+/*
+ * Exercise the SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC interface. A variety of
+ * valid and invalid requests to change memory attributes are tested.
+ *
+ * These tests report their results on the UART. They do not recover from a
+ * failure: when an error is encountered they endlessly loop.
+ *
+ * The argument is a pointer to a secure_partition_boot_info_t struct that has
+ * been filled by EL3 with the information about the memory map of this Secure
+ * Partition.
+ */
+void mem_attr_changes_tests(const secure_partition_boot_info_t *boot_info);
+
+#endif /* __CACTUS_TESTS_H__ */
diff --git a/spm/cactus/cactus_tests_memory_attributes.c b/spm/cactus/cactus_tests_memory_attributes.c
new file mode 100644
index 000000000..1a3072bea
--- /dev/null
+++ b/spm/cactus/cactus_tests_memory_attributes.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <secure_partition.h>
+#include <sp_helpers.h>
+#include <spm_svc.h>
+#include <stdio.h>
+#include <types.h>
+#include <xlat_tables_defs.h>
+
+#include "cactus.h"
+#include "cactus_tests.h"
+
+/* This is filled at runtime. */
+static uintptr_t cactus_tests_start;
+static uintptr_t cactus_tests_end;
+static uintptr_t cactus_tests_size;
+
+/*
+ * Given the required instruction and data access permissions,
+ * create a memory access controls value that is formatted as expected
+ * by the SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC.
+ */
+static inline uint32_t mem_access_perm(int instr_access_perm,
+ int data_access_perm)
+{
+ return instr_access_perm |
+ ((data_access_perm & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
+ << SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT);
+}
+
+/*
+ * Send an SP_MEMORY_ATTRIBUTES_SET_AARCH64 SVC with the given arguments.
+ * Return the return value of the SVC.
+ */
+static int32_t request_mem_attr_changes(uintptr_t base_address,
+ int pages_count,
+ uint32_t memory_access_controls)
+{
+ INFO("Requesting memory attributes change\n");
+ INFO(" Start address : %p\n", (void *) base_address);
+ INFO(" Number of pages: %i\n", pages_count);
+ INFO(" Attributes : 0x%x\n", memory_access_controls);
+
+ svc_args svc_values = { SP_MEMORY_ATTRIBUTES_SET_AARCH64,
+ base_address,
+ pages_count,
+ memory_access_controls };
+ return sp_svc(&svc_values);
+}
+
+/*
+ * Send an SP_MEMORY_ATTRIBUTES_GET_AARCH64 SVC with the given arguments.
+ * Return the return value of the SVC.
+ */
+static int32_t request_get_mem_attr(uintptr_t base_address)
+{
+ INFO("Requesting memory attributes\n");
+ INFO(" Base address : %p\n", (void *) base_address);
+
+ svc_args svc_values = { SP_MEMORY_ATTRIBUTES_GET_AARCH64,
+ base_address };
+ return sp_svc(&svc_values);
+}
+
+/*
+ * This function expects a base address and number of pages identifying the
+ * extents of some memory region mapped as non-executable, read-only.
+ *
+ * 1) It changes its data access permissions to read-write.
+ * 2) It checks this memory can now be written to.
+ * 3) It restores the original data access permissions.
+ *
+ * If any check fails, it loops forever. It could also trigger a permission
+ * fault while trying to write to the memory.
+ */
+static void mem_attr_changes_unittest(uintptr_t addr, int pages_count)
+{
+ int32_t ret;
+ uintptr_t end_addr = addr + pages_count * PAGE_SIZE;
+ uint32_t old_attr, new_attr;
+
+ char test_desc[50];
+
+ snprintf(test_desc, sizeof(test_desc),
+ "RO -> RW (%i page(s) from address 0x%lx)", pages_count, addr);
+ announce_test_start(test_desc);
+
+ /*
+ * Ensure we don't change the attributes of some random memory
+ * location
+ */
+ assert(addr >= cactus_tests_start);
+ assert(end_addr < (cactus_tests_start + cactus_tests_size));
+
+ old_attr = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RO);
+ /* Memory was read-only, let's try changing that to RW */
+ new_attr = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+
+ ret = request_mem_attr_changes(addr, pages_count, new_attr);
+ expect(ret, SPM_SUCCESS);
+ printf("Successfully changed memory attributes\n");
+
+ /* The attributes should be the ones we have just written. */
+ ret = request_get_mem_attr(addr);
+ expect(ret, new_attr);
+
+ /* If it worked, we should be able to write to this memory now! */
+ for (unsigned char *data = (unsigned char *) addr;
+ (uintptr_t) data != end_addr;
+ ++data) {
+ *data = 42;
+ }
+ printf("Successfully wrote to the memory\n");
+
+ /* Let's revert back to the original attributes for the next test */
+ ret = request_mem_attr_changes(addr, pages_count, old_attr);
+ expect(ret, SPM_SUCCESS);
+ printf("Successfully restored the old attributes\n");
+
+ /* The attributes should be the original ones again. */
+ ret = request_get_mem_attr(addr);
+ expect(ret, old_attr);
+
+ announce_test_end(test_desc);
+}
+
+/*
+ * Exercise the ability of the Trusted Firmware to change the data access
+ * permissions and instruction execution permissions of some memory region.
+ */
+void mem_attr_changes_tests(const secure_partition_boot_info_t *boot_info)
+{
+ uint32_t attributes;
+ int32_t ret;
+ uintptr_t addr;
+
+ cactus_tests_start = CACTUS_BSS_END;
+ cactus_tests_end = boot_info->sp_image_base + boot_info->sp_image_size;
+ cactus_tests_size = cactus_tests_end - cactus_tests_start;
+
+ const char *test_sect_desc = "memory attributes changes";
+
+ announce_test_section_start(test_sect_desc);
+ /*
+ * Start with error cases, i.e. requests that are expected to be denied
+ */
+ const char *test_desc1 = "Read-write, executable";
+
+ announce_test_start(test_desc1);
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ ret = request_mem_attr_changes(CACTUS_RWDATA_START, 1, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc1);
+
+ const char *test_desc2 = "Size == 0";
+
+ announce_test_start(test_desc2);
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ ret = request_mem_attr_changes(CACTUS_RWDATA_START, 0, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc2);
+
+ const char *test_desc3 = "Unaligned address";
+
+ announce_test_start(test_desc3);
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ /* Choose an address not aligned to a page boundary. */
+ addr = cactus_tests_start + 5;
+ ret = request_mem_attr_changes(addr, 1, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc3);
+
+ const char *test_desc4 = "Unmapped memory region";
+
+ announce_test_start(test_desc4);
+ addr = boot_info->sp_mem_limit + 2 * PAGE_SIZE;
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ ret = request_mem_attr_changes(addr, 3, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc4);
+
+ const char *test_desc5 = "Partially unmapped memory region";
+
+ announce_test_start(test_desc5);
+ addr = boot_info->sp_mem_base - 2 * PAGE_SIZE;
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ ret = request_mem_attr_changes(addr, 6, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc5);
+
+ const char *test_desc6 = "Memory region mapped with the wrong granularity";
+
+ announce_test_start(test_desc6);
+ /*
+ * This address is usually mapped at a 2 MiB granularity. By using as
+ * test address the block after the console we make sure that in case
+ * the attributes of the block actually changed, the console would work
+ * and we would get the error message.
+ */
+ addr = ((uintptr_t)PLAT_ARM_UART_BASE + 0x200000ULL) & ~(0x200000ULL - 1ULL);
+ attributes = mem_access_perm(SP_MEMORY_ATTRIBUTES_NON_EXEC, SP_MEMORY_ATTRIBUTES_ACCESS_RW);
+ ret = request_mem_attr_changes(addr, 1, attributes);
+ expect(ret, SPM_INVALID_PARAMETER);
+ announce_test_end(test_desc6);
+
+ const char *test_desc7 = "Try some valid memory change requests";
+
+ announce_test_start(test_desc7);
+ for (unsigned int i = 0; i < 20; ++i) {
+ /*
+ * Choose some random address in the pool of memory reserved
+ * for these tests.
+ */
+ const int pages_max = cactus_tests_size / PAGE_SIZE;
+ int pages_count = bound_rand(1, pages_max);
+
+ addr = bound_rand(
+ cactus_tests_start,
+ cactus_tests_end - (pages_count * PAGE_SIZE));
+ /* Align to PAGE_SIZE. */
+ addr &= ~(PAGE_SIZE - 1);
+
+ mem_attr_changes_unittest(addr, pages_count);
+ }
+ announce_test_end(test_desc7);
+
+ announce_test_section_end(test_sect_desc);
+}
diff --git a/spm/cactus/cactus_tests_misc.c b/spm/cactus/cactus_tests_misc.c
new file mode 100644
index 000000000..bb8eaa77d
--- /dev/null
+++ b/spm/cactus/cactus_tests_misc.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <sp_helpers.h>
+#include <spm_svc.h>
+#include <types.h>
+
+#include "cactus.h"
+#include "cactus_tests.h"
+
+/*
+ * Miscellaneous SPM tests.
+ */
+void misc_tests(void)
+{
+ int32_t ret;
+
+ const char *test_sect_desc = "miscellaneous";
+
+ announce_test_section_start(test_sect_desc);
+
+ const char *test_version = "SPM version check";
+
+ announce_test_start(test_version);
+ svc_args svc_values = { SPM_VERSION_AARCH32 };
+ ret = sp_svc(&svc_values);
+ INFO("Version = 0x%x (%u.%u)\n", ret,
+ (ret >> 16) & 0x7FFF, ret & 0xFFFF);
+ expect(ret, SPM_VERSION_COMPILED);
+ announce_test_end(test_version);
+
+ announce_test_section_end(test_sect_desc);
+}
diff --git a/spm/cactus/cactus_tests_system_setup.c b/spm/cactus/cactus_tests_system_setup.c
new file mode 100644
index 000000000..685d82dde
--- /dev/null
+++ b/spm/cactus/cactus_tests_system_setup.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <sp_helpers.h>
+#include <types.h>
+
+#include "cactus.h"
+
+extern uintptr_t __TEXT_START__;
+
+void system_setup_tests(void)
+{
+ const char *test_sect_desc = "system setup";
+
+ announce_test_section_start(test_sect_desc);
+
+ /*
+ * Try accessing CTR_EL0 register. This should work if SCTLR_EL1.UCT bit
+	 * has been correctly set up by TF.
+ */
+ const char *test_desc1 = "Read CTR_EL0 register";
+
+ announce_test_start(test_desc1);
+
+ uint32_t ctr __unused = read_ctr_el0();
+
+ INFO("CTR_EL0 = 0x%x\n", ctr);
+ announce_test_end(test_desc1);
+
+ /*
+ * Try to execute a cache maintenance instruction. This should work if
+	 * SCTLR_EL1.UCI bit has been correctly set up by TF.
+ */
+ const char *test_desc2 = "Access to cache maintenance operations";
+
+ announce_test_start(test_desc2);
+ flush_dcache_range((uintptr_t)&__TEXT_START__, 1);
+ announce_test_end(test_desc2);
+
+ /*
+ * Try accessing a floating point register. This should not trap to
+ * S-EL1.
+ */
+ const char *test_desc3 = "Access to FP regs";
+
+ announce_test_start(test_desc3);
+ /*
+ * Can't use the 'double' type here because Cactus (like the rest of
+ * the TF code) is compiled with GCC's -mgeneral-regs-only compiler flag
+ * that disables floating point support in GCC.
+ */
+ uint64_t fp_reg;
+
+ __asm__ volatile("fmov %0, d0" : "=r" (fp_reg) :: "d0");
+ INFO("D0 = 0x%llx\n", fp_reg);
+ __asm__ volatile(
+ "fmov d0, #1.0 \n\t"
+ "fmov %0, d0 \n\t"
+ : "=r" (fp_reg)
+ :
+ : "d0");
+ INFO("D0 = 0x%llx\n", fp_reg);
+ announce_test_end(test_desc3);
+
+ announce_test_section_end(test_sect_desc);
+}
diff --git a/spm/common/aarch64/sp_arch_helpers.S b/spm/common/aarch64/sp_arch_helpers.S
new file mode 100644
index 000000000..11f408733
--- /dev/null
+++ b/spm/common/aarch64/sp_arch_helpers.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl sp_svc
+
+func sp_svc
+ /*
+ * Save the address of the svc_args structure on the stack.
+ *
+ * Although x0 contains an 8-byte value, we are allocating 16 bytes
+ * on the stack to respect the 16-byte stack-alignment.
+ */
+ str x0, [sp, #-16]!
+
+ /* Load the SVC arguments values into the appropriate registers. */
+ ldp x6, x7, [x0, #48]
+ ldp x4, x5, [x0, #32]
+ ldp x2, x3, [x0, #16]
+ ldp x0, x1, [x0, #0]
+
+ svc #0
+
+ /*
+ * Pop the svc_args structure address from the stack into a caller-saved
+ * register.
+ */
+ ldr x9, [sp], #16
+
+ /*
+ * The return values are stored in x0-x3, put them in the svc_args
+ * return structure.
+ */
+ stp x0, x1, [x9, #0]
+ stp x2, x3, [x9, #16]
+ ret
+endfunc sp_svc
diff --git a/spm/common/sp_helpers.c b/spm/common/sp_helpers.c
new file mode 100644
index 000000000..cccaf8bc7
--- /dev/null
+++ b/spm/common/sp_helpers.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+uintptr_t bound_rand(uintptr_t min, uintptr_t max)
+{
+ /*
+ * This is not ideal as some numbers will never be generated because of
+ * the integer arithmetic rounding.
+ */
+ return ((rand() * (UINT64_MAX/RAND_MAX)) % (max - min)) + min;
+}
+
+/*******************************************************************************
+ * Test framework helpers
+ ******************************************************************************/
+
+void expect(int expr, int expected)
+{
+ if (expr != expected) {
+ ERROR("Expected value %i, got %i\n", expected, expr);
+ while (1)
+ continue;
+ }
+}
+
+void announce_test_section_start(const char *test_sect_desc)
+{
+ INFO("========================================\n");
+ INFO("Starting %s tests\n", test_sect_desc);
+ INFO("========================================\n");
+}
+void announce_test_section_end(const char *test_sect_desc)
+{
+ INFO("========================================\n");
+ INFO("End of %s tests\n", test_sect_desc);
+ INFO("========================================\n");
+}
+
+void announce_test_start(const char *test_desc)
+{
+ INFO("[+] %s\n", test_desc);
+}
+
+void announce_test_end(const char *test_desc)
+{
+ INFO("Test \"%s\" passed.\n", test_desc);
+}
+
+void sp_sleep(uint32_t duration_sec)
+{
+ uint32_t timer_freq = mmio_read_32(SYS_CNT_CONTROL_BASE + CNTFID_OFF);
+ VERBOSE("%s: Timer frequency = %u\n", __func__, timer_freq);
+
+ INFO("%s: Sleeping for %u seconds...\n", __func__, duration_sec);
+ uint64_t time1 = mmio_read_64(SYS_CNT_READ_BASE);
+ volatile uint64_t time2 = time1;
+ while ((time2 - time1) < duration_sec * timer_freq) {
+ time2 = mmio_read_64(SYS_CNT_READ_BASE);
+ }
+
+ INFO("%s: Done\n", __func__);
+}
diff --git a/spm/common/sp_helpers.h b/spm/common/sp_helpers.h
new file mode 100644
index 000000000..ab6415d6c
--- /dev/null
+++ b/spm/common/sp_helpers.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_HELPERS_H
+#define SP_HELPERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+typedef struct {
+ u_register_t arg0;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+} svc_args;
+
+/*
+ * Trigger an SVC call.
+ *
+ * The arguments to pass through the SVC call must be stored in the svc_args
+ * structure. The return values of the SVC call will be stored in the same
+ * structure (overriding the input arguments).
+ *
+ * Return the first return value. It is equivalent to args.arg0 but is also
+ * provided as the return value for convenience.
+ */
+u_register_t sp_svc(svc_args *args);
+
+/*
+ * Choose a pseudo-random number within the [min,max] range (both limits are
+ * inclusive).
+ */
+uintptr_t bound_rand(uintptr_t min, uintptr_t max);
+
+/*
+ * Check that expr == expected.
+ * If not, loop forever.
+ */
+void expect(int expr, int expected);
+
+/*
+ * Test framework functions
+ */
+
+void announce_test_section_start(const char *test_sect_desc);
+void announce_test_section_end(const char *test_sect_desc);
+
+void announce_test_start(const char *test_desc);
+void announce_test_end(const char *test_desc);
+
+/* Sleep for at least 'duration_sec' seconds then return. */
+void sp_sleep(uint32_t duration_sec);
+
+#endif /* SP_HELPERS_H */
diff --git a/tftf/framework/aarch32/arch.c b/tftf/framework/aarch32/arch.c
new file mode 100644
index 000000000..763ea1a13
--- /dev/null
+++ b/tftf/framework/aarch32/arch.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <tftf.h>
+
+
+void tftf_arch_setup(void)
+{
+	/* The framework expects to have been entered in Hyp mode. */
+	if (!IS_IN_HYP())
+		panic();
+
+	/* Set HCR.TGE to trap general exceptions to Hyp mode. */
+	write_hcr(HCR_TGE_BIT);
+
+	/*
+	 * Synchronize so the HCR write takes effect before continuing,
+	 * matching the AArch64 implementation of this function.
+	 */
+	isb();
+}
diff --git a/tftf/framework/aarch32/asm_debug.S b/tftf/framework/aarch32/asm_debug.S
new file mode 100644
index 000000000..d2b2c79a3
--- /dev/null
+++ b/tftf/framework/aarch32/asm_debug.S
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#if ENABLE_ASSERTIONS
+
+ .globl asm_assert
+
+/* Since the max decimal input number is 65536 */
+#define MAX_DEC_DIVISOR 10000
+
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM '0'
+
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * r0 - File name
+ * r1 - Line no
+ * Clobber list : lr, r0 - r6
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+	/*
+	 * Print "ASSERT: File <name> Line <no>" on the crash console, then
+	 * park the CPU forever. The message is printed unconditionally
+	 * whenever the crash console initializes successfully.
+	 */
+	/* Stash the parameters already in r0 and r1 */
+	mov	r5, r0
+	mov	r6, r1
+
+	/* Initialize crash console and verify success */
+	bl	plat_crash_console_init
+	cmp	r0, #0
+	beq	1f
+
+	/* Print file name */
+	ldr	r4, =assert_msg1
+	bl	asm_print_str
+	mov	r4, r5
+	bl	asm_print_str
+
+	/* Print line number string */
+	ldr	r4, =assert_msg2
+	bl	asm_print_str
+
+	/* Test for maximum supported line number */
+	ldr	r4, =~0xffff
+	tst	r6, r4
+	bne	1f
+	mov	r4, r6
+
+	/* Print line number in decimal */
+	mov	r6, #10			/* Divide by 10 after every loop iteration */
+	ldr	r5, =MAX_DEC_DIVISOR
+dec_print_loop:
+	udiv	r0, r4, r5		/* Quotient */
+	mls	r4, r0, r5, r4		/* Remainder */
+	add	r0, r0, #ASCII_OFFSET_NUM	/* Convert to ASCII */
+	bl	plat_crash_console_putc
+	udiv	r5, r5, r6		/* Reduce divisor */
+	cmp	r5, #0
+	bne	dec_print_loop
+
+	bl	plat_crash_console_flush
+
+1:
+	/* Assertions are fatal: spin here (mostly in WFI) forever. */
+	wfi
+	b	1b
+endfunc asm_assert
+
+/*
+ * This function prints a NUL-terminated string from the address in r4.
+ * In: r4 = pointer to string.
+ * Clobber: lr, r0 - r4
+ */
+func asm_print_str
+	/* Preserve the return address: the bl below overwrites lr. */
+	mov	r3, lr
+1:
+	/* Load the next character, post-incrementing the pointer. */
+	ldrb	r0, [r4], #0x1
+	cmp	r0, #0
+	beq	2f
+	bl	plat_crash_console_putc
+	b	1b
+2:
+	bx	r3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in r4.
+ * In: r4 = the hexadecimal to print.
+ * Clobber: lr, r0 - r3, r5
+ */
+func asm_print_hex
+	/* Preserve the return address: the bl below overwrites lr. */
+	mov	r3, lr
+	mov	r5, #32		/* No of bits to convert to ascii */
+1:
+	/* Extract the next nibble, most significant first. */
+	sub	r5, r5, #4
+	lsr	r0, r4, r5
+	and	r0, r0, #0xf
+	cmp	r0, #0xa
+	blo	2f
+	/* Add by 0x27 in addition to ASCII_OFFSET_NUM
+	 * to get ascii for characters 'a - f'.
+	 */
+	add	r0, r0, #0x27
+2:
+	add	r0, r0, #ASCII_OFFSET_NUM
+	bl	plat_crash_console_putc
+	cmp	r5, #0
+	bne	1b
+	bx	r3
+endfunc asm_print_hex
+
+#endif /* ENABLE_ASSERTIONS */
diff --git a/tftf/framework/aarch32/entrypoint.S b/tftf/framework/aarch32/entrypoint.S
new file mode 100644
index 000000000..04a7d4c85
--- /dev/null
+++ b/tftf/framework/aarch32/entrypoint.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_entrypoint
+ .globl tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Cold boot entry point for the primary CPU.
+ * Never returns: it ends with a tail branch to tftf_cold_boot_main().
+ * ----------------------------------------------------------------------------
+ */
+func tftf_entrypoint
+	/* --------------------------------------------------------------------
+	 * Set the exception vectors
+	 * --------------------------------------------------------------------
+	 */
+	ldr	r0, =tftf_vector
+	stcopr	r0, HVBAR
+
+	/* --------------------------------------------------------------------
+	 * Enable the instruction cache and asynchronous interrupts.
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, HSCTLR
+	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	orr	r0, r0, r1
+	stcopr	r0, HSCTLR
+	isb
+
+	/* --------------------------------------------------------------------
+	 * This code is expected to be executed only by the primary CPU.
+	 * Save the mpid for the first core that executes and if a secondary
+	 * CPU has lost its way make it spin forever.
+	 * --------------------------------------------------------------------
+	 */
+	bl	save_primary_mpid
+
+	/* --------------------------------------------------------------------
+	 * Zero out NOBITS sections. There are 2 of them:
+	 * - the .bss section;
+	 * - the coherent memory section.
+	 * zeromem is called with r0 = base address, r1 = size in bytes.
+	 * --------------------------------------------------------------------
+	 */
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_SIZE__
+	bl	zeromem
+
+	ldr	r0, =__COHERENT_RAM_START__
+	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a small coherent stack to ease the pain of
+	 * initializing the MMU
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, MPIDR
+	bl	platform_set_coherent_stack
+
+	bl	tftf_early_platform_setup
+	bl	tftf_plat_arch_setup
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a stack allocated in Normal -IS-WBWA memory
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, MPIDR
+	bl	platform_set_stack
+
+	/* --------------------------------------------------------------------
+	 * tftf_cold_boot_main() will perform the remaining architectural and
+	 * platform setup, initialise the test framework's state, then run the
+	 * tests.
+	 * --------------------------------------------------------------------
+	 */
+	b	tftf_cold_boot_main
+endfunc tftf_entrypoint
+
+/* ----------------------------------------------------------------------------
+ * Entry point for a CPU that has just been powered up.
+ * In : r0 - context_id
+ * Never returns: it ends with a tail branch to tftf_warm_boot_main().
+ * ----------------------------------------------------------------------------
+ */
+func tftf_hotplug_entry
+
+	/* --------------------------------------------------------------------
+	 * Preserve the context_id in a callee-saved register
+	 * --------------------------------------------------------------------
+	 */
+	mov	r4, r0
+
+	/* --------------------------------------------------------------------
+	 * Set the exception vectors
+	 * --------------------------------------------------------------------
+	 */
+	ldr	r0, =tftf_vector
+	stcopr	r0, HVBAR
+
+	/* --------------------------------------------------------------------
+	 * Enable the instruction cache and asynchronous interrupts.
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, HSCTLR
+	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	orr	r0, r0, r1
+	stcopr	r0, HSCTLR
+	isb
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a small coherent stack to ease the pain of
+	 * initializing the MMU
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, MPIDR
+	bl	platform_set_coherent_stack
+
+	/* --------------------------------------------------------------------
+	 * Enable the MMU
+	 * --------------------------------------------------------------------
+	 */
+	bl	tftf_plat_enable_mmu
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a stack in normal memory.
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, MPIDR
+	bl	platform_set_stack
+
+	/* --------------------------------------------------------------------
+	 * Save the context_id for later retrieval by tests
+	 * --------------------------------------------------------------------
+	 */
+	ldcopr	r0, MPIDR
+	ldr	r1, =MPID_MASK
+	and	r0, r0, r1
+	bl	platform_get_core_pos
+
+	/* r1 = context_id stashed in r4 on entry */
+	mov	r1, r4
+
+	bl	tftf_set_cpu_on_ctx_id
+
+	/* --------------------------------------------------------------------
+	 * Jump to warm boot main function
+	 * --------------------------------------------------------------------
+	 */
+	b	tftf_warm_boot_main
+endfunc tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Saves the mpid of the primary core and if the primary core
+ * is already saved then it loops infinitely.
+ * Clobber: r0, r1, r2.
+ * ----------------------------------------------------------------------------
+ */
+func save_primary_mpid
+	/* tftf_primary_core == INVALID_MPID means no core has claimed it yet. */
+	ldr	r1, =tftf_primary_core
+	ldr	r0, [r1]
+	mov	r2, #INVALID_MPID
+	cmp	r0, r2
+	bne	panic
+	/* Record this core's (masked) MPIDR as the primary core. */
+	ldr	r2, =MPID_MASK
+	ldcopr	r0, MPIDR
+	and	r0, r0, r2
+	str	r0, [r1]
+	bx	lr
+panic:
+	/* Primary core MPID already saved */
+	b	panic
+endfunc save_primary_mpid
diff --git a/tftf/framework/aarch32/exceptions.S b/tftf/framework/aarch32/exceptions.S
new file mode 100644
index 000000000..1e6c574db
--- /dev/null
+++ b/tftf/framework/aarch32/exceptions.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_vector
+
+/* Hyp-mode exception vector table (installed in HVBAR at boot). */
+vector_base tftf_vector
+	b	tftf_entrypoint
+	b	.		/* Undef */
+	b	.		/* Syscall */
+	b	.		/* Prefetch abort */
+	b	.		/* Data abort */
+	b	.		/* Hyp trap */
+	b	tftf_intr_handle/* IRQ */
+	b	.		/* FIQ */
+
+/* ----------------------------------------------------------------------------
+ * The IRQ exception handler
+ * ----------------------------------------------------------------------------
+ */
+func tftf_intr_handle
+	/* Save the caller-saved registers the C dispatcher may clobber. */
+	push	{r0 - r3, lr}
+	bl	tftf_irq_handler_dispatcher
+	pop	{r0 - r3, lr}
+	eret
+endfunc tftf_intr_handle
diff --git a/tftf/framework/aarch64/arch.c b/tftf/framework/aarch64/arch.c
new file mode 100644
index 000000000..dfaa9d1bd
--- /dev/null
+++ b/tftf/framework/aarch64/arch.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+
+/* Architectural setup for the test framework at cold boot. */
+void tftf_arch_setup(void)
+{
+	/* Do not try to configure EL2 if TFTF is running at NS-EL1 */
+	if (!IS_IN_EL2())
+		return;
+
+	/* Set HCR_EL2.TGE and make sure the write has taken effect. */
+	write_hcr_el2(HCR_TGE_BIT);
+	isb();
+}
diff --git a/tftf/framework/aarch64/asm_debug.S b/tftf/framework/aarch64/asm_debug.S
new file mode 100644
index 000000000..32c454f86
--- /dev/null
+++ b/tftf/framework/aarch64/asm_debug.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#if ENABLE_ASSERTIONS
+
+ .globl asm_assert
+
+/* Since the max decimal input number is 65536 */
+#define MAX_DEC_DIVISOR 10000
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM 0x30
+
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+	/*
+	 * This macro is intended to be used to print the
+	 * line number in decimal. Used by asm_assert macro.
+	 * The max number expected is 65536.
+	 * In: x4 = the decimal to print.
+	 * Clobber: x30, x0, x1, x2, x5, x6
+	 */
+	.macro asm_print_line_dec
+	mov	x6, #10		/* Divide by 10 after every loop iteration */
+	mov	x5, #MAX_DEC_DIVISOR
+dec_print_loop:
+	udiv	x0, x4, x5		/* Get the quotient */
+	msub	x4, x0, x5, x4		/* Find the remainder */
+	add	x0, x0, #ASCII_OFFSET_NUM	/* Convert to ascii */
+	bl	plat_crash_console_putc
+	udiv	x5, x5, x6		/* Reduce divisor */
+	cbnz	x5, dec_print_loop
+	.endm
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * x0 - File name
+ * x1 - Line no
+ * Clobber list : x30, x0, x1, x2, x3, x4, x5, x6.
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+	/* Stash the file name and line number before printing clobbers x0/x1. */
+	mov	x5, x0
+	mov	x6, x1
+	/* Ensure the console is initialized */
+	bl	plat_crash_console_init
+	/* Check if the console is initialized */
+	cbz	x0, _assert_loop
+	/* The console is initialized */
+	adr	x4, assert_msg1
+	bl	asm_print_str
+	mov	x4, x5
+	bl	asm_print_str
+	adr	x4, assert_msg2
+	bl	asm_print_str
+	/* Check if line number higher than max permitted */
+	tst	x6, #~0xffff
+	b.ne	_assert_loop
+	mov	x4, x6
+	asm_print_line_dec
+	bl	plat_crash_console_flush
+_assert_loop:
+	/* Assertions are fatal: park the CPU (mostly in WFI) forever. */
+	wfi
+	b	_assert_loop
+endfunc asm_assert
+
+/*
+ * This function prints a NUL-terminated string from address in x4.
+ * In: x4 = pointer to string.
+ * Clobber: x30, x0, x1, x2, x3
+ */
+func asm_print_str
+	/* Preserve the return address: the bl below overwrites x30. */
+	mov	x3, x30
+1:
+	/* Load the next character, post-incrementing the pointer. */
+	ldrb	w0, [x4], #0x1
+	cbz	x0, 2f
+	bl	plat_crash_console_putc
+	b	1b
+2:
+	ret	x3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in x4.
+ * In: x4 = the hexadecimal to print.
+ * Clobber: x30, x0 - x3, x5
+ */
+func asm_print_hex
+	/* Preserve the return address: the bl below overwrites x30. */
+	mov	x3, x30
+	mov	x5, #64		/* No of bits to convert to ascii */
+1:
+	/* Extract the next nibble, most significant first. */
+	sub	x5, x5, #4
+	lsrv	x0, x4, x5
+	and	x0, x0, #0xf
+	cmp	x0, #0xA
+	b.lo	2f
+	/* Add by 0x27 in addition to ASCII_OFFSET_NUM
+	 * to get ascii for characters 'a - f'.
+	 */
+	add	x0, x0, #0x27
+2:
+	add	x0, x0, #ASCII_OFFSET_NUM
+	bl	plat_crash_console_putc
+	cbnz	x5, 1b
+	ret	x3
+endfunc asm_print_hex
+
+#endif /* ENABLE_ASSERTIONS */
diff --git a/tftf/framework/aarch64/entrypoint.S b/tftf/framework/aarch64/entrypoint.S
new file mode 100644
index 000000000..dfedeae75
--- /dev/null
+++ b/tftf/framework/aarch64/entrypoint.S
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <tftf.h>
+
+ .globl tftf_entrypoint
+ .globl tftf_hotplug_entry
+
+
+/* ----------------------------------------------------------------------------
+ * Cold boot entry point for the primary CPU.
+ * Never returns: it ends with a tail branch to tftf_cold_boot_main().
+ * ----------------------------------------------------------------------------
+ */
+func tftf_entrypoint
+	/* --------------------------------------------------------------------
+	 * Set the exception vectors
+	 * --------------------------------------------------------------------
+	 */
+	adr	x0, tftf_vector
+	asm_write_vbar_el1_or_el2 x1
+
+	/* --------------------------------------------------------------------
+	 * Enable the instruction cache, stack pointer and data access
+	 * alignment checks
+	 * --------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	asm_read_sctlr_el1_or_el2
+	orr	x0, x0, x1
+	asm_write_sctlr_el1_or_el2 x1
+	isb
+
+	/* --------------------------------------------------------------------
+	 * This code is expected to be executed only by the primary CPU.
+	 * Save the mpid for the first core that executes and if a secondary
+	 * CPU has lost its way make it spin forever.
+	 * --------------------------------------------------------------------
+	 */
+	bl	save_primary_mpid
+
+	/* --------------------------------------------------------------------
+	 * Zero out NOBITS sections. There are 2 of them:
+	 * - the .bss section;
+	 * - the coherent memory section.
+	 * zeromem16 is called with x0 = base address, x1 = size in bytes.
+	 * --------------------------------------------------------------------
+	 */
+	ldr	x0, =__BSS_START__
+	ldr	x1, =__BSS_SIZE__
+	bl	zeromem16
+
+	ldr	x0, =__COHERENT_RAM_START__
+	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem16
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a small coherent stack to ease the pain of
+	 * initializing the MMU
+	 * --------------------------------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	bl	platform_set_coherent_stack
+
+	bl	tftf_early_platform_setup
+	bl	tftf_plat_arch_setup
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a stack allocated in Normal -IS-WBWA memory
+	 * --------------------------------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	bl	platform_set_stack
+
+	/* --------------------------------------------------------------------
+	 * tftf_cold_boot_main() will perform the remaining architectural and
+	 * platform setup, initialise the test framework's state, then run the
+	 * tests.
+	 * --------------------------------------------------------------------
+	 */
+	b	tftf_cold_boot_main
+
+dead:
+	/* NOTE(review): unreachable — the unconditional branch above never
+	 * falls through to this label. */
+	b	dead
+endfunc tftf_entrypoint
+
+/* ----------------------------------------------------------------------------
+ * Entry point for a CPU that has just been powered up.
+ * In : x0 - context_id
+ * Never returns: it ends with a tail branch to tftf_warm_boot_main().
+ * ----------------------------------------------------------------------------
+ */
+func tftf_hotplug_entry
+
+	/* --------------------------------------------------------------------
+	 * Preserve the context_id in a callee-saved register
+	 * --------------------------------------------------------------------
+	 */
+	mov	x19, x0
+
+	/* --------------------------------------------------------------------
+	 * Set the exception vectors
+	 * --------------------------------------------------------------------
+	 */
+	adr	x0, tftf_vector
+	asm_write_vbar_el1_or_el2 x1
+
+	/* --------------------------------------------------------------------
+	 * Enable the instruction cache, stack pointer and data access
+	 * alignment checks
+	 * --------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	asm_read_sctlr_el1_or_el2
+	orr	x0, x0, x1
+	asm_write_sctlr_el1_or_el2 x1
+	isb
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a small coherent stack to ease the pain of
+	 * initializing the MMU
+	 * --------------------------------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	bl	platform_set_coherent_stack
+
+	/* --------------------------------------------------------------------
+	 * Enable the MMU
+	 * --------------------------------------------------------------------
+	 */
+	bl	tftf_plat_enable_mmu
+
+	/* --------------------------------------------------------------------
+	 * Give ourselves a stack in normal memory.
+	 * --------------------------------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	bl	platform_set_stack
+
+	/* --------------------------------------------------------------------
+	 * Save the context_id for later retrieval by tests
+	 * --------------------------------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	and	x0, x0, #MPID_MASK
+	bl	platform_get_core_pos
+
+	/* x1 = context_id stashed in x19 on entry */
+	mov	x1, x19
+
+	bl	tftf_set_cpu_on_ctx_id
+
+	/* --------------------------------------------------------------------
+	 * Jump to warm boot main function
+	 * --------------------------------------------------------------------
+	 */
+	b	tftf_warm_boot_main
+endfunc tftf_hotplug_entry
+
+/* ----------------------------------------------------------------------------
+ * Saves the mpid of the primary core and if the primary core
+ * is already saved then it loops infinitely.
+ * Clobber: x0, x1, x2.
+ * ----------------------------------------------------------------------------
+ */
+func save_primary_mpid
+	/* tftf_primary_core == INVALID_MPID means no core has claimed it yet. */
+	adrp	x1, tftf_primary_core
+	ldr	w0, [x1, :lo12:tftf_primary_core]
+	mov	w2, #INVALID_MPID
+	cmp	w0, w2
+	b.ne	panic
+	/* Record this core's (masked) MPIDR as the primary core. */
+	mov	x2, #MPID_MASK
+	mrs	x0, mpidr_el1
+	and	x0, x0, x2
+	str	w0, [x1, :lo12:tftf_primary_core]
+	ret
+panic:
+	/* Primary core MPID already saved */
+	b	.
+endfunc save_primary_mpid
diff --git a/tftf/framework/aarch64/exceptions.S b/tftf/framework/aarch64/exceptions.S
new file mode 100644
index 000000000..08bef4663
--- /dev/null
+++ b/tftf/framework/aarch64/exceptions.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl tftf_vector
+
+/* EL1/EL2 exception vector table. Unused vectors branch to themselves. */
+vector_base tftf_vector
+	//-----------------------------------------------------
+	// Current EL with SP0 : 0x0 - 0x180
+	//-----------------------------------------------------
+vector_entry SynchronousExceptionSP0
+	b	SynchronousExceptionSP0
+	check_vector_size SynchronousExceptionSP0
+
+vector_entry IrqSP0
+	b	IrqSP0
+	check_vector_size IrqSP0
+
+vector_entry FiqSP0
+	b	FiqSP0
+	check_vector_size FiqSP0
+
+vector_entry SErrorSP0
+	b	SErrorSP0
+	check_vector_size SErrorSP0
+
+	//-----------------------------------------------------
+	// Current EL with SPx: 0x200 - 0x380
+	//-----------------------------------------------------
+vector_entry SynchronousExceptionSPx
+	b	SynchronousExceptionSPx
+	check_vector_size SynchronousExceptionSPx
+
+vector_entry IrqSPx
+	/*
+	 * TODO: Investigate whether the Trusted Firmware-A code for context
+	 * save/restore could be reused
+	 */
+	stp	x29, x30, [sp, #-0x10]!
+	bl	save_regs
+	bl	tftf_irq_handler_dispatcher
+	bl	restore_regs
+	ldp	x29, x30, [sp], #0x10
+	eret
+	check_vector_size IrqSPx
+
+vector_entry FiqSPx
+	b	FiqSPx
+	check_vector_size FiqSPx
+
+vector_entry SErrorSPx
+	b	SErrorSPx
+	check_vector_size SErrorSPx
+
+	//-----------------------------------------------------
+	// Lower EL using AArch64 : 0x400 - 0x580
+	//-----------------------------------------------------
+vector_entry SynchronousExceptionA64
+	b	SynchronousExceptionA64
+	check_vector_size SynchronousExceptionA64
+
+vector_entry IrqA64
+	b	IrqA64
+	check_vector_size IrqA64
+
+vector_entry FiqA64
+	b	FiqA64
+	check_vector_size FiqA64
+
+vector_entry SErrorA64
+	b	SErrorA64
+	check_vector_size SErrorA64
+
+	//-----------------------------------------------------
+	// Lower EL using AArch32 : 0x600 - 0x780
+	//-----------------------------------------------------
+vector_entry SynchronousExceptionA32
+	b	SynchronousExceptionA32
+	check_vector_size SynchronousExceptionA32
+
+vector_entry IrqA32
+	b	IrqA32
+	check_vector_size IrqA32
+
+vector_entry FiqA32
+	b	FiqA32
+	check_vector_size FiqA32
+
+vector_entry SErrorA32
+	b	SErrorA32
+	check_vector_size SErrorA32
+
+// Note: Exceptions will always be from the same EL, so no need to save spsr
+// Builds a 0x100-byte frame on the stack holding x0-x28 and sp_el0.
+func save_regs
+	sub	sp, sp, #0x100
+	stp	x0, x1, [sp, #0x0]
+	stp	x2, x3, [sp, #0x10]
+	stp	x4, x5, [sp, #0x20]
+	stp	x6, x7, [sp, #0x30]
+	stp	x8, x9, [sp, #0x40]
+	stp	x10, x11, [sp, #0x50]
+	stp	x12, x13, [sp, #0x60]
+	stp	x14, x15, [sp, #0x70]
+	stp	x16, x17, [sp, #0x80]
+	stp	x18, x19, [sp, #0x90]
+	stp	x20, x21, [sp, #0xa0]
+	stp	x22, x23, [sp, #0xb0]
+	stp	x24, x25, [sp, #0xc0]
+	stp	x26, x27, [sp, #0xd0]
+	mrs	x0, sp_el0
+	stp	x28, x0, [sp, #0xe0]
+	/*
+	 * NOTE(review): sp_el0 is stored twice — at #0xe8 by the stp above
+	 * and again at #0xf0 here. restore_regs only consumes the #0xe8 copy.
+	 */
+	str	x0, [sp, #0xf0]
+	ret
+endfunc save_regs
+
+
+// Note: Exceptions will always be from the same EL, so no need to restore spsr
+// Unwinds the 0x100-byte frame built by save_regs, restoring x0-x28 and sp_el0.
+func restore_regs
+	/*
+	 * NOTE(review): the value loaded from #0xf0 into x9 is immediately
+	 * overwritten by the ldp below; the #0xf0 slot is effectively unused.
+	 */
+	ldr	x9, [sp, #0xf0]
+	ldp	x28, x9, [sp, #0xe0]
+	msr	sp_el0, x9
+	ldp	x26, x27, [sp, #0xd0]
+	ldp	x24, x25, [sp, #0xc0]
+	ldp	x22, x23, [sp, #0xb0]
+	ldp	x20, x21, [sp, #0xa0]
+	ldp	x18, x19, [sp, #0x90]
+	ldp	x16, x17, [sp, #0x80]
+	ldp	x14, x15, [sp, #0x70]
+	ldp	x12, x13, [sp, #0x60]
+	ldp	x10, x11, [sp, #0x50]
+	ldp	x8, x9, [sp, #0x40]
+	ldp	x6, x7, [sp, #0x30]
+	ldp	x4, x5, [sp, #0x20]
+	ldp	x2, x3, [sp, #0x10]
+	ldp	x0, x1, [sp, #0x0]
+	add	sp, sp, #0x100
+	ret
+endfunc restore_regs
diff --git a/tftf/framework/debug.c b/tftf/framework/debug.c
new file mode 100644
index 000000000..4b4364d7e
--- /dev/null
+++ b/tftf/framework/debug.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <console.h>
+#include <debug.h>
+
+#if DEBUG
+/* Print the panic location, flush the console, then hang forever. */
+void __attribute__((__noreturn__)) do_panic(const char *file, int line)
+{
+	printf("PANIC in file: %s line: %d\n", file, line);
+
+	/* Make sure the message reaches the terminal before hanging. */
+	console_flush();
+
+	while (1)
+		continue;
+}
+
+/* Report that supposedly-unreachable code was executed, then panic. */
+/* NOTE(review): uses mp_printf while do_panic uses printf — confirm whether
+ * this difference is intentional. */
+void __attribute__((__noreturn__)) do_bug_unreachable(const char *file, int line)
+{
+	mp_printf("BUG: Unreachable code!\n");
+	do_panic(file, line);
+}
+
+#else
+/* Release variant: no file/line information is available. */
+void __attribute__((__noreturn__)) do_panic(void)
+{
+	printf("PANIC\n");
+
+	/* Make sure the message reaches the terminal before hanging. */
+	console_flush();
+
+	while (1)
+		continue;
+}
+
+/* Report that supposedly-unreachable code was executed, then panic. */
+void __attribute__((__noreturn__)) do_bug_unreachable(void)
+{
+	mp_printf("BUG: Unreachable code!\n");
+	do_panic();
+}
+#endif
diff --git a/tftf/framework/framework.mk b/tftf/framework/framework.mk
new file mode 100644
index 000000000..f6e1d7b61
--- /dev/null
+++ b/tftf/framework/framework.mk
@@ -0,0 +1,115 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+AUTOGEN_DIR := $(BUILD_PLAT)/autogen
+
+include lib/xlat_tables_v2/xlat_tables.mk
+include lib/compiler-rt/compiler-rt.mk
+
+TFTF_INCLUDES := \
+ -I${AUTOGEN_DIR} \
+ -Itftf/framework/include \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/drivers \
+ -Iinclude/drivers/arm \
+ -Iinclude/drivers/io \
+ -Iinclude/lib \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/extensions \
+ -Iinclude/lib/stdlib \
+ -Iinclude/lib/stdlib/sys \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/plat/common \
+ -Iinclude/runtime_services \
+ -Iinclude/runtime_services/secure_el0_payloads \
+ -Iinclude/runtime_services/secure_el1_payloads
+
+# Standard C library source files
+STD_LIB_SOURCES := lib/stdlib/abort.c \
+ lib/stdlib/assert.c \
+ lib/stdlib/mem.c \
+ lib/stdlib/printf.c \
+ lib/stdlib/putchar.c \
+ lib/stdlib/puts.c \
+ lib/stdlib/rand.c \
+ lib/stdlib/strchr.c \
+ lib/stdlib/strcmp.c \
+ lib/stdlib/strlen.c \
+ lib/stdlib/strncmp.c \
+ lib/stdlib/strncpy.c \
+ lib/stdlib/subr_prf.c
+
+FRAMEWORK_SOURCES := ${AUTOGEN_DIR}/tests_list.c
+
+FRAMEWORK_SOURCES += $(addprefix tftf/, \
+ framework/${ARCH}/arch.c \
+ framework/${ARCH}/asm_debug.S \
+ framework/${ARCH}/entrypoint.S \
+ framework/${ARCH}/exceptions.S \
+ framework/debug.c \
+ framework/main.c \
+ framework/nvm_results_helpers.c \
+ framework/report.c \
+ framework/timer/timer_framework.c \
+ tests/common/test_helpers.c \
+)
+
+FRAMEWORK_SOURCES += \
+ lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/misc_helpers.S \
+ lib/delay/delay.c \
+ lib/events/events.c \
+ lib/extensions/amu/${ARCH}/amu.c \
+ lib/extensions/amu/${ARCH}/amu_helpers.S \
+ lib/irq/irq.c \
+ lib/locks/${ARCH}/spinlock.S \
+ lib/power_management/hotplug/hotplug.c \
+ lib/power_management/suspend/${ARCH}/asm_tftf_suspend.S \
+ lib/power_management/suspend/tftf_suspend.c \
+ lib/psci/psci.c \
+ lib/sdei/sdei.c \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ ${STD_LIB_SOURCES} \
+ lib/trusted_os/trusted_os.c \
+ lib/utils/mp_printf.c \
+ lib/utils/uuid.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ plat/common/${ARCH}/platform_helpers.S \
+ plat/common/${ARCH}/platform_mp_stack.S \
+ plat/common/plat_common.c \
+ plat/common/plat_state_id.c \
+ plat/common/plat_topology.c \
+ plat/common/tftf_nvm_accessors.c
+
+
+FRAMEWORK_SOURCES += ${COMPILER_RT_SRCS}
+
+TFTF_LINKERFILE := tftf/framework/tftf.ld.S
+
+
+TFTF_DEFINES :=
+
+TEST_REPORTS ?= uart:raw
+$(info Selected reports: $(TEST_REPORTS))
+ifneq (,$(findstring uart:raw,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_UART_RAW
+endif
+ifneq (,$(findstring uart:junit,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_UART_JUNIT
+endif
+ifneq (,$(findstring semihosting:raw,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_SEMIHOSTING_RAW
+endif
+ifneq (,$(findstring semihosting:junit,$(TEST_REPORTS)))
+ TFTF_DEFINES += -DTEST_REPORT_SEMIHOSTING_JUNIT
+endif
+
+# Enable dynamic translation tables
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,TFTF_DEFINES,PLAT_XLAT_TABLES_DYNAMIC))
diff --git a/tftf/framework/include/nvm.h b/tftf/framework/include/nvm.h
new file mode 100644
index 000000000..3544c2a9f
--- /dev/null
+++ b/tftf/framework/include/nvm.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __NVM_H__
+#define __NVM_H__
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <tftf.h>
+#include "tests_list.h"
+
+#define TEST_BUFFER_SIZE 0x80
+
+typedef struct {
+ /*
+ * @brief Last executed TFTF build message which consists of date and
+ * time when TFTF is built.
+ *
+ * A mismatch with the build message of currently executing binary will
+ * determine whether TFTF data structures stored in NVM needs to be
+ * initialised or not.
+ */
+ char build_message[BUILD_MESSAGE_SIZE];
+
+ /*
+ * The following 2 fields track the progress in the test session. They
+ * indicate which test case we are dealing with and the progress of this
+ * test, i.e. whether it hasn't started yet, or it is being executed
+ * right now, ...
+ */
+ test_ref_t test_to_run;
+ test_progress_t test_progress;
+
+ /*
+ * @brief Scratch buffer for test internal use.
+ *
+ * A buffer that the test can use as a scratch area for whatever it is
+ * doing.
+ */
+ char testcase_buffer[TEST_BUFFER_SIZE];
+
+ /*
+ * @brief Results of tests.
+ *
+ * @note TESTCASE_RESULT_COUNT is defined in tests_list.h
+ * (auto-generated file).
+ */
+ TESTCASE_RESULT testcase_results[TESTCASE_RESULT_COUNT];
+
+ /*
+ * @brief Size of \a result_buffer.
+ */
+ unsigned result_buffer_size;
+
+ /*
+ * Buffer containing the output of all tests.
+ * Each test appends its output to the end of \a result_buffer.
+ * Tests which produce no output write nothing in \a result_buffer.
+ */
+ char *result_buffer;
+} tftf_state_t;
+
+/*
+ * Helper macros to access fields of \a tftf_state_t structure.
+ */
+#define TFTF_STATE_OFFSET(_field) offsetof(tftf_state_t, _field)
+
+/*
+ * Return 1 if we need to start a new test session;
+ * 0 if we need to resume an interrupted session.
+ */
+unsigned int new_test_session(void);
+
+/*
+ * @brief Initialize NVM if necessary.
+ *
+ * When TFTF is launched on the target, its data structures need
+ * to be initialised in NVM. However if some test resets the board
+ * (as part of its normal behaviour or because it crashed) then
+ * TFTF data structure must be left unchanged in order to resume
+ * the test session where it has been left.
+ *
+ * This function detects whether TFTF has just been launched and if so
+ * initialises its data structures. If TFTF has just reset then it does
+ * nothing.
+ *
+ * @return STATUS_SUCCESS on success, another status code on failure.
+ */
+STATUS tftf_init_nvm(void);
+
+/*
+ * @brief Clean NVM.
+ *
+ * Clean TFTF data structures in NVM.
+ * This function must be called when all tests have completed.
+ *
+ * @return STATUS_SUCCESS on success, another status code on failure.
+ */
+STATUS tftf_clean_nvm(void);
+
+/* Writes the buffer to the flash at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS tftf_nvm_write(unsigned long long offset, const void *buffer, size_t size);
+
+/* Reads the flash into buffer at offset with length equal to
+ * size
+ * Returns: STATUS_FAIL, STATUS_SUCCESS, STATUS_OUT_OF_RESOURCES
+ */
+STATUS tftf_nvm_read(unsigned long long offset, void *buffer, size_t size);
+#endif /*__ASSEMBLY__*/
+
+#endif
diff --git a/tftf/framework/include/tftf.h b/tftf/framework/include/tftf.h
new file mode 100644
index 000000000..8231e284a
--- /dev/null
+++ b/tftf/framework/include/tftf.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TFTF_H__
+#define __TFTF_H__
+
+#ifndef __ASSEMBLY__
+#include <status.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+#define TFTF_WELCOME_STR "Booting trusted firmware test framework"
+
+/* Maximum size of test output (in bytes) */
+#define TESTCASE_OUTPUT_MAX_SIZE 512
+
+/* Size of build message used to differentiate different TFTF binaries */
+#define BUILD_MESSAGE_SIZE 0x20
+
+extern const char build_message[];
+
+typedef test_result_t (*test_function_t)(void);
+
+typedef struct {
+ /* Test result (success, crashed, failed, ...). */
+ test_result_t result;
+ unsigned long long duration;
+ /*
+ * Offset of test output string from TEST_NVM_RESULT_BUFFER_OFFSET.
+ * Only relevant if test has an output, i.e. if \a output_size is not
+ * zero.
+ */
+ unsigned output_offset;
+ /* Size of test output string, excluding final \0. */
+ unsigned output_size;
+} TESTCASE_RESULT;
+
+typedef struct {
+ unsigned index;
+ const char *name;
+ const char *description;
+ test_function_t test;
+} test_case_t;
+
+typedef struct {
+ const char *name;
+ const char *description;
+ const test_case_t *testcases;
+} test_suite_t;
+
+/*
+ * Reference to a specific test.
+ */
+typedef struct {
+ unsigned int testsuite_idx;
+ unsigned int testcase_idx;
+} test_ref_t;
+
+/*
+ * The progress in the execution of a test.
+ * This is used to implement the following state machine.
+ *
+ * +-> TEST_READY (initial state of the test) <--------------+
+ * | | |
+ * | | Test framework prepares the test environment. |
+ * | | |
+ * | v |
+ * | TEST_IN_PROGRESS |
+ * | | |
+ * | | Hand over to the test function. |
+ * | | If the test wants to reboot the platform ---> TEST_REBOOTING |
+ * | | Test function returns into framework. | |
+ * | | | Reboot |
+ * | | | |
+ * | | +---------+
+ * | v
+ * | TEST_COMPLETE
+ * | |
+ * | | Do some framework management.
+ * | | Move to next test.
+ * +--------+
+ */
+typedef enum {
+ TEST_PROGRESS_MIN = 0,
+ TEST_READY = TEST_PROGRESS_MIN,
+ TEST_IN_PROGRESS,
+ TEST_COMPLETE,
+ TEST_REBOOTING,
+
+ TEST_PROGRESS_MAX,
+} test_progress_t;
+
+#define TEST_PROGRESS_IS_VALID(_progress) \
+ ((_progress >= TEST_PROGRESS_MIN) && (_progress < TEST_PROGRESS_MAX))
+
+/*
+ * The definition of this global variable is generated by the script
+ * 'tftf_generate_test_list' during the build process
+ */
+extern const test_suite_t testsuites[];
+
+extern TESTCASE_RESULT testcase_results[];
+
+/* Set/Get the test to run in NVM */
+STATUS tftf_set_test_to_run(const test_ref_t test_to_run);
+STATUS tftf_get_test_to_run(test_ref_t *test_to_run);
+/* Set/Get the progress of the current test in NVM */
+STATUS tftf_set_test_progress(test_progress_t test_progress);
+STATUS tftf_get_test_progress(test_progress_t *test_progress);
+
+/**
+** Save test result into NVM.
+*/
+STATUS tftf_testcase_set_result(const test_case_t *testcase,
+ test_result_t result,
+ unsigned long long duration);
+/**
+** Get a testcase result from NVM.
+**
+** @param[in] testcase The targeted testcase.
+** @param[out] result Testcase result. Only \a result.result and
+** \a result.duration are of interest for the caller and the 2 other fields
+** should be ignored (they correspond to a location in NVM).
+** @param[out] test_output Buffer to store the test output, if any.
+** \a test_output must be big enough to hold the whole test output.
+** Test output will be \a TESTCASE_OUTPUT_MAX_SIZE bytes maximum.
+*/
+STATUS tftf_testcase_get_result(const test_case_t *testcase, TESTCASE_RESULT *result, char *test_output);
+
+void tftf_report_generate(void);
+
+/*
+ * Exit the TFTF.
+ * This function can be used when a fatal error is encountered or as part of the
+ * normal termination process. It does the necessary cleanups then puts the
+ * core in a low-power state.
+ */
+void __dead2 tftf_exit(void);
+
+void tftf_arch_setup(void);
+
+/*
+ * This function detects the power state format used by PSCI which can
+ * be either extended or original format. For the Original format,
+ * the State-ID can either be NULL or can be using the recommended encoding.
+ * This function needs to be invoked once during cold boot prior to the
+ * invocation of any PSCI power state helper functions.
+ */
+void tftf_detect_psci_pstate_format(void);
+
+/*
+ * Run the next test on the calling CPU.
+ * Once the test is complete, if the calling CPU is the last one to exit the
+ * test then do the necessary bookkeeping, report the overall test result and
+ * move on to the next test. Otherwise, shut down the calling CPU.
+ *
+ * This function never returns.
+ */
+void __dead2 run_tests(void);
+
+/* Entry point for a CPU that has just been powered up */
+void tftf_hotplug_entry(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif
diff --git a/tftf/framework/main.c b/tftf/framework/main.c
new file mode 100644
index 000000000..3f94dc94a
--- /dev/null
+++ b/tftf/framework/main.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <mmio.h>
+#include <nvm.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <string.h>
+#include <sys/types.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/* version information for TFTF */
+extern const char version_string[];
+
+unsigned int lead_cpu_mpid;
+
+/* Defined in hotplug.c */
+extern volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];
+
+/* Per-CPU results for the current test */
+static test_result_t test_results[PLATFORM_CORE_COUNT];
+
+/* Context ID passed to tftf_psci_cpu_on() */
+static u_register_t cpu_on_ctx_id_arr[PLATFORM_CORE_COUNT];
+
+static unsigned int test_is_rebooting;
+
+/* Return a pointer to the test suite currently recorded in NVM. */
+static inline const test_suite_t *current_testsuite(void)
+{
+	test_ref_t test_to_run;
+	/* NOTE(review): return status of tftf_get_test_to_run() is ignored here. */
+	tftf_get_test_to_run(&test_to_run);
+	return &testsuites[test_to_run.testsuite_idx];
+}
+
+/* Return a pointer to the test case currently recorded in NVM. */
+static inline const test_case_t *current_testcase(void)
+{
+	test_ref_t test_to_run;
+	/* NOTE(review): return status of tftf_get_test_to_run() is ignored here. */
+	tftf_get_test_to_run(&test_to_run);
+	return &testsuites[test_to_run.testsuite_idx].
+		testcases[test_to_run.testcase_idx];
+}
+
+/*
+ * Identify the next test in the tests list and update the NVM data to point to
+ * that test.
+ * If there are no more tests to execute, return NULL.
+ * Otherwise, return the test case.
+ */
+static const test_case_t *advance_to_next_test(void)
+{
+	test_ref_t test_to_run;
+	const test_case_t *testcase;
+	unsigned int testcase_idx;
+	unsigned int testsuite_idx;
+
+#if DEBUG
+	/* Only valid to advance once the current test has fully completed. */
+	test_progress_t progress;
+	tftf_get_test_progress(&progress);
+	assert(progress == TEST_COMPLETE);
+#endif
+
+	tftf_get_test_to_run(&test_to_run);
+	testcase_idx = test_to_run.testcase_idx;
+	testsuite_idx = test_to_run.testsuite_idx;
+
+	/* Move to the next test case in the current test suite */
+	++testcase_idx;
+	/* Both the testcases and testsuites arrays are NULL-name terminated. */
+	testcase = &testsuites[testsuite_idx].testcases[testcase_idx];
+
+	if (testcase->name == NULL) {
+		/*
+		 * There's no more test cases in the current test suite so move
+		 * to the first test case of the next test suite.
+		 */
+		const test_suite_t *testsuite;
+		testcase_idx = 0;
+		++testsuite_idx;
+		testsuite = &testsuites[testsuite_idx];
+		testcase = &testsuite->testcases[0];
+
+		if (testsuite->name == NULL) {
+			/*
+			 * This was the last test suite so there's no more tests
+			 * at all.
+			 */
+			return NULL;
+		}
+	}
+
+	/* Persist the new position and re-arm the progress state machine. */
+	VERBOSE("Moving to test (%u,%u)\n", testsuite_idx, testcase_idx);
+	test_to_run.testsuite_idx = testsuite_idx;
+	test_to_run.testcase_idx = testcase_idx;
+	tftf_set_test_to_run(test_to_run);
+	tftf_set_test_progress(TEST_READY);
+
+	return testcase;
+}
+
+/*
+ * This function is executed only by the lead CPU.
+ * It prepares the environment for the next test to run.
+ */
+static void prepare_next_test(void)
+{
+	unsigned int mpid;
+	unsigned int core_pos;
+	unsigned int cpu_node;
+
+	/* This function should be called by the lead CPU only */
+	assert((read_mpidr_el1() & MPID_MASK) == lead_cpu_mpid);
+
+	/*
+	 * Only the lead CPU should be powered on at this stage. All other CPUs
+	 * should be powered off or powering off. If some CPUs are not powered
+	 * off yet, wait for them to power off.
+	 */
+	for_each_cpu(cpu_node) {
+		mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (mpid == lead_cpu_mpid)
+			assert(tftf_is_cpu_online(mpid));
+		else
+			/* Busy-wait until PSCI reports the core as off. */
+			while (tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0)
+					== PSCI_STATE_ON)
+				;
+	}
+
+	/* No CPU should have entered the test yet */
+	assert(tftf_get_ref_cnt() == 0);
+
+	/* Populate the test entrypoint for the lead CPU */
+	core_pos = platform_get_core_pos(lead_cpu_mpid);
+	test_entrypoint[core_pos] = (test_function_t) current_testcase()->test;
+
+	/* Reset per-CPU results; CPUs not joining the test stay at NA. */
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+		test_results[i] = TEST_RESULT_NA;
+
+	NOTICE("Starting unittest '%s - %s'\n",
+		current_testsuite()->name, current_testcase()->name);
+
+	/* Program the watchdog */
+	tftf_platform_watchdog_set();
+
+	/* TODO: Take a 1st timestamp to be able to measure test duration */
+
+	tftf_set_test_progress(TEST_IN_PROGRESS);
+}
+
+/*
+ * Go through individual CPUs' test results and determine the overall
+ * test result from that.
+ */
+static test_result_t get_overall_test_result(void)
+{
+	test_result_t result = TEST_RESULT_NA;
+	unsigned int cpu_mpid;
+	unsigned int cpu_node;
+	unsigned int core_pos;
+
+	/*
+	 * Precedence (strongest first): CRASHED/FAIL/SKIPPED short-circuit
+	 * the scan; SUCCESS is only kept if no CPU reported worse; NA means
+	 * the CPU did not take part and is ignored.
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		switch (test_results[core_pos]) {
+		case TEST_RESULT_NA:
+			VERBOSE("CPU%u not involved in the test\n", core_pos);
+			/* Ignoring */
+			break;
+
+		case TEST_RESULT_SKIPPED:
+			/*
+			 * If at least one CPU skipped the test, consider the
+			 * whole test as skipped as well.
+			 */
+			NOTICE("CPU%u skipped the test\n", core_pos);
+			return TEST_RESULT_SKIPPED;
+
+		case TEST_RESULT_SUCCESS:
+			result = TEST_RESULT_SUCCESS;
+			break;
+
+		case TEST_RESULT_FAIL:
+			ERROR("CPU%u failed the test\n", core_pos);
+			return TEST_RESULT_FAIL;
+
+		case TEST_RESULT_CRASHED:
+			/*
+			 * Means the CPU never returned from the test whereas it
+			 * was supposed to. Either there is a bug in the test's
+			 * implementation or some sort of unexpected crash
+			 * happened.
+			 * If at least one CPU crashed, consider the whole test
+			 * as crashed as well.
+			 */
+			ERROR("CPU%u never returned from the test!\n", core_pos);
+			return TEST_RESULT_CRASHED;
+
+		default:
+			ERROR("Unknown test result value: %u\n",
+				test_results[core_pos]);
+			panic();
+		}
+	}
+
+	/*
+	 * At least one CPU (i.e. the lead CPU) should have participated in the
+	 * test.
+	 */
+	assert(result != TEST_RESULT_NA);
+	return result;
+}
+
+/*
+ * This function is executed by the last CPU to exit the test only.
+ * It does the necessary bookkeeping and reports the overall test result.
+ * If it was the last test, it will also generate the final test report.
+ * Otherwise, it will reset the platform, provided that the platform
+ * supports reset from non-trusted world. This ensures that the next test
+ * runs in a clean environment
+ *
+ * Return 1 if this was the last test, 0 otherwise.
+ */
+static unsigned int close_test(void)
+{
+	const test_case_t *next_test;
+
+#if DEBUG
+	/*
+	 * Check that the test didn't pretend resetting the platform, when in
+	 * fact it returned into the framework.
+	 *
+	 * If that happens, the test implementation should be fixed.
+	 * However, it is not a fatal error so just flag the problem in debug
+	 * builds.
+	 */
+	test_progress_t progress;
+	tftf_get_test_progress(&progress);
+	assert(progress != TEST_REBOOTING);
+#endif /* DEBUG */
+
+	tftf_set_test_progress(TEST_COMPLETE);
+	test_is_rebooting = 0;
+
+	/* TODO: Take a 2nd timestamp and compute test duration */
+
+	/* Reset watchdog */
+	tftf_platform_watchdog_reset();
+
+	/* Ensure no CPU is still executing the test */
+	assert(tftf_get_ref_cnt() == 0);
+
+	/* Save test result in NVM */
+	test_result_t overall_test_result = get_overall_test_result();
+	/* Duration is hard-coded to 0 until timestamping is implemented (TODO above). */
+	tftf_testcase_set_result(current_testcase(),
+				overall_test_result,
+				0);
+
+	NOTICE("Unittest '%s - %s' complete. Result: %s\n",
+		current_testsuite()->name, current_testcase()->name,
+		test_result_to_string(overall_test_result));
+
+	/* The test is finished, let's move to the next one (if any) */
+	next_test = advance_to_next_test();
+
+	/* If this was the last test then report all results */
+	if (!next_test) {
+		tftf_report_generate();
+		tftf_clean_nvm();
+		return 1;
+	} else {
+#if (PLAT_SUPPORTS_NS_RESET && !NEW_TEST_SESSION && USE_NVM)
+		/*
+		 * Reset the platform so that the next test runs in a clean
+		 * environment.
+		 */
+		INFO("Reset platform before executing next test:%p\n",
+				(void *) &(next_test->test));
+		tftf_plat_reset();
+		bug_unreachable();
+#endif
+	}
+
+	/* Reached only when the platform-reset path above is compiled out. */
+	return 0;
+}
+
+/*
+ * Hand over to lead CPU, i.e.:
+ * 1) Power on lead CPU
+ * 2) Power down calling CPU
+ */
+static void __dead2 hand_over_to_lead_cpu(void)
+{
+	int ret;
+	unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+
+	VERBOSE("CPU%u: Hand over to lead CPU%u\n", core_pos,
+		platform_get_core_pos(lead_cpu_mpid));
+
+	/*
+	 * Power on lead CPU.
+	 * The entry point address passed as the 2nd argument of tftf_cpu_on()
+	 * doesn't matter because it will be overwritten by prepare_next_test().
+	 * Pass a NULL pointer to easily catch the problem in case something
+	 * goes wrong.
+	 */
+	ret = tftf_cpu_on(lead_cpu_mpid, 0, 0);
+	if (ret != PSCI_E_SUCCESS) {
+		ERROR("CPU%u: Failed to power on lead CPU%u (%d)\n",
+			core_pos, platform_get_core_pos(lead_cpu_mpid), ret);
+		panic();
+	}
+
+	/* Wait for lead CPU to be actually powered on */
+	while (!tftf_is_cpu_online(lead_cpu_mpid))
+		;
+
+	/*
+	 * Lead CPU has successfully booted, let's now power down the calling
+	 * core.
+	 */
+	tftf_cpu_off();
+	/* tftf_cpu_off() should not return; panic if it does. */
+	panic();
+}
+
+void __dead2 run_tests(void)
+{
+	unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	unsigned int test_session_finished;
+	unsigned int cpus_cnt;
+
+	/* Loop over tests; only the final test breaks out (via close_test()). */
+	while (1) {
+		if (mpid == lead_cpu_mpid && (tftf_get_ref_cnt() == 0))
+			prepare_next_test();
+
+		/*
+		 * Increment the reference count to indicate that the CPU is
+		 * participating in the test.
+		 */
+		tftf_inc_ref_cnt();
+
+		/*
+		 * Mark the CPU's test result as "crashed". This is meant to be
+		 * overwritten by the actual test result when the CPU returns
+		 * from the test function into the framework. In case the CPU
+		 * crashes in the test (and thus, never returns from it), this
+		 * variable will hold the right value.
+		 */
+		test_results[core_pos] = TEST_RESULT_CRASHED;
+
+		/*
+		 * Jump to the test entrypoint for this core.
+		 *  - For the lead CPU, it has been populated by
+		 *    prepare_next_test()
+		 *  - For other CPUs, it has been populated by tftf_cpu_on() or
+		 *    tftf_try_cpu_on()
+		 */
+		while (test_entrypoint[core_pos] == 0)
+			;
+
+		test_results[core_pos] = test_entrypoint[core_pos]();
+		/* Clear the entrypoint so a stale pointer is never re-run. */
+		test_entrypoint[core_pos] = 0;
+
+		/*
+		 * Decrement the reference count to indicate that the CPU is not
+		 * participating in the test any longer.
+		 */
+		cpus_cnt = tftf_dec_ref_cnt();
+
+		/*
+		 * Last CPU to exit the test gets to do the necessary
+		 * bookkeeping and to report the overall test result.
+		 * Other CPUs shut down.
+		 */
+		if (cpus_cnt == 0) {
+			test_session_finished = close_test();
+			if (test_session_finished)
+				break;
+
+			/* Non-lead closer hands control back to the lead CPU. */
+			if (mpid != lead_cpu_mpid) {
+				hand_over_to_lead_cpu();
+				bug_unreachable();
+			}
+		} else {
+			tftf_cpu_off();
+			panic();
+		}
+	}
+
+	tftf_exit();
+
+	/* Should never reach this point */
+	bug_unreachable();
+}
+
+/* Return the context ID registered for CPU 'core_pos' (see tftf_set_cpu_on_ctx_id()). */
+u_register_t tftf_get_cpu_on_ctx_id(unsigned int core_pos)
+{
+	assert(core_pos < PLATFORM_CORE_COUNT);
+
+	return cpu_on_ctx_id_arr[core_pos];
+}
+
+/* Record the context ID to pass to CPU 'core_pos' when it is powered on. */
+void tftf_set_cpu_on_ctx_id(unsigned int core_pos, u_register_t context_id)
+{
+	assert(core_pos < PLATFORM_CORE_COUNT);
+
+	cpu_on_ctx_id_arr[core_pos] = context_id;
+}
+
+/* Return non-zero if the current test resumed after an intentional reboot. */
+unsigned int tftf_is_rebooted(void)
+{
+	return test_is_rebooting;
+}
+
+/*
+ * Return 0 if the test session can be resumed
+ * -1 otherwise.
+ */
+static int resume_test_session(void)
+{
+	test_ref_t test_to_run;
+	test_progress_t test_progress;
+	const test_case_t *next_test;
+
+	/* Get back on our feet. Where did we stop? */
+	tftf_get_test_to_run(&test_to_run);
+	tftf_get_test_progress(&test_progress);
+	assert(TEST_PROGRESS_IS_VALID(test_progress));
+
+	/* Recovery action depends on where in the state machine we stopped. */
+	switch (test_progress) {
+	case TEST_READY:
+		/*
+		 * The TFTF has reset in the framework code, before the test
+		 * actually started.
+		 * Nothing to update, just start the test from scratch.
+		 */
+		break;
+
+	case TEST_IN_PROGRESS:
+		/*
+		 * The test crashed, i.e. it couldn't complete.
+		 * Update the test result in NVM then move to the next test.
+		 */
+		INFO("Test has crashed, moving to the next one\n");
+		tftf_testcase_set_result(current_testcase(),
+					TEST_RESULT_CRASHED,
+					0);
+		next_test = advance_to_next_test();
+		if (!next_test) {
+			INFO("No more tests\n");
+			return -1;
+		}
+		break;
+
+	case TEST_COMPLETE:
+		/*
+		 * The TFTF has reset in the framework code, after the test had
+		 * completed but before we finished the framework maintenance
+		 * required to move to the next test.
+		 *
+		 * In this case, we don't know the exact state of the data:
+		 * maybe we had the time to update the test result,
+		 * maybe we had the time to move to the next test.
+		 * We can't be sure so let's stay on the safe side and just
+		 * restart the test session from the beginning...
+		 */
+		NOTICE("The test framework has been interrupted in the middle "
+			"of critical maintenance operations.\n");
+		NOTICE("Can't recover execution.\n");
+		return -1;
+
+	case TEST_REBOOTING:
+		/*
+		 * Nothing to update about the test session, as we want to
+		 * re-enter the same test. Just remember that the test is
+		 * rebooting in case it queries this information.
+		 */
+		test_is_rebooting = 1;
+		break;
+
+	default:
+		/* Unreachable: progress value already validated by the assert above. */
+		bug_unreachable();
+	}
+
+	return 0;
+}
+
+/*
+ * C entry point in the TFTF.
+ * This function is executed by the primary CPU only.
+ */
+void __dead2 tftf_cold_boot_main(void)
+{
+	STATUS status;
+	int rc;
+
+	NOTICE("%s\n", TFTF_WELCOME_STR);
+	NOTICE("%s\n", build_message);
+	NOTICE("%s\n\n", version_string);
+
+#ifndef AARCH32
+	NOTICE("Running at NS-EL%u\n", IS_IN_EL(1) ? 1 : 2);
+#else
+	NOTICE("Running in AArch32 HYP mode\n");
+#endif
+
+	/* Bring up architecture, platform, topology, IRQ and timer subsystems. */
+	tftf_arch_setup();
+	tftf_platform_setup();
+	tftf_init_topology();
+
+	tftf_irq_setup();
+
+	rc = tftf_initialise_timer();
+	if (rc != 0) {
+		ERROR("Failed to initialize the timer subsystem (%d).\n", rc);
+		tftf_exit();
+	}
+
+	/* Enable the SGI used by the timer management framework */
+	tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);
+	enable_irq();
+
+	if (new_test_session()) {
+		NOTICE("Starting a new test session\n");
+		status = tftf_init_nvm();
+		if (status != STATUS_SUCCESS) {
+			/*
+			 * TFTF will have an undetermined behavior if its data
+			 * structures have not been initialised. There's no
+			 * point in continuing execution.
+			 */
+			ERROR("FATAL: Failed to initialise internal data structures in NVM.\n");
+			tftf_clean_nvm();
+			tftf_exit();
+		}
+	} else {
+		NOTICE("Resuming interrupted test session\n");
+		rc = resume_test_session();
+		if (rc < 0) {
+			tftf_report_generate();
+			tftf_clean_nvm();
+			tftf_exit();
+		}
+	}
+
+	/* Initialise the CPUs status map */
+	tftf_init_cpus_status_map();
+
+	/*
+	 * Detect power state format and get power state information for
+	 * a platform.
+	 */
+	tftf_init_pstate_framework();
+
+	/* The lead CPU is always the primary core. */
+	lead_cpu_mpid = read_mpidr_el1() & MPID_MASK;
+
+	/*
+	 * Hand over to lead CPU if required.
+	 * If the primary CPU is not the lead CPU for the first test then:
+	 *  1) Power on the lead CPU
+	 *  2) Power down the primary CPU
+	 *
+	 * NOTE(review): as written, this condition can never be true because
+	 * lead_cpu_mpid was just assigned from the same register above;
+	 * presumably kept for a future configurable lead CPU — confirm intent.
+	 */
+	if ((read_mpidr_el1() & MPID_MASK) != lead_cpu_mpid) {
+		hand_over_to_lead_cpu();
+		bug_unreachable();
+	}
+
+	/* Enter the test session */
+	run_tests();
+
+	/* Should never reach this point */
+	bug_unreachable();
+}
+
+/* Terminate the test session: let the platform clean up, then park the core in wfi. */
+void __dead2 tftf_exit(void)
+{
+	NOTICE("Exiting tests.\n");
+
+	/* Let the platform code clean up if required */
+	tftf_platform_end();
+
+	/* Low-power parking loop; never returns. */
+	while (1)
+		wfi();
+}
diff --git a/tftf/framework/nvm_results_helpers.c b/tftf/framework/nvm_results_helpers.c
new file mode 100644
index 000000000..34ef19f42
--- /dev/null
+++ b/tftf/framework/nvm_results_helpers.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <nvm.h>
+#include <platform.h>
+#include <spinlock.h>
+#include <stdarg.h>
+#include <string.h>
+
+/*
+ * Temporary buffer to store 1 test output.
+ * This will eventually be saved into NVM at the end of the execution
+ * of this test.
+ */
+static char testcase_output[TESTCASE_OUTPUT_MAX_SIZE];
+/*
+ * A test output can be written in several pieces by calling
+ * tftf_testcase_printf() multiple times. testcase_output_idx keeps the position
+ * of the last character written in testcase_output buffer and allows to easily
+ * append a new string at next call to tftf_testcase_printf().
+ */
+static unsigned int testcase_output_idx;
+
+/* Lock to avoid concurrent accesses to the testcase output buffer */
+static spinlock_t testcase_output_lock;
+
+static tftf_state_t tftf_init_state = {
+ .build_message = "",
+ .test_to_run = {
+ .testsuite_idx = 0,
+ .testcase_idx = 0,
+ },
+ .test_progress = TEST_READY,
+ .testcase_buffer = { 0 },
+ .testcase_results = {
+ {
+ .result = TEST_RESULT_NA,
+ .duration = 0,
+ .output_offset = 0,
+ .output_size = 0,
+ }
+ },
+ .result_buffer_size = 0,
+ .result_buffer = NULL,
+};
+
+unsigned int new_test_session(void)
+{
+/* NEW_TEST_SESSION == 1 => we always want to start a new session */
+#if NEW_TEST_SESSION
+	INFO("Always starting a new test session (NEW_TEST_SESSION == 1)\n");
+	/* Everything below is compiled out in this configuration. */
+	return 1;
+#endif
+	char saved_build_msg[BUILD_MESSAGE_SIZE];
+
+	/*
+	 * Check the validity of the build message stored in NVM.
+	 * It is invalid when it doesn't match with the TFTF binary currently
+	 * executing.
+	 */
+	tftf_nvm_read(TFTF_STATE_OFFSET(build_message), saved_build_msg,
+		BUILD_MESSAGE_SIZE);
+	/* Non-zero (new session) iff the stored message differs from ours. */
+	return !!strncmp(build_message, saved_build_msg, BUILD_MESSAGE_SIZE);
+}
+
+STATUS tftf_init_nvm(void)
+{
+	INFO("Initialising NVM\n");
+
+	/* Copy the build message to identify the TFTF */
+	/*
+	 * NOTE(review): strncpy() does not NUL-terminate if build_message is
+	 * BUILD_MESSAGE_SIZE bytes or longer; readers here use bounded
+	 * strncmp() so this looks benign, but worth confirming.
+	 */
+	strncpy(tftf_init_state.build_message, build_message, BUILD_MESSAGE_SIZE);
+	return tftf_nvm_write(0, &tftf_init_state, sizeof(tftf_init_state));
+}
+
+STATUS tftf_clean_nvm(void)
+{
+	unsigned char corrupt_build_message = '\0';
+
+	/*
+	 * This will cause TFTF to re-initialise its data structures next time
+	 * it runs.
+	 * Only the first byte of the build message is overwritten; that is
+	 * enough to make the strncmp() check in new_test_session() fail.
+	 */
+	return tftf_nvm_write(TFTF_STATE_OFFSET(build_message),
+			&corrupt_build_message,
+			sizeof(corrupt_build_message));
+}
+
+/* Persist the (suite, testcase) reference of the test to run into NVM. */
+STATUS tftf_set_test_to_run(const test_ref_t test_to_run)
+{
+	return tftf_nvm_write(TFTF_STATE_OFFSET(test_to_run), &test_to_run,
+			sizeof(test_to_run));
+}
+
+/* Read back from NVM the (suite, testcase) reference of the test to run. */
+STATUS tftf_get_test_to_run(test_ref_t *test_to_run)
+{
+	assert(test_to_run != NULL);
+	return tftf_nvm_read(TFTF_STATE_OFFSET(test_to_run), test_to_run,
+			sizeof(*test_to_run));
+}
+
+/* Persist the current test's progress state (see test_progress_t) into NVM. */
+STATUS tftf_set_test_progress(test_progress_t test_progress)
+{
+	return tftf_nvm_write(TFTF_STATE_OFFSET(test_progress), &test_progress,
+			sizeof(test_progress));
+}
+
+/* Read back the current test's progress state from NVM. */
+STATUS tftf_get_test_progress(test_progress_t *test_progress)
+{
+	assert(test_progress != NULL);
+	return tftf_nvm_read(TFTF_STATE_OFFSET(test_progress), test_progress,
+			sizeof(*test_progress));
+}
+
+STATUS tftf_testcase_set_result(const test_case_t *testcase,
+				test_result_t result,
+				unsigned long long duration)
+{
+	STATUS status;
+	unsigned result_buffer_size = 0;
+	TESTCASE_RESULT test_result;
+
+	assert(testcase != NULL);
+
+	/* Initialize Test case result */
+	test_result.result = result;
+	test_result.duration = duration;
+	test_result.output_offset = 0;
+	test_result.output_size = strlen(testcase_output);
+
+	/* Does the test have an output? */
+	if (test_result.output_size != 0) {
+		/* Get the size of the buffer containing all tests outputs */
+		status = tftf_nvm_read(TFTF_STATE_OFFSET(result_buffer_size),
+				&result_buffer_size, sizeof(unsigned));
+		if (status != STATUS_SUCCESS)
+			goto reset_test_output;
+
+		/*
+		 * Write the output buffer at the end of the string buffer in
+		 * NVM
+		 * NOTE(review): no check that the appended output fits in the
+		 * NVM result buffer region — presumably bounded elsewhere;
+		 * confirm against the NVM layout.
+		 */
+		test_result.output_offset = result_buffer_size;
+		status = tftf_nvm_write(
+			TFTF_STATE_OFFSET(result_buffer) + result_buffer_size,
+			testcase_output, test_result.output_size + 1);
+		if (status != STATUS_SUCCESS)
+			goto reset_test_output;
+
+		/* And update the buffer size into NVM */
+		result_buffer_size += test_result.output_size + 1;
+		status = tftf_nvm_write(TFTF_STATE_OFFSET(result_buffer_size),
+				&result_buffer_size, sizeof(unsigned));
+		if (status != STATUS_SUCCESS)
+			goto reset_test_output;
+	}
+
+	/* Write the test result into NVM */
+	status = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_results) +
+			(testcase->index * sizeof(TESTCASE_RESULT)),
+			&test_result, sizeof(TESTCASE_RESULT));
+
+reset_test_output:
+	/* Reset test output buffer for the next test */
+	testcase_output_idx = 0;
+	testcase_output[0] = 0;
+
+	return status;
+}
+
+STATUS tftf_testcase_get_result(const test_case_t *testcase,
+				TESTCASE_RESULT *result,
+				char *test_output)
+{
+	STATUS status;
+	unsigned output_size;
+
+	/* test_output must hold at least output_size + 1 bytes (see header doc). */
+	assert(testcase != NULL);
+	assert(result != NULL);
+	assert(test_output != NULL);
+
+	/* Fetch the fixed-size result record for this testcase. */
+	status = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_results)
+			+ (testcase->index * sizeof(TESTCASE_RESULT)),
+			result, sizeof(TESTCASE_RESULT));
+	if (status != STATUS_SUCCESS) {
+		return status;
+	}
+
+	output_size = result->output_size;
+
+	/* Then fetch the variable-size output string, if the test produced one. */
+	if (output_size != 0) {
+		status = tftf_nvm_read(TFTF_STATE_OFFSET(result_buffer)
+				+ result->output_offset,
+				test_output, output_size);
+		if (status != STATUS_SUCCESS)
+			return status;
+	}
+
+	/* Always NUL-terminate, even when there is no output. */
+	test_output[output_size] = 0;
+
+	return STATUS_SUCCESS;
+}
+
+/*
+ * printf-style append into the current test's output buffer.
+ * Thread-safe (serialised by testcase_output_lock).
+ * Returns the number of characters actually written, or -1 on error /
+ * full buffer.
+ */
+int tftf_testcase_printf(const char *format, ...)
+{
+	va_list ap;
+	int available;
+	int written = -1;
+
+	spin_lock(&testcase_output_lock);
+
+	assert(sizeof(testcase_output) >= testcase_output_idx);
+	available = sizeof(testcase_output) - testcase_output_idx;
+	if (available == 0) {
+		ERROR("%s: Output buffer is full ; the string won't be printed.\n",
+			__func__);
+		ERROR("%s: Consider increasing TESTCASE_OUTPUT_MAX_SIZE value.\n",
+			__func__);
+		goto release_lock;
+	}
+
+	va_start(ap, format);
+	written = vsnprintf(&testcase_output[testcase_output_idx], available,
+			format, ap);
+	va_end(ap);
+
+	if (written < 0) {
+		ERROR("%s: Output error (%d)", __func__, written);
+		goto release_lock;
+	}
+	/*
+	 * If vsnprintf() truncated the string due to the size limit passed as
+	 * an argument then its return value is the number of characters (not
+	 * including the trailing '\0') which would have been written to the
+	 * final string if enough space had been available. Thus, a return value
+	 * of size or more means that the output was truncated.
+	 *
+	 * Adjust the value of 'written' to reflect what has been actually
+	 * written.
+	 */
+	if (written >= available) {
+		ERROR("%s: String has been truncated (%u/%u bytes written).\n",
+			__func__, available - 1, written);
+		ERROR("%s: Consider increasing TESTCASE_OUTPUT_MAX_SIZE value.\n",
+			__func__);
+		written = available - 1;
+	}
+
+	/*
+	 * Update testcase_output_idx to point to the '\0' of the buffer.
+	 * The next call of tftf_testcase_printf() will overwrite '\0' to
+	 * append its new string to the buffer.
+	 */
+	testcase_output_idx += written;
+
+release_lock:
+	spin_unlock(&testcase_output_lock);
+	return written;
+}
+
+/*
+ * Called by a test (not the framework) right before it deliberately reboots
+ * the platform, so the framework re-enters the same test after reset.
+ */
+void tftf_notify_reboot(void)
+{
+#if DEBUG
+	/* This function must be called by tests, not by the framework */
+	test_progress_t test_progress;
+	tftf_get_test_progress(&test_progress);
+	assert(test_progress == TEST_IN_PROGRESS);
+#endif /* DEBUG */
+
+	VERBOSE("Test intends to reset\n");
+	tftf_set_test_progress(TEST_REBOOTING);
+}
diff --git a/tftf/framework/report.c b/tftf/framework/report.c
new file mode 100644
index 000000000..4b0a8576d
--- /dev/null
+++ b/tftf/framework/report.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h> /* For TESTCASE_OUTPUT_MAX_SIZE */
+#include <semihosting.h>
+#include <stdio.h>
+#include <string.h>
+#include <tftf.h>
+
+struct tftf_report_ops {
+ long (*open)(const char *fname);
+ void (*write)(long handle, const char *str);
+ void (*close)(long handle);
+};
+
+#define TEST_REPORT_JUNIT_FILENAME "tftf_report_junit.xml"
+#define TEST_REPORT_RAW_FILENAME "tftf_report_raw.txt"
+
+#if defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_UART_JUNIT)
+/* UART backend 'open': just print a banner; the handle value is unused. */
+static long tftf_report_uart_open(const char *fname)
+{
+	printf("********** %s **********\n", fname);
+	return 0;
+}
+
+/* UART backend 'write': emit the string character by character. */
+static void tftf_report_uart_write(long handle, const char *str)
+{
+	(void)handle;
+	assert(str);
+	/* Not using printf to avoid doing two copies. */
+	while (*str) {
+		putchar(*str++);
+	}
+}
+
+/* UART backend 'close': print a closing banner; nothing to release. */
+static void tftf_report_uart_close(long handle)
+{
+	(void)handle;
+	printf("************************\n");
+}
+
+const struct tftf_report_ops tftf_report_uart_ops = {
+ .open = tftf_report_uart_open,
+ .write = tftf_report_uart_write,
+ .close = tftf_report_uart_close,
+};
+#endif /* defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_UART_JUNIT) */
+
+#if defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_SEMIHOSTING_RAW)
+static unsigned int total_tests;
+static unsigned int tests_stats[TEST_RESULT_MAX];
+
+/* Accumulate one test outcome into the global totals used by the raw report. */
+static void tftf_update_tests_statistics(test_result_t result)
+{
+	assert(TEST_RESULT_IS_VALID(result));
+	total_tests++;
+	tests_stats[result]++;
+}
+
+static const char *test_result_strings[TEST_RESULT_MAX] = {
+ "Skipped", "Passed", "Failed", "Crashed",
+};
+
+/*
+ * Map a test_result_t to its human-readable name.
+ * NOTE(review): test_result_strings[] must stay in the same order as the
+ * test_result_t enum — verify if the enum ever changes.
+ */
+const char *test_result_to_string(test_result_t result)
+{
+	assert(TEST_RESULT_IS_VALID(result));
+	return test_result_strings[result];
+}
+
+/*
+ * Emit the plain-text test report through the given backend (UART or
+ * semihosting): per-suite results, any captured test output, then the
+ * per-result statistics and grand total.
+ */
+static void tftf_report_generate_raw(const struct tftf_report_ops *rops,
+				const char *fname)
+{
+#define WRITE(str) rops->write(file_handle, str)
+#define BUFFER_SIZE 200
+	unsigned i, j;
+	long file_handle;
+	char buffer[BUFFER_SIZE];
+	const test_case_t *testcases;
+	TESTCASE_RESULT testcase_result;
+	char test_output[TESTCASE_OUTPUT_MAX_SIZE];
+	STATUS status;
+
+	file_handle = rops->open(fname);
+	if (file_handle == -1)
+		return;
+
+	/* Extract the result of all the testcases */
+	WRITE("========== TEST REPORT ==========\n");
+	for (i = 0; testsuites[i].name != NULL; i++) {
+		snprintf(buffer, BUFFER_SIZE, "# Test suite '%s':\n", testsuites[i].name);
+		WRITE(buffer);
+		testcases = testsuites[i].testcases;
+
+		for (j = 0; testcases[j].name != NULL; j++) {
+			status = tftf_testcase_get_result(&testcases[j], &testcase_result, test_output);
+			if (status != STATUS_SUCCESS) {
+				WRITE("Failed to get test result.\n");
+				continue;
+			}
+
+			tftf_update_tests_statistics(testcase_result.result);
+			/* TODO: print test duration */
+			snprintf(buffer, BUFFER_SIZE, "\t - %s: %s\n", testcases[j].name,
+				test_result_to_string(testcase_result.result));
+			WRITE(buffer);
+
+			/* Append the captured test output, if the test produced any. */
+			if (strlen(test_output) != 0) {
+				WRITE("--- output ---\n");
+				snprintf(buffer, BUFFER_SIZE, "%s", test_output);
+				WRITE(buffer);
+				WRITE("--------------\n");
+			}
+		}
+	}
+	WRITE("=================================\n");
+
+	/* Summary: one line per result category, then the total. */
+	for (i = TEST_RESULT_MIN; i < TEST_RESULT_MAX; i++) {
+		snprintf(buffer, BUFFER_SIZE, "Tests %-8s: %d\n",
+			test_result_to_string(i), tests_stats[i]);
+		WRITE(buffer);
+	}
+	snprintf(buffer, BUFFER_SIZE, "%-14s: %d\n", "Total tests", total_tests);
+	WRITE(buffer);
+	WRITE("=================================\n");
+
+	rops->close(file_handle);
+#undef BUFFER_SIZE
+#undef WRITE
+}
+#endif /* defined(TEST_REPORT_UART_RAW) || defined(TEST_REPORT_SEMIHOSTING_RAW) */
+
+#if defined(TEST_REPORT_SEMIHOSTING_RAW) || defined(TEST_REPORT_SEMIHOSTING_JUNIT)
+/* Semihosting backend 'open': create the report file on the host; -1 on failure. */
+static long tftf_report_semihosting_open(const char *fname)
+{
+	/* Create the report on the semihosting */
+	long handle = semihosting_file_open(fname, FOPEN_MODE_WPLUS);
+	if (handle == -1) {
+		ERROR("Failed to create test report file \"%s\" on semihosting"
+			" [status = %ld].\n", fname, handle);
+	}
+	NOTICE("Opened file \"%s\" on semihosting with handle %ld.\n", fname, handle);
+	return handle;
+}
+
+/* Semihosting backend 'write': write the whole NUL-terminated string to the host file. */
+static void tftf_report_semihosting_write(long handle, const char *str)
+{
+	size_t length = strlen(str);
+	semihosting_file_write(handle, &length, (const uintptr_t) str);
+}
+
+/* Semihosting backend 'close': close the host-side report file. */
+static void tftf_report_semihosting_close(long handle)
+{
+	semihosting_file_close(handle);
+	NOTICE("Closing file with handle %ld on semihosting.\n", handle);
+}
+
+const struct tftf_report_ops tftf_report_semihosting_ops = {
+ .open = tftf_report_semihosting_open,
+ .write = tftf_report_semihosting_write,
+ .close = tftf_report_semihosting_close,
+};
+#endif /* defined(TEST_REPORT_SEMIHOSTING_RAW) || defined(TEST_REPORT_SEMIHOSTING_JUNIT) */
+
+
+#if defined(TEST_REPORT_UART_JUNIT) || defined(TEST_REPORT_SEMIHOSTING_JUNIT)
+/*
+ * Emit the test report in JUnit XML form through the given backend.
+ * SUCCESS becomes a self-closing <testcase/>; SKIPPED gets a <skipped/>
+ * child; anything else is reported as an <error> wrapping the test output.
+ * NOTE(review): the get_result status is ignored here (unlike the raw
+ * report) and test output is not XML-escaped — confirm acceptable.
+ */
+static void tftf_report_generate_junit(const struct tftf_report_ops *rops,
+				const char *fname)
+{
+#define WRITE(str) rops->write(file_handle, str)
+#define BUFFER_SIZE 200
+
+	long file_handle;
+	unsigned i, j;
+	const test_case_t *testcases;
+	TESTCASE_RESULT result;
+	char buffer[BUFFER_SIZE];
+	char test_output[TESTCASE_OUTPUT_MAX_SIZE];
+
+	file_handle = rops->open(fname);
+
+	if (file_handle == -1) {
+		return;
+	}
+	WRITE("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
+	WRITE("<testsuites>\n");
+
+	/* Extract the result of all the testcases */
+	for (i = 0; testsuites[i].name != NULL; i++) {
+		snprintf(buffer, BUFFER_SIZE, "<testsuite name=\"%s\">\n",
+			testsuites[i].name);
+		WRITE(buffer);
+		testcases = testsuites[i].testcases;
+		for (j = 0; testcases[j].name != NULL; j++) {
+			tftf_testcase_get_result(&testcases[j], &result, test_output);
+
+			snprintf(buffer, BUFFER_SIZE, "  <testcase name=\"%s\" time=\"%llu\"",
+				testcases[j].name, result.duration);
+			WRITE(buffer);
+			if (result.result == TEST_RESULT_SUCCESS) {
+				WRITE("/>\n");
+			} else {
+				WRITE(">\n");
+				if (result.result == TEST_RESULT_SKIPPED) {
+					WRITE("    <skipped/>\n");
+				} else {
+					WRITE("    <error type=\"failed\">\n");
+					WRITE(test_output);
+					WRITE("    </error>\n");
+				}
+				WRITE("  </testcase>\n");
+			}
+		}
+		WRITE("</testsuite>\n");
+	}
+
+	WRITE("</testsuites>\n");
+	rops->close(file_handle);
+#undef BUFFER_SIZE
+#undef WRITE
+}
+#endif /* defined(TEST_REPORT_UART_JUNIT) || defined(TEST_REPORT_SEMIHOSTING_JUNIT) */
+
+/*
+ * Generate every report format enabled at build time (raw and/or JUnit,
+ * over UART and/or semihosting). At least one must be enabled.
+ */
+void tftf_report_generate(void)
+{
+	int nb_reports = 0;
+#ifdef TEST_REPORT_UART_RAW
+	tftf_report_generate_raw(&tftf_report_uart_ops, "raw");
+	nb_reports++;
+#endif
+#ifdef TEST_REPORT_UART_JUNIT
+	tftf_report_generate_junit(&tftf_report_uart_ops, "junit");
+	nb_reports++;
+#endif
+#ifdef TEST_REPORT_SEMIHOSTING_RAW
+	tftf_report_generate_raw(&tftf_report_semihosting_ops,
+				TEST_REPORT_RAW_FILENAME);
+	nb_reports++;
+#endif
+#ifdef TEST_REPORT_SEMIHOSTING_JUNIT
+	tftf_report_generate_junit(&tftf_report_semihosting_ops,
+				TEST_REPORT_JUNIT_FILENAME);
+	nb_reports++;
+#endif
+	/* The build system must select at least one report format. */
+	assert(nb_reports > 0);
+}
diff --git a/tftf/framework/tftf.ld.S b/tftf/framework/tftf.ld.S
new file mode 100644
index 000000000..9432a7484
--- /dev/null
+++ b/tftf/framework/tftf.ld.S
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(tftf_entrypoint)
+
+MEMORY {
+ RAM (rwx): ORIGIN = DRAM_BASE, LENGTH = DRAM_SIZE
+}
+
+
+SECTIONS
+{
+ . = TFTF_BASE;
+ __TFTF_BASE__ = .;
+
+ ro . : {
+ __RO_START__ = .;
+ *entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+ *(.vectors)
+ __RO_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked as
+ * read-only, executable. No RW data from the next section must
+ * creep in. Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(PAGE_SIZE);
+ __RO_END__ = .;
+ } >RAM
+
+ .data : {
+ __DATA_START__ = .;
+ *(.data*)
+ __DATA_END__ = .;
+ } >RAM
+
+ stacks (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(tftf_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss : ALIGN(16) {
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ __BSS_END__ = .;
+ } >RAM
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+ * the .bss section and eliminates the unnecessary zero init.
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM
+
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+ *(tftf_coherent_stacks)
+ *(tftf_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(PAGE_SIZE);
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+
+
+ __TFTF_END__ = .;
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+}
diff --git a/tftf/framework/timer/timer_framework.c b/tftf/framework/timer/timer_framework.c
new file mode 100644
index 000000000..e5e9a0f86
--- /dev/null
+++ b/tftf/framework/timer/timer_framework.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <errno.h>
+#include <irq.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <tftf.h>
+#include <timer.h>
+
+
+/* Helper macros */
+#define TIMER_STEP_VALUE (plat_timer_info->timer_step_value)
+#define TIMER_IRQ (plat_timer_info->timer_irq)
+#define PROGRAM_TIMER(a) plat_timer_info->program(a)
+#define INVALID_CORE UINT32_MAX
+#define INVALID_TIME UINT64_MAX
+#define MAX_TIME_OUT_MS 10000
+
+/*
+ * Pointer containing available timer information for the platform.
+ */
+static const plat_timer_t *plat_timer_info;
+/*
+ * Interrupt requested time by cores in terms of absolute time.
+ */
+static volatile unsigned long long interrupt_req_time[PLATFORM_CORE_COUNT];
+/*
+ * Contains the target core number of the timer interrupt.
+ */
+static unsigned int current_prog_core = INVALID_CORE;
+/*
+ * Lock to get a consistent view for programming the timer
+ */
+static spinlock_t timer_lock;
+/*
+ * Number of system ticks per millisec
+ */
+static unsigned int systicks_per_ms;
+
+/*
+ * Stores per CPU timer handler invoked on expiration of the requested timeout.
+ */
+static irq_handler_t timer_handler[PLATFORM_CORE_COUNT];
+
+/*
+ * Helper function: return the current system counter value converted to
+ * milliseconds. tftf_initialise_timer() must have initialised
+ * systicks_per_ms beforehand.
+ */
+static inline unsigned long long get_current_time_ms(void)
+{
+ assert(systicks_per_ms);
+ return mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO) / systicks_per_ms;
+}
+
+/*
+ * Return the absolute time (in ms) the timer peripheral is currently
+ * programmed to fire at, or 0 if no core has a timer programmed.
+ */
+static inline unsigned long long get_current_prog_time(void)
+{
+ return current_prog_core == INVALID_CORE ?
+ 0 : interrupt_req_time[current_prog_core];
+}
+
+/*
+ * Initialise the timer framework: fetch the platform timer driver,
+ * register the framework IRQ handler, enable the timer interrupt at the
+ * GIC and compute the tick-to-millisecond conversion factor.
+ *
+ * Returns 0 on success, or the platform driver's error code on failure.
+ */
+int tftf_initialise_timer(void)
+{
+ int rc;
+ unsigned int i;
+
+ /*
+ * Get platform specific timer information
+ */
+ rc = plat_initialise_timer_ops(&plat_timer_info);
+ if (rc) {
+ ERROR("%s %d: No timer data found\n", __func__, __LINE__);
+ return rc;
+ }
+
+ /* The platform must report a non-zero timer step value. */
+ assert(TIMER_STEP_VALUE);
+
+ /* No interrupt requested yet: initialise all slots to max time. */
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+ interrupt_req_time[i] = INVALID_TIME;
+
+ tftf_irq_register_handler(TIMER_IRQ, tftf_timer_framework_handler);
+ arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
+ arm_gic_intr_enable(TIMER_IRQ);
+
+ /* Save the systicks per millisecond */
+ systicks_per_ms = read_cntfrq_el0() / 1000;
+
+ return 0;
+}
+
+/*
+ * Return the core position of the next timer request to be serviced,
+ * i.e. the core whose interrupt needs to fire first, or INVALID_CORE if
+ * no core has a pending request.
+ */
+static inline unsigned int get_lowest_req_core(void)
+{
+ unsigned long long lowest_timer = INVALID_TIME;
+ unsigned int lowest_core_req = INVALID_CORE;
+ unsigned int i;
+
+ /*
+ * If 2 cores requested same value, give precedence
+ * to the core with lowest core number (strict '<' keeps the
+ * first match).
+ */
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (interrupt_req_time[i] < lowest_timer) {
+ lowest_timer = interrupt_req_time[i];
+ lowest_core_req = i;
+ }
+ }
+
+ return lowest_core_req;
+}
+
+/*
+ * Request a timer interrupt for the calling core, 'time_out_ms'
+ * milliseconds from now. Requests of 0 ms or above MAX_TIME_OUT_MS are
+ * rejected; requests below TIMER_STEP_VALUE are rounded up to it.
+ * The hardware timer is only reprogrammed if this request must fire
+ * before the currently programmed one.
+ *
+ * Returns 0 on success, -1 on an invalid timeout, or the platform
+ * driver's error code if programming the peripheral failed.
+ */
+int tftf_program_timer(unsigned long time_out_ms)
+{
+ unsigned int core_pos;
+ unsigned long long current_time;
+ u_register_t flags;
+ int rc = 0;
+
+ /*
+ * Some timer implementations have very small max timeouts. If a
+ * request is made for more than the max time supported by them,
+ * it would have to be broken down and remembered, or handled via
+ * some other technique. Since that use case is not intended, and
+ * to keep the timer framework simple, timeout requests can't be
+ * greater than 10 seconds. Hence, all timer peripherals used in
+ * the timer framework have to support a timeout with an interval
+ * of at least MAX_TIME_OUT_MS.
+ * Note: a timeout of 0 is also rejected here.
+ */
+ if ((time_out_ms > MAX_TIME_OUT_MS) || (time_out_ms == 0)) {
+ ERROR("%s : Greater than max timeout request\n", __func__);
+ return -1;
+ } else if (time_out_ms < TIMER_STEP_VALUE) {
+ time_out_ms = TIMER_STEP_VALUE;
+ }
+
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ /* A timer interrupt request must not already be pending for this core */
+ assert(interrupt_req_time[core_pos] == INVALID_TIME);
+
+ /* Mask IRQs so the timer handler can't contend for timer_lock. */
+ flags = read_daif();
+ disable_irq();
+ spin_lock(&timer_lock);
+
+ assert((current_prog_core < PLATFORM_CORE_COUNT) ||
+ (current_prog_core == INVALID_CORE));
+
+ /*
+ * Read time after acquiring timer_lock to account for any time taken
+ * by lock contention.
+ */
+ current_time = get_current_time_ms();
+
+ /* Update the requested time */
+ interrupt_req_time[core_pos] = current_time + time_out_ms;
+
+ VERBOSE("Need timer interrupt at: %lld current_prog_time:%lld\n"
+ " current time: %lld\n", interrupt_req_time[core_pos],
+ get_current_prog_time(),
+ get_current_time_ms());
+
+ /*
+ * If the interrupt request time is less than the currently programmed
+ * time by timer_step_value, or the timer is not programmed, program it
+ * with the requested time and retarget the timer interrupt to the
+ * current core.
+ */
+ if ((!get_current_prog_time()) || (interrupt_req_time[core_pos] <
+ (get_current_prog_time() - TIMER_STEP_VALUE))) {
+
+ arm_gic_set_intr_target(TIMER_IRQ, core_pos);
+
+ rc = PROGRAM_TIMER(time_out_ms);
+ /* We don't expect timer programming to fail */
+ if (rc)
+ ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+
+ current_prog_core = core_pos;
+ }
+
+ spin_unlock(&timer_lock);
+ /* Restore DAIF flags */
+ write_daif(flags);
+ isb();
+
+ return rc;
+}
+
+/*
+ * Program a wakeup timer for 'milli_secs' ms and suspend the calling CPU
+ * with the given PSCI power state.
+ *
+ * timer_rc   : if non-NULL, receives the tftf_program_timer() return code.
+ * suspend_rc : if non-NULL, receives the tftf_cpu_suspend() return code.
+ *
+ * Returns 0 if both steps succeeded, -1 otherwise.
+ */
+int tftf_program_timer_and_suspend(unsigned long milli_secs,
+ unsigned int pwr_state,
+ int *timer_rc, int *suspend_rc)
+{
+ int rc = 0;
+ u_register_t flags;
+
+ /* Default to successful return codes */
+ int timer_rc_val = 0;
+ int suspend_rc_val = PSCI_E_SUCCESS;
+
+ /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
+ flags = read_daif();
+ disable_irq();
+
+ /*
+ * Even with IRQs masked, the timer IRQ will wake the CPU up.
+ *
+ * If the timer IRQ happens before entering suspend mode (because the
+ * timer took too long to program, for example) the fact that the IRQ is
+ * pending will prevent the CPU from entering suspend mode and not being
+ * able to wake up.
+ */
+ timer_rc_val = tftf_program_timer(milli_secs);
+ if (timer_rc_val == 0) {
+ suspend_rc_val = tftf_cpu_suspend(pwr_state);
+ if (suspend_rc_val != PSCI_E_SUCCESS) {
+ rc = -1;
+ INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
+ suspend_rc_val);
+ }
+ } else {
+ rc = -1;
+ INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
+ }
+
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+
+ if (timer_rc)
+ *timer_rc = timer_rc_val;
+ if (suspend_rc)
+ *suspend_rc = suspend_rc_val;
+ /*
+ * If IRQs were disabled when calling this function, the timer IRQ
+ * handler won't be called and the timer interrupt will be pending, but
+ * that isn't necessarily a problem.
+ */
+
+ return rc;
+}
+
+/*
+ * Program a wakeup timer for 'milli_secs' ms and issue a PSCI
+ * SYSTEM_SUSPEND from the calling CPU. Mirrors
+ * tftf_program_timer_and_suspend() but uses tftf_system_suspend().
+ *
+ * timer_rc   : if non-NULL, receives the tftf_program_timer() return code.
+ * suspend_rc : if non-NULL, receives the tftf_system_suspend() return code.
+ *
+ * Returns 0 if both steps succeeded, -1 otherwise.
+ */
+int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
+ int *timer_rc, int *suspend_rc)
+{
+ int rc = 0;
+ u_register_t flags;
+
+ /* Default to successful return codes */
+ int timer_rc_val = 0;
+ int suspend_rc_val = PSCI_E_SUCCESS;
+
+ /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
+ flags = read_daif();
+ disable_irq();
+
+ /*
+ * Even with IRQs masked, the timer IRQ will wake the CPU up.
+ *
+ * If the timer IRQ happens before entering suspend mode (because the
+ * timer took too long to program, for example) the fact that the IRQ is
+ * pending will prevent the CPU from entering suspend mode and not being
+ * able to wake up.
+ */
+ timer_rc_val = tftf_program_timer(milli_secs);
+ if (timer_rc_val == 0) {
+ suspend_rc_val = tftf_system_suspend();
+ if (suspend_rc_val != PSCI_E_SUCCESS) {
+ rc = -1;
+ INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
+ suspend_rc_val);
+ }
+ } else {
+ rc = -1;
+ INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
+ }
+
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+
+ /*
+ * If IRQs were disabled when calling this function, the timer IRQ
+ * handler won't be called and the timer interrupt will be pending, but
+ * that isn't necessarily a problem.
+ */
+ if (timer_rc)
+ *timer_rc = timer_rc_val;
+ if (suspend_rc)
+ *suspend_rc = suspend_rc_val;
+
+ return rc;
+}
+
+/*
+ * Put the calling CPU into standby for 'milli_secs' milliseconds using a
+ * level-0 standby power state with a timer wakeup.
+ *
+ * Returns 0 on success, -1 if the state ID could not be built or the
+ * program-and-suspend sequence failed.
+ */
+int tftf_timer_sleep(unsigned long milli_secs)
+{
+ int ret, power_state;
+ uint32_t stateid;
+
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, &stateid);
+ if (ret != PSCI_E_SUCCESS)
+ return -1;
+
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY,
+ stateid);
+ ret = tftf_program_timer_and_suspend(milli_secs, power_state,
+ NULL, NULL);
+ if (ret != 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Cancel the calling core's pending timer request. If the hardware timer
+ * was programmed on behalf of this core, cancel it at the peripheral,
+ * clear any pending IRQ at the GIC, and reprogram/retarget the timer for
+ * the next core with an outstanding request (if any).
+ *
+ * Returns 0 on success, or the platform driver's error code.
+ */
+int tftf_cancel_timer(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int next_timer_req_core_pos;
+ unsigned long long current_time;
+ u_register_t flags;
+ int rc = 0;
+
+ /*
+ * IRQ is disabled so that if a timer is fired after taking a lock,
+ * it will remain pending and a core does not hit IRQ handler trying
+ * to acquire an already locked spin_lock causing dead lock.
+ */
+ flags = read_daif();
+ disable_irq();
+ spin_lock(&timer_lock);
+
+ interrupt_req_time[core_pos] = INVALID_TIME;
+
+ if (core_pos == current_prog_core) {
+ /*
+ * Cancel the programmed interrupt at the peripheral. If the
+ * timer interrupt is level triggered and fired this also
+ * deactivates the pending interrupt.
+ */
+ rc = plat_timer_info->cancel();
+ /* We don't expect cancel timer to fail */
+ if (rc) {
+ ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+ goto exit;
+ }
+
+ /*
+ * For edge triggered interrupts, if an IRQ is fired before
+ * cancel timer is executed, the signal remains pending. So,
+ * clear the Timer IRQ if it is already pending.
+ */
+ if (arm_gic_is_intr_pending(TIMER_IRQ))
+ arm_gic_intr_clear(TIMER_IRQ);
+
+ /* Get next timer consumer */
+ next_timer_req_core_pos = get_lowest_req_core();
+ if (next_timer_req_core_pos != INVALID_CORE) {
+
+ /* Retarget to the next_timer_req_core_pos */
+ arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
+ current_prog_core = next_timer_req_core_pos;
+
+ current_time = get_current_time_ms();
+
+ /*
+ * If the next timer request is earlier than, or within
+ * a window of TIMER_STEP_VALUE from, the current time,
+ * program it to fire after TIMER_STEP_VALUE.
+ */
+ if (interrupt_req_time[next_timer_req_core_pos] >
+ current_time + TIMER_STEP_VALUE)
+ rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos] - current_time);
+ else
+ rc = PROGRAM_TIMER(TIMER_STEP_VALUE);
+ VERBOSE("Cancel and program new timer for core_pos: "
+ "%d %lld\n",
+ next_timer_req_core_pos,
+ get_current_prog_time());
+ /* We don't expect timer programming to fail */
+ if (rc)
+ ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
+ } else {
+ current_prog_core = INVALID_CORE;
+ VERBOSE("Cancelling timer : %d\n", core_pos);
+ }
+ }
+exit:
+ spin_unlock(&timer_lock);
+
+ /* Restore DAIF flags */
+ write_daif(flags);
+ isb();
+
+ return rc;
+}
+
+/*
+ * Framework handler for the timer IRQ. Runs on the core the interrupt was
+ * targeted to: invokes the platform driver handler and this core's
+ * registered handler, wakes (via IRQ_WAKE_SGI) any other cores whose
+ * requests fall within TIMER_STEP_VALUE of now, then reprograms the timer
+ * for the next outstanding request.
+ *
+ * Returns 0 on success, or the platform driver's error code from
+ * reprogramming the timer.
+ */
+int tftf_timer_framework_handler(void *data)
+{
+ unsigned int handler_core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int next_timer_req_core_pos;
+ unsigned long long current_time;
+ int rc = 0;
+
+ assert(interrupt_req_time[handler_core_pos] != INVALID_TIME);
+ spin_lock(&timer_lock);
+
+ current_time = get_current_time_ms();
+ /* Check that the interrupt is targeted correctly */
+ assert(handler_core_pos == current_prog_core);
+
+ interrupt_req_time[handler_core_pos] = INVALID_TIME;
+
+ /* Execute the driver handler */
+ if (plat_timer_info->handler)
+ plat_timer_info->handler();
+
+ if (arm_gic_is_intr_pending(TIMER_IRQ)) {
+ /*
+ * We might never manage to acquire the printf lock here
+ * (because we are in ISR context) but we're gonna panic right
+ * after anyway so it doesn't really matter.
+ */
+ ERROR("Timer IRQ still pending. Fatal error.\n");
+ panic();
+ }
+
+ /*
+ * Execute the handler requested by the core, the handlers for the
+ * other cores will be executed as part of handling IRQ_WAKE_SGI.
+ */
+ if (timer_handler[handler_core_pos])
+ timer_handler[handler_core_pos](data);
+
+ /* Send interrupts to all the CPUS in the min time block */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if ((interrupt_req_time[i] <=
+ (current_time + TIMER_STEP_VALUE))) {
+ interrupt_req_time[i] = INVALID_TIME;
+ tftf_send_sgi(IRQ_WAKE_SGI, i);
+ }
+ }
+
+ /* Get the next lowest requested timer core and program it */
+ next_timer_req_core_pos = get_lowest_req_core();
+ if (next_timer_req_core_pos != INVALID_CORE) {
+ /* Check we have not exceeded the time for next core */
+ assert(interrupt_req_time[next_timer_req_core_pos] >
+ current_time);
+ arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
+ rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos]
+ - current_time);
+ }
+ /* Update current program core to the newer one */
+ current_prog_core = next_timer_req_core_pos;
+
+ spin_unlock(&timer_lock);
+
+ return rc;
+}
+
+/*
+ * Register 'irq_handler' as this core's timer expiration handler. The
+ * same handler is also registered for IRQ_WAKE_SGI, since the timer
+ * framework may deliver the wakeup through that SGI instead of the timer
+ * IRQ. A handler must not already be registered for this core.
+ *
+ * Returns the result of the IRQ_WAKE_SGI registration (0 on success).
+ */
+int tftf_timer_register_handler(irq_handler_t irq_handler)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ int ret;
+
+ /* Validate no handler is registered */
+ assert(!timer_handler[core_pos]);
+ timer_handler[core_pos] = irq_handler;
+
+ /*
+ * Also register the same handler for IRQ_WAKE_SGI, as the core can
+ * be woken by it.
+ */
+ ret = tftf_irq_register_handler(IRQ_WAKE_SGI, irq_handler);
+ assert(!ret);
+
+ return ret;
+}
+
+/*
+ * Unregister this core's timer expiration handler, together with the
+ * matching IRQ_WAKE_SGI handler registered by
+ * tftf_timer_register_handler(). A handler must be registered.
+ *
+ * Returns the result of the IRQ_WAKE_SGI unregistration (0 on success).
+ */
+int tftf_timer_unregister_handler(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ int ret;
+
+ /*
+ * Unregister the handler for IRQ_WAKE_SGI also
+ */
+ ret = tftf_irq_unregister_handler(IRQ_WAKE_SGI);
+ assert(!ret);
+ /* Validate a handler is registered */
+ assert(timer_handler[core_pos]);
+ timer_handler[core_pos] = 0;
+
+ return ret;
+}
+
+/*
+ * Return the IRQ number used by the platform timer. The timer framework
+ * must have been initialised first (asserted via a non-zero IRQ).
+ */
+unsigned int tftf_get_timer_irq(void)
+{
+ /*
+ * Check if the timer info is initialised
+ */
+ assert(TIMER_IRQ);
+ return TIMER_IRQ;
+}
+
+/*
+ * Return the platform timer's step value in milliseconds. The timer
+ * framework must have been initialised first.
+ */
+unsigned int tftf_get_timer_step_value(void)
+{
+ assert(TIMER_STEP_VALUE);
+
+ return TIMER_STEP_VALUE;
+}
+
+/*
+ * There are 4 cases that could happen when a system is resuming from system
+ * suspend. The cases are:
+ * 1. The resumed core is the last core to power down and the
+ * timer interrupt was targeted to it. In this case, target the
+ * interrupt to our core and set the appropriate priority and enable it.
+ *
+ * 2. The resumed core was the last core to power down but the timer interrupt
+ * is targeted to another core because of timer request grouping within
+ * TIMER_STEP_VALUE. In this case, re-target the interrupt to our core
+ * and set the appropriate priority and enable it
+ *
+ * 3. The system suspend request was down-graded by firmware and the timer
+ * interrupt is targeted to another core which woke up first. In this case,
+ * that core will wake us up and the interrupt_req_time[] corresponding to
+ * our core will be cleared. In this case, no need to do anything as GIC
+ * state is preserved.
+ *
+ * 4. The system suspend is woken up by another external interrupt other
+ * than the timer framework interrupt. In this case, just enable the
+ * timer interrupt and set the correct priority at GICD.
+ */
+void tftf_timer_gic_state_restore(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ spin_lock(&timer_lock);
+
+ /* Re-enable the timer IRQ at the GIC with the framework priority. */
+ arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
+ arm_gic_intr_enable(TIMER_IRQ);
+
+ /*
+ * Check if the programmed core is the woken up core: an INVALID_TIME
+ * entry means another core already serviced our request (case 3 in
+ * the comment above), so the GIC target must not be changed.
+ */
+ if (interrupt_req_time[core_pos] == INVALID_TIME) {
+ INFO("The programmed core is not the one woken up\n");
+ } else {
+ current_prog_core = core_pos;
+ arm_gic_set_intr_target(TIMER_IRQ, core_pos);
+ }
+
+ spin_unlock(&timer_lock);
+}
+
diff --git a/tftf/tests/common/test_helpers.c b/tftf/tests/common/test_helpers.c
new file mode 100644
index 000000000..8fdfdedc0
--- /dev/null
+++ b/tftf/tests/common/test_helpers.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/*
+ * Check whether the system is ready for SYSTEM_SUSPEND: every CPU except
+ * the calling one must report PSCI_STATE_OFF via AFFINITY_INFO.
+ *
+ * Returns 1 if all other CPUs are off, 0 otherwise.
+ */
+int is_sys_suspend_state_ready(void)
+{
+ int aff_info;
+ unsigned int target_node;
+ u_register_t target_mpid;
+ u_register_t current_mpid = read_mpidr_el1() & MPID_MASK;
+
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ /* Skip current CPU, as it is powered on */
+ if (target_mpid == current_mpid)
+ continue;
+
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0);
+ if (aff_info != PSCI_STATE_OFF)
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Issue a PSCI SYSTEM_RESET SMC. On success this call does not return;
+ * reaching the printf below therefore indicates a failed reset.
+ */
+void psci_system_reset(void)
+{
+ smc_args args = { SMC_PSCI_SYSTEM_RESET };
+ smc_ret_values ret;
+
+ ret = tftf_smc(&args);
+
+ /* The PSCI SYSTEM_RESET call is not supposed to return */
+ tftf_testcase_printf("System didn't reboot properly (%d)\n",
+ (unsigned int)ret.ret0);
+}
+
+/*
+ * Issue a PSCI MEM_PROTECT SMC with 'val' as the enable/disable argument.
+ * Returns the first SMC return value (previous state or an error code, as
+ * defined by the PSCI specification).
+ */
+int psci_mem_protect(int val)
+{
+ smc_args args = { SMC_PSCI_MEM_PROTECT};
+ smc_ret_values ret;
+
+ args.arg1 = val;
+ ret = tftf_smc(&args);
+
+ return ret.ret0;
+}
+
+/*
+ * Issue a PSCI MEM_PROTECT_CHECK_RANGE SMC for the range
+ * [addr, addr + size). Returns the first SMC return value.
+ */
+int psci_mem_protect_check(uintptr_t addr, size_t size)
+{
+ smc_args args = { SMC_PSCI_MEM_PROTECT_CHECK };
+ smc_ret_values ret;
+
+ args.arg1 = addr;
+ args.arg2 = size;
+ ret = tftf_smc(&args);
+ return ret.ret0;
+}
+
+/*
+ * Return an address usable as a sentinel for the mem_protect functions,
+ * i.e. an address in a protectable region that does not intersect the
+ * memory used by TFTF itself, or NULL if no such address is found.
+ */
+unsigned char *psci_mem_prot_get_sentinel(void)
+{
+ const mem_region_t *ranges, *rp, *lim;
+ int nranges;
+ IMPORT_SYM(uintptr_t, __TFTF_BASE__, tftf_base);
+ IMPORT_SYM(uintptr_t, __TFTF_END__, tftf_end);
+ uintptr_t p = 0;
+
+ ranges = plat_get_prot_regions(&nranges);
+ if (!ranges)
+ return NULL;
+
+ /*
+ * Scan each region's start and end address; break on the first
+ * endpoint that lies outside [tftf_base, tftf_end].
+ * NOTE(review): this accepts a region endpoint without checking the
+ * other endpoint of the same region — presumably regions never
+ * straddle the TFTF image; confirm against the platform layouts.
+ */
+ lim = &ranges[nranges];
+ for (rp = ranges ; rp < lim; rp++) {
+ p = rp->addr;
+ if (p < tftf_base || p > tftf_end)
+ break;
+ p = p + (rp->size - 1);
+ if (p < tftf_base || p > tftf_end)
+ break;
+ }
+
+ /* rp == lim means every endpoint fell inside the TFTF image. */
+ return (rp == lim) ? NULL : (unsigned char *) p;
+}
+
+/*
+ * Map the memory region described by 'args' (identity mapping with the
+ * given attributes), run 'test' with args->arg, then unmap the region.
+ *
+ * Returns TEST_RESULT_FAIL if the map or unmap step fails; otherwise
+ * returns the test function's own result.
+ */
+test_result_t map_test_unmap(const map_args_unmap_t *args,
+ test_function_arg_t test)
+{
+ int mmap_ret;
+ test_result_t test_ret;
+
+ mmap_ret = mmap_add_dynamic_region(args->addr, args->addr,
+ args->size, args->attr);
+
+ if (mmap_ret != 0) {
+ tftf_testcase_printf("Couldn't map memory (ret = %d)\n",
+ mmap_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ test_ret = (*test)(args->arg);
+
+ /* Note: an unmap failure overrides the test's own result. */
+ mmap_ret = mmap_remove_dynamic_region(args->addr, args->size);
+ if (mmap_ret != 0) {
+ tftf_testcase_printf("Couldn't unmap memory (ret = %d)\n",
+ mmap_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return test_ret;
+}
diff --git a/tftf/tests/extensions/amu/test_amu.c b/tftf/tests/extensions/amu/test_amu.c
new file mode 100644
index 000000000..2e2ea6fba
--- /dev/null
+++ b/tftf/tests/extensions/amu/test_amu.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define SUSPEND_TIME_1_SEC 1000
+
+static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
+
+/*
+ * Dummy timer handler: records in wakeup_irq_received[] that the calling
+ * core received its wakeup, so the test can verify it was fired.
+ * The flag must not already be set for this core.
+ */
+static int suspend_wakeup_handler(void *data)
+{
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ assert(wakeup_irq_received[core_pos] == 0);
+
+ wakeup_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Helper function to suspend the calling CPU to power level 0
+ * (powerdown state) and wake it up with a timer after
+ * SUSPEND_TIME_1_SEC ms.
+ *
+ * Returns TEST_RESULT_SUCCESS if the suspend succeeded and the wakeup
+ * IRQ was received, TEST_RESULT_FAIL otherwise.
+ */
+static test_result_t suspend_and_resume_this_cpu(void)
+{
+ uint32_t stateid;
+ int psci_ret;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Prepare wakeup timer. IRQs need to be enabled. */
+ wakeup_irq_received[core_pos] = 0;
+
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /* Program timer to fire interrupt after timer expires */
+ tftf_program_timer(SUSPEND_TIME_1_SEC);
+
+ /*
+ * Suspend the calling CPU to power level 0 and power
+ * state.
+ */
+ psci_ret = tftf_psci_make_composite_state_id(PSTATE_AFF_LVL_0,
+ PSTATE_TYPE_POWERDOWN,
+ &stateid);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ mp_printf("Failed to make composite state ID @ CPU %d. rc = %x\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ } else {
+ unsigned int power_state = tftf_make_psci_pstate(PSTATE_AFF_LVL_0,
+ PSTATE_TYPE_POWERDOWN, stateid);
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (!wakeup_irq_received[core_pos]) {
+ mp_printf("Didn't receive wakeup IRQ in CPU %d.\n",
+ core_pos);
+ result = TEST_RESULT_FAIL;
+ }
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ mp_printf("Failed to suspend from CPU %d. ret: %x\n",
+ core_pos, psci_ret);
+ result = TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wake up. Remove timer after waking up.*/
+ tftf_cancel_timer();
+ tftf_timer_unregister_handler();
+
+ return result;
+}
+
+/*
+ * Check that group0/group1 AMU counters are non-zero. As EL3 has
+ * enabled the counters before the first entry to the NS world, they
+ * should have incremented by the time this test runs.
+ *
+ * Skipped when the AMU is not supported or the counters are not all
+ * enabled; fails if any counter still reads zero.
+ */
+test_result_t test_amu_nonzero_ctr(void)
+{
+ int i;
+
+ if (!amu_supported())
+ return TEST_RESULT_SKIPPED;
+
+ /* If counters are not enabled, then skip the test */
+ if (read_amcntenset0_el0() != AMU_GROUP0_COUNTERS_MASK ||
+ read_amcntenset1_el0() != AMU_GROUP1_COUNTERS_MASK)
+ return TEST_RESULT_SKIPPED;
+
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++) {
+ uint64_t v;
+
+ v = amu_group0_cnt_read(i);
+ if (v == 0) {
+ tftf_testcase_printf("Group0 counter cannot be 0\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ uint64_t v;
+
+ v = amu_group1_cnt_read(i);
+ if (v == 0) {
+ tftf_testcase_printf("Group1 counter cannot be 0\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Check that the AMU counters are non-decreasing across a
+ * suspend/resume cycle of the calling core. A decreasing counter
+ * indicates a broken AMU context save/restore in EL3.
+ *
+ * Skipped when the AMU is not supported or the counters are not all
+ * enabled.
+ */
+test_result_t test_amu_suspend_resume(void)
+{
+ uint64_t group0_ctrs[AMU_GROUP0_MAX_NR_COUNTERS];
+ uint64_t group1_ctrs[AMU_GROUP1_MAX_NR_COUNTERS];
+ int i;
+
+ if (!amu_supported())
+ return TEST_RESULT_SKIPPED;
+
+ /* If counters are not enabled, then skip the test */
+ if (read_amcntenset0_el0() != AMU_GROUP0_COUNTERS_MASK ||
+ read_amcntenset1_el0() != AMU_GROUP1_COUNTERS_MASK)
+ return TEST_RESULT_SKIPPED;
+
+ /* Save counters values before suspend */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ group0_ctrs[i] = amu_group0_cnt_read(i);
+
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ group1_ctrs[i] = amu_group1_cnt_read(i);
+
+ /* Suspend/resume current core (result deliberately ignored). */
+ suspend_and_resume_this_cpu();
+
+ /*
+ * Check if counter values are >= than the stored values.
+ * If they are not, the AMU context save/restore in EL3 is buggy.
+ */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++) {
+ uint64_t v;
+
+ v = amu_group0_cnt_read(i);
+ if (v < group0_ctrs[i]) {
+ tftf_testcase_printf("Invalid counter value: before: %llx, after: %llx\n",
+ (unsigned long long)group0_ctrs[i],
+ (unsigned long long)v);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++) {
+ uint64_t v;
+
+ v = amu_group1_cnt_read(i);
+ if (v < group1_ctrs[i]) {
+ tftf_testcase_printf("Invalid counter value: before: %llx, after: %llx\n",
+ (unsigned long long)group1_ctrs[i],
+ (unsigned long long)v);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_timer_framework.c b/tftf/tests/framework_validation_tests/test_timer_framework.c
new file mode 100644
index 000000000..d7bd4f928
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_timer_framework.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <mmio.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+/* Used to confirm the CPU is woken up by IRQ_WAKE_SGI or Timer IRQ */
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+/* Used to count number of CPUs woken up by IRQ_WAKE_SGI */
+static int multiple_timer_count;
+/* Used to count number of CPUs woken up by Timer IRQ */
+static int timer_switch_count;
+/* Timer step value of a platform */
+static unsigned int timer_step_value;
+/* Used to program the interrupt time */
+static unsigned long long next_int_time;
+/* Lock to prevent concurrently modifying next_int_time */
+static spinlock_t int_timer_access_lock;
+/* Lock to prevent concurrently modifying irq handler data structures */
+static spinlock_t irq_handler_lock;
+
+/* Variable to confirm all cores are inside the testcase */
+static volatile unsigned int all_cores_inside_test;
+
+/*
+ * Used by test cases to confirm if the programmed timer is fired. It also
+ * keeps track of how many timer irq's are received.
+ */
+static int requested_irq_handler(void *data)
+{
+	const unsigned int irq_id = *(unsigned int *) data;
+	const unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+	/* Only the wake SGI or the platform timer IRQ is expected here */
+	assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+	/* The per-CPU flag must have been cleared before re-arming */
+	assert(requested_irq_received[core_pos] == 0);
+
+	if (irq_id == tftf_get_timer_irq()) {
+		/* Serialise updates to the shared counter */
+		spin_lock(&irq_handler_lock);
+		timer_switch_count++;
+		spin_unlock(&irq_handler_lock);
+	}
+
+	requested_irq_received[core_pos] = 1;
+
+	return 0;
+}
+
+/*
+ * Used by test cases to confirm if the programmed timer is fired. It also
+ * keeps track of how many WAKE_SGI's are received.
+ */
+static int multiple_timer_handler(void *data)
+{
+	const unsigned int irq_id = *(unsigned int *) data;
+	const unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+	/* Only the wake SGI or the platform timer IRQ is expected here */
+	assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+	/* The per-CPU flag must have been cleared before re-arming */
+	assert(requested_irq_received[core_pos] == 0);
+
+	if (irq_id == IRQ_WAKE_SGI) {
+		/* Serialise updates to the shared counter */
+		spin_lock(&irq_handler_lock);
+		multiple_timer_count++;
+		spin_unlock(&irq_handler_lock);
+	}
+
+	requested_irq_received[core_pos] = 1;
+
+	return 0;
+}
+
+/*
+ * @Test_Aim@ Validates timer interrupt framework and platform timer driver for
+ * generation and routing of interrupt to a powered on core.
+ *
+ * Registers a timer handler, programs the timer one step beyond the
+ * platform's minimum programmable interval, then waits for the interrupt.
+ *
+ * Returns SUCCESS or waits forever in wfi()
+ */
+test_result_t test_timer_framework_interrupt(void)
+{
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	int ret;
+
+	/* Initialise common variable across tests */
+	requested_irq_received[core_pos] = 0;
+
+	/* Register timer handler to confirm it received the timer interrupt */
+	ret = tftf_timer_register_handler(requested_irq_handler);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	ret = tftf_program_timer(tftf_get_timer_step_value() + 1);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to program timer:0x%x\n", ret);
+		/*
+		 * Unregister the handler on this failure path so that a
+		 * stale handler is not left behind for subsequent tests.
+		 */
+		tftf_timer_unregister_handler();
+		return TEST_RESULT_FAIL;
+	}
+	wfi();
+
+	/*
+	 * wfi() may return on any interrupt, so spin until our handler has
+	 * actually observed the requested interrupt.
+	 */
+	while (!requested_irq_received[core_pos])
+		;
+	ret = tftf_timer_unregister_handler();
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Per-CPU routine for test_timer_target_power_down_cpu().
+ *
+ * Each CPU claims a CPU-unique timer deadline from the shared
+ * next_int_time budget, programs the timer and enters a power-down
+ * suspend, then waits for its interrupt to be observed by the handler.
+ */
+static test_result_t timer_target_power_down_cpu(void)
+{
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	unsigned int power_state;
+	unsigned int stateid;
+	int ret;
+	unsigned long timer_delay;
+
+	/* Signal to the lead CPU that this core has entered the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+	/* Initialise common variable across tests */
+	requested_irq_received[core_pos] = 0;
+
+	/* Construct the state-id for power down */
+	ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, &stateid);
+	if (ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to construct composite state\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Register timer handler to confirm it received the timer interrupt */
+	ret = tftf_timer_register_handler(requested_irq_handler);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Wait for all cores to be up */
+	while (!all_cores_inside_test)
+		;
+
+	power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, stateid);
+
+	/*
+	 * Claim the current deadline and shrink the shared budget by twice
+	 * (step value + suspend entry/exit time) so that no two CPUs
+	 * program timer deadlines in the same atomic slice. The lock makes
+	 * the read-modify-write of next_int_time atomic across CPUs.
+	 */
+	spin_lock(&int_timer_access_lock);
+	timer_delay = PLAT_SUSPEND_ENTRY_TIME + next_int_time;
+	next_int_time -= 2 * (timer_step_value + PLAT_SUSPEND_ENTRY_EXIT_TIME);
+	spin_unlock(&int_timer_access_lock);
+
+	ret = tftf_program_timer_and_suspend(timer_delay, power_state,
+					     NULL, NULL);
+	if (ret != 0) {
+		tftf_testcase_printf(
+			"Failed to program timer or suspend CPU: 0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Spin until the handler has observed this CPU's interrupt */
+	while (!requested_irq_received[core_pos])
+		;
+	ret = tftf_timer_unregister_handler();
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validates routing of timer interrupt to the lowest requested
+ * timer interrupt core on power down.
+ *
+ * Power up all the cores and each requests a timer interrupt lesser than
+ * the previous requested core by timer step value. Doing this ensures
+ * at least some cores would be woken by the timer IRQ.
+ *
+ * Returns SUCCESS if all cores power up on getting the interrupt.
+ */
+test_result_t test_timer_target_power_down_cpu(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int core_pos;
+	unsigned int rc;
+	unsigned int valid_cpu_count;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	/* Query the platform's timer step value only once */
+	if (!timer_step_value)
+		timer_step_value = tftf_get_timer_step_value();
+
+	timer_switch_count = 0;
+	all_cores_inside_test = 0;
+
+	/*
+	 * To be sure none of the CPUs do not fall in an atomic slice,
+	 * all CPU's program the timer as close as possible with a time
+	 * difference of twice the sum of step value and suspend entry
+	 * exit time.
+	 */
+	next_int_time = 2 * (timer_step_value + PLAT_SUSPEND_ENTRY_EXIT_TIME) * (PLATFORM_CORE_COUNT + 2);
+
+	/*
+	 * Preparation step: Power on all cores.
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		rc = tftf_cpu_on(cpu_mpid,
+				(uintptr_t) timer_target_power_down_cpu,
+				0);
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				cpu_mpid, rc);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPUs to be ready */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	all_cores_inside_test = 1;
+
+	/* The lead CPU runs the same per-CPU routine itself */
+	rc = timer_target_power_down_cpu();
+
+	valid_cpu_count = 0;
+	/* Wait for all cores to complete the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		core_pos = platform_get_core_pos(cpu_mpid);
+		while (!requested_irq_received[core_pos])
+			;
+		valid_cpu_count++;
+	}
+
+	/*
+	 * Propagate a failure from the lead CPU's own run. Previously this
+	 * result was ignored, which could let the test pass despite a
+	 * lead-CPU failure. It is checked only after all cores have
+	 * finished so no CPU is left mid-test.
+	 */
+	if (rc != TEST_RESULT_SUCCESS)
+		return rc;
+
+	/* Every CPU must have been woken by the timer IRQ */
+	if (timer_switch_count != valid_cpu_count) {
+		tftf_testcase_printf("Expected timer switch: %d Actual: %d\n",
+				valid_cpu_count, timer_switch_count);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Per-CPU routine for test_timer_target_multiple_same_interval().
+ *
+ * Every CPU programs (nearly) the same short timer deadline and enters a
+ * power-down suspend, so that several requests fall in the same timer
+ * period.
+ */
+static test_result_t timer_same_interval(void)
+{
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	unsigned int power_state;
+	unsigned int stateid;
+	int ret;
+
+	/* Signal to the lead CPU that this core has entered the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	/* Initialise common variable across tests */
+	requested_irq_received[core_pos] = 0;
+
+	/* Construct the state-id for power down */
+	ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, &stateid);
+	if (ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to construct composite state\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Register timer handler to confirm it received the timer interrupt */
+	ret = tftf_timer_register_handler(multiple_timer_handler);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Wait for all cores to be up */
+	while (!all_cores_inside_test)
+		;
+
+	/*
+	 * Lets hope with in Suspend entry time + 10ms, at least some of the CPU's
+	 * have same interval
+	 */
+	power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, stateid);
+	ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME + 10,
+					     power_state, NULL, NULL);
+	if (ret != 0) {
+		/*
+		 * NOTE(review): unlike timer_target_power_down_cpu(), this
+		 * path does not return TEST_RESULT_FAIL and instead falls
+		 * through to wait for the interrupt. If the timer was never
+		 * programmed, the loop below may not terminate -- confirm
+		 * whether continuing here is intentional.
+		 */
+		tftf_testcase_printf(
+			"Failed to program timer or suspend CPU: 0x%x\n", ret);
+	}
+
+	/* Spin until the handler has observed this CPU's interrupt */
+	while (!requested_irq_received[core_pos])
+		;
+	ret = tftf_timer_unregister_handler();
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validates routing of timer interrupt when multiple cores
+ * requested same time.
+ *
+ * Power up all the cores and each core requests same time.
+ *
+ * Returns SUCCESS if all cores get an interrupt and power up.
+ */
+test_result_t test_timer_target_multiple_same_interval(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int core_pos;
+	unsigned int rc;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	multiple_timer_count = 0;
+	all_cores_inside_test = 0;
+
+	/*
+	 * Preparation step: Power on all cores.
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		rc = tftf_cpu_on(cpu_mpid,
+				(uintptr_t) timer_same_interval,
+				0);
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				cpu_mpid, rc);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPUs to be ready */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	core_pos = platform_get_core_pos(lead_mpid);
+	/* Initialise common variable across tests */
+	requested_irq_received[core_pos] = 0;
+
+	all_cores_inside_test = 1;
+
+	/* The lead CPU runs the same per-CPU routine itself */
+	rc = timer_same_interval();
+
+	/* Wait for all cores to complete the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		core_pos = platform_get_core_pos(cpu_mpid);
+		while (!requested_irq_received[core_pos])
+			;
+	}
+
+	/* Propagate a failure from the lead CPU's own run */
+	if (rc != TEST_RESULT_SUCCESS)
+		return rc;
+
+	/* At least 2 CPUs requests should fall in same timer period. */
+	return multiple_timer_count ? TEST_RESULT_SUCCESS : TEST_RESULT_SKIPPED;
+}
+
+/*
+ * Per-CPU body of the timer framework stress test.
+ *
+ * For roughly 10 seconds worth of system counter ticks, each iteration
+ * randomly either programs-then-cancels the timer (about 20% of the time)
+ * or programs the timer and power-down suspends until it fires.
+ * Returns FAIL on any timer API error, SKIPPED if the handler cannot be
+ * unregistered at the end, SUCCESS otherwise.
+ */
+static test_result_t do_stress_test(void)
+{
+	unsigned int power_state;
+	unsigned int stateid;
+	unsigned int timer_int_interval;
+	unsigned int verify_cancel;
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	unsigned long long end_time;
+	unsigned long long current_time;
+	int ret;
+
+	/* Signal to the lead CPU that this core has entered the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	/* Deadline: current counter value plus ~10 seconds of ticks */
+	end_time = mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO) + read_cntfrq_el0() * 10;
+
+	/* Construct the state-id for power down */
+	ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+			PSTATE_TYPE_POWERDOWN, &stateid);
+	if (ret != PSCI_E_SUCCESS) {
+		tftf_testcase_printf("Failed to construct composite state\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Register a handler to confirm its woken by programmed interrupt */
+	ret = tftf_timer_register_handler(requested_irq_handler);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register timer handler:0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	do {
+		current_time = mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO);
+		if (current_time > end_time)
+			break;
+
+		/* Random interval of 1..5 ms on top of the suspend time */
+		timer_int_interval = 1 + rand() % 5;
+		verify_cancel = rand() % 5;
+
+		requested_irq_received[core_pos] = 0;
+
+		/*
+		 * If verify_cancel == 0, cancel the programmed timer. As it can
+		 * take values from 0 to 4, we will be cancelling only 20% of
+		 * times.
+		 */
+		if (!verify_cancel) {
+			ret = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME +
+					timer_int_interval);
+			if (ret != 0) {
+				tftf_testcase_printf("Failed to program timer: "
+						"0x%x\n", ret);
+				return TEST_RESULT_FAIL;
+			}
+
+			ret = tftf_cancel_timer();
+			if (ret != 0) {
+				tftf_testcase_printf("Failed to cancel timer: "
+						"0x%x\n", ret);
+				return TEST_RESULT_FAIL;
+			}
+		} else {
+			power_state = tftf_make_psci_pstate(
+					MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN,
+					stateid);
+
+			ret = tftf_program_timer_and_suspend(
+				PLAT_SUSPEND_ENTRY_TIME + timer_int_interval,
+				power_state, NULL, NULL);
+			if (ret != 0) {
+				tftf_testcase_printf("Failed to program timer "
+						"or suspend: 0x%x\n", ret);
+				return TEST_RESULT_FAIL;
+			}
+
+			if (!requested_irq_received[core_pos]) {
+				/*
+				 * Cancel the interrupt as the CPU has been
+				 * woken by some other interrupt
+				 */
+				ret = tftf_cancel_timer();
+				if (ret != 0) {
+					tftf_testcase_printf("Failed to cancel timer:0x%x\n", ret);
+					return TEST_RESULT_FAIL;
+				}
+			}
+		}
+	} while (1);
+
+	ret = tftf_timer_unregister_handler();
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to unregister timer handler:0x%x\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress tests timer framework by requesting combination of
+ * timer requests with SUSPEND and cancel calls.
+ *
+ * Powers on every CPU, each of which runs do_stress_test() concurrently;
+ * the lead CPU runs it too and its result is the test result.
+ *
+ * Returns SUCCESS if all the cores successfully wakes up from suspend
+ * and returns back to the framework.
+ */
+test_result_t stress_test_timer_framework(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int cpu_mpid, cpu_node;
+	unsigned int core_pos;
+	unsigned int rc;
+
+	/* Reset per-CPU state shared with the IRQ handler */
+	for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		tftf_init_event(&cpu_ready[i]);
+		requested_irq_received[i] = 0;
+	}
+	/*
+	 * Preparation step: Power on all cores.
+	 */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		rc = tftf_cpu_on(cpu_mpid,
+				(uintptr_t) do_stress_test,
+				0);
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				cpu_mpid, rc);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPUs to be ready */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* The lead CPU runs the stress loop itself and reports its result */
+	return do_stress_test();
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_events.c b/tftf/tests/framework_validation_tests/test_validation_events.c
new file mode 100644
index 000000000..e229b17aa
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_events.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/* Events structures used by this test case */
+static event_t lead_cpu_event;
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+static event_t test_is_finished;
+
+/*
+ * Entry point for the non-lead CPUs taking part in the events API test.
+ * Announces arrival to the lead CPU, then participates in the two
+ * rendezvous points driven by the lead CPU.
+ */
+static test_result_t non_lead_cpu_fn(void)
+{
+	unsigned int core_pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+	/* First rendezvous: wait for the lead CPU's go-ahead */
+	tftf_wait_for_event(&lead_cpu_event);
+
+	/*
+	 * Delay so that the lead CPU sends the final event before this CPU
+	 * starts waiting for it, exercising the send-before-wait ordering.
+	 */
+	waitms(500);
+	tftf_wait_for_event(&test_is_finished);
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate the events API
+ *
+ * This test exercises the events API.
+ * - It creates a sequence of events sending and receiving. The order of
+ *   operations is ensured by inserting delays at strategic points.
+ * - It tests the communication in both directions (i.e. from CPUx to CPUy and
+ *   vice versa).
+ * - It tests that it doesn't matter whether CPUx waits for the event first
+ *   then CPUy sends the event, or that things happen in the other order.
+ * - It tests the API on a single CPU.
+ *
+ * This test is skipped if an error occurs during the bring-up of non-lead CPUs.
+ * Otherwise, this test always returns success. If something goes wrong, the
+ * test will most probably hang because the system will go into a WFE/SEV dead
+ * lock.
+ */
+test_result_t test_validation_events(void)
+{
+	unsigned int lead_cpu;
+	unsigned int cpu_mpid;
+	unsigned int cpu_node;
+	unsigned int core_pos;
+	unsigned int cpus_count;
+	int psci_ret;
+
+	lead_cpu = read_mpidr_el1() & MPID_MASK;
+
+	/*
+	 * The events API should work on a single CPU, provided that the event
+	 * is sent before we wait for it. If we do things the other way around,
+	 * the CPU will end up stuck in WFE state.
+	 */
+	tftf_send_event(&lead_cpu_event);
+	tftf_wait_for_event(&lead_cpu_event);
+
+	/* Re-init lead_cpu_event to be able to reuse it */
+	tftf_init_event(&lead_cpu_event);
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				cpu_mpid, psci_ret);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all CPUs to have entered the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	/*
+	 * Introduce a delay so that the non-lead CPUs will wait for this event
+	 * before the lead CPU sends it.
+	 */
+	waitms(500);
+	/* Send the event to half of the CPUs (exercises a partial send) */
+	cpus_count = PLATFORM_CORE_COUNT / 2;
+	tftf_send_event_to(&lead_cpu_event, cpus_count);
+	waitms(500);
+	/* Send the event to the other half of the CPUs */
+	tftf_send_event_to(&lead_cpu_event, PLATFORM_CORE_COUNT - cpus_count);
+
+	/* Signal termination of the test to all CPUs */
+	tftf_send_event_to_all(&test_is_finished);
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_irq.c b/tftf/tests/framework_validation_tests/test_validation_irq.c
new file mode 100644
index 000000000..2e9e37322
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_irq.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <irq.h>
+#include <platform.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+
+static volatile unsigned int counter;
+
+/*
+ * IRQ handler for SGI #0.
+ * Bump the shared counter so the main test routine can observe that the
+ * handler actually ran.
+ */
+static int increment_counter(void *data)
+{
+	counter += 1;
+	return 0;
+}
+
+#if !DEBUG
+/*
+ * Alternative SGI handler used only in release builds to check that an
+ * already-registered handler cannot be overwritten: the test verifies the
+ * counter never becomes 42, i.e. this handler never runs.
+ */
+static int set_counter_to_42(void *data)
+{
+	counter = 42;
+	return 0;
+}
+#endif
+
+/*
+ * @Test_Aim@ Test IRQ handling on lead CPU
+ *
+ * Check that IRQ enabling/disabling and IRQ handler registering/unregistering
+ * work as expected on the lead CPU.
+ */
+test_result_t test_validation_irq(void)
+{
+	unsigned int mpid = read_mpidr_el1();
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	const unsigned int sgi_id = IRQ_NS_SGI_0;
+	int ret;
+
+	counter = 0;
+
+	/* Now register a handler */
+	ret = tftf_irq_register_handler(sgi_id, increment_counter);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register initial IRQ handler\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+	/*
+	 * Send the SGI to the calling core and check the IRQ handler has been
+	 * successfully called
+	 */
+	tftf_send_sgi(sgi_id, core_pos);
+
+	/* Wait till the handler is executed */
+	while (counter != 1)
+		;
+
+	/*
+	 * Try to overwrite the IRQ handler. This should fail.
+	 * In debug builds, it would trigger an assertion so we can't test that
+	 * as it will stop the test session.
+	 * In release builds though, it should just do nothing, i.e. it won't
+	 * replace the existing handler and that's something that can be tested.
+	 */
+#if !DEBUG
+	ret = tftf_irq_register_handler(sgi_id, set_counter_to_42);
+	if (ret == 0) {
+		tftf_testcase_printf(
+			"Overwriting the IRQ handler should have failed\n");
+		return TEST_RESULT_FAIL;
+	}
+#endif
+
+	/* The original handler must still be in place: counter goes to 2 */
+	tftf_send_sgi(sgi_id, core_pos);
+	while (counter != 2)
+		;
+
+	/* Unregister the IRQ handler */
+	ret = tftf_irq_unregister_handler(sgi_id);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to unregister IRQ handler\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Send the SGI to the calling core and check the former IRQ handler
+	 * has not been called, now that it has been unregistered.
+	 */
+	tftf_send_sgi(sgi_id, core_pos);
+
+	/*
+	 * Wait for some time so that SGI interrupts the processor, Normally it
+	 * takes a small but finite time for the IRQ to be sent to processor
+	 */
+	waitms(500);
+
+	/* Counter unchanged means the unregistered handler did not run */
+	if (counter != 2) {
+		tftf_testcase_printf(
+			"IRQ handler hasn't been successfully unregistered\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Try to unregister the IRQ handler again. This should fail.
+	 * In debug builds, it would trigger an assertion so we can't test that
+	 * as it will stop the test session.
+	 * In release builds though, it should just do nothing.
+	 */
+#if !DEBUG
+	ret = tftf_irq_unregister_handler(sgi_id);
+	if (ret == 0) {
+		tftf_testcase_printf(
+			"Unregistering the IRQ handler again should have failed\n");
+		return TEST_RESULT_FAIL;
+	}
+#endif
+
+	tftf_irq_disable(sgi_id);
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_nvm.c b/tftf/tests/framework_validation_tests/test_validation_nvm.c
new file mode 100644
index 000000000..7a10e190d
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_nvm.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <nvm.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+#define PER_CPU_BUFFER_OFFSET 0x08
+
+/* Events to specify activity to lead cpu */
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static event_t test_done[PLATFORM_CORE_COUNT];
+
+/* Used to make concurrent access to flash by all the cores */
+static volatile int cpu_concurrent_write;
+
+/*
+ * @Test_Aim@ Test Non-Volatile Memory support
+ *
+ * Try reading/writing data from/to NVM to check that basic NVM support is
+ * working as expected.
+ */
+test_result_t test_validation_nvm(void)
+{
+	unsigned int written_value = 0x12345678;
+	unsigned int read_back = 0;
+	STATUS status;
+
+	/* Write a known pattern into the test case scratch buffer */
+	status = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer),
+			&written_value, sizeof(written_value));
+	if (status != STATUS_SUCCESS) {
+		tftf_testcase_printf("tftf_nvm_write: error %d\n", status);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Read the same location back */
+	status = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer),
+			&read_back, sizeof(read_back));
+	if (status != STATUS_SUCCESS) {
+		tftf_testcase_printf("tftf_nvm_read: error (%d)\n", status);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* The value read back must match what was written */
+	if (written_value != read_back) {
+		tftf_testcase_printf("Values mismatch: %u != %u\n",
+				written_value, read_back);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Odd CPU's write to flash and even CPU's read the flash */
+static unsigned int access_flash_concurrent(unsigned int core_pos)
+{
+	unsigned int status;
+	unsigned int scratch;
+
+	if ((core_pos % 2) == 0) {
+		/* Dummy read */
+		status = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer),
+				&scratch, sizeof(scratch));
+		if (status != STATUS_SUCCESS) {
+			tftf_testcase_printf("Read failed\n");
+			return TEST_RESULT_FAIL;
+		}
+		return TEST_RESULT_SUCCESS;
+	}
+
+	/* Odd CPUs store their own index at a per-CPU offset */
+	status = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer)
+			+ core_pos * PER_CPU_BUFFER_OFFSET,
+			&core_pos, sizeof(core_pos));
+	if (status != STATUS_SUCCESS) {
+		tftf_testcase_printf("Write failed\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test concurrent memory access to Non-Volatile Memory
+ *
+ * Try reading/writing data from multiple cores to NVM and verify the operations are
+ * serialised and also the device does not crash.
+ *
+ */
+static test_result_t test_validate_nvm_secondary(void)
+{
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	unsigned int result;
+
+	/* Announce arrival, then hold until every CPU is ready to hit flash */
+	tftf_send_event(&cpu_ready[core_pos]);
+	while (!cpu_concurrent_write)
+		;
+
+	result = access_flash_concurrent(core_pos);
+
+	/* Let the lead CPU know this core has finished */
+	tftf_send_event(&test_done[core_pos]);
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test serialisation of access by multiple CPU's
+ *
+ * Try reading/writing data to flash from all the CPU's with as much
+ * concurrency as possible. Check the device does not hang and also
+ * the update to flash happened as expected
+ */
+test_result_t test_validate_nvm_serialisation(void)
+{
+	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int target_mpid, target_node;
+	unsigned int core_pos;
+	unsigned int lead_core_pos;
+	unsigned int test_value;
+	unsigned int ret;
+	int rc;
+	char init_buffer[TEST_BUFFER_SIZE] = {0};
+
+	/* Initialise the scratch flash */
+	ret = tftf_nvm_write(TFTF_STATE_OFFSET(testcase_buffer),
+			&init_buffer,
+			sizeof(init_buffer));
+	if (ret != STATUS_SUCCESS) {
+		tftf_testcase_printf("Write failed\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Power on all the cores */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU as it is already on */
+		if (target_mpid == lead_mpid)
+			continue;
+		rc = tftf_cpu_on(target_mpid,
+				(uintptr_t) test_validate_nvm_secondary,
+				0);
+		if (rc != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n",
+					target_mpid, rc);
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for all non-lead CPU's to be ready */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	lead_core_pos = platform_get_core_pos(read_mpidr_el1());
+
+	/*
+	 * Send event to all CPU's so that we can have as much concurrent
+	 * access to flash as possible
+	 */
+	cpu_concurrent_write = 1;
+
+	ret = access_flash_concurrent(lead_core_pos);
+
+	if (ret != TEST_RESULT_SUCCESS)
+		return ret;
+
+	/* Wait for all non-lead CPU's to complete the test */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+		/* Skip lead CPU */
+		if (target_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&test_done[core_pos]);
+	}
+
+	/*
+	 * Validate the results: odd CPUs must have stored their index at
+	 * their per-CPU offset, even CPUs must have left theirs untouched.
+	 */
+	for_each_cpu(target_node) {
+		target_mpid = tftf_get_mpidr_from_node(target_node);
+
+		core_pos = platform_get_core_pos(target_mpid);
+
+		/*
+		 * Check the read status (previously ignored): a failed read
+		 * would otherwise validate an uninitialised test_value.
+		 */
+		ret = tftf_nvm_read(TFTF_STATE_OFFSET(testcase_buffer) +
+				core_pos * PER_CPU_BUFFER_OFFSET,
+				&test_value,
+				sizeof(test_value));
+		if (ret != STATUS_SUCCESS) {
+			tftf_testcase_printf("Read failed\n");
+			return TEST_RESULT_FAIL;
+		}
+
+		if ((core_pos % 2) && (test_value != core_pos)) {
+			tftf_testcase_printf("Concurrent flash access test "
+				"failed on cpu index: %d test_value:%d \n",
+				core_pos, test_value);
+			return TEST_RESULT_FAIL;
+		} else if (((core_pos % 2) == 0) && (test_value != 0)) {
+			tftf_testcase_printf("Concurrent flash access test "
+				"failed on cpu index: %d test_value:%d \n",
+				core_pos, test_value);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/framework_validation_tests/test_validation_sgi.c b/tftf/tests/framework_validation_tests/test_validation_sgi.c
new file mode 100644
index 000000000..806bc581f
--- /dev/null
+++ b/tftf/tests/framework_validation_tests/test_validation_sgi.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <irq.h>
+#include <platform.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+
+/*
+ * The 2 following global variables are used by the SGI handler to return
+ * information to the main test function.
+ */
+static sgi_data_t sgi_data;
+
+/* Flag to indicate whether the SGI has been handled */
+static volatile unsigned int sgi_handled;
+
+/* IRQ handler for the SGI: records the payload and signals completion */
+static int sgi_handler(void *data)
+{
+	/* Save SGI data */
+	sgi_data = *(sgi_data_t *) data;
+	/* Set the flag last, so the data is in place when the waiter sees it */
+	sgi_handled = 1;
+
+	/* Return value doesn't matter */
+	return 0;
+}
+
+/*
+ * @Test_Aim@ Test SGI support on lead CPU
+ *
+ * 1) Register a local IRQ handler for SGI 0.
+ * 2) Send SGI 0 to the calling core, i.e. the lead CPU.
+ * 3) Check the correctness of the data received in the IRQ handler.
+ *
+ * TODO: Improve this test by sending SGIs to all cores in the system.
+ *       This will ensure that IRQs are correctly configured on all cores.
+ */
+test_result_t test_validation_sgi(void)
+{
+	const unsigned int sgi_id = IRQ_NS_SGI_0;
+	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+	test_result_t result = TEST_RESULT_SUCCESS;
+	int ret;
+
+	/* Install the local handler for the SGI */
+	ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+	if (ret != 0) {
+		tftf_testcase_printf("Failed to register IRQ %u (%d)",
+				sgi_id, ret);
+		return TEST_RESULT_FAIL;
+	}
+	tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+	/* Target the SGI at this very core */
+	tftf_send_sgi(sgi_id, core_pos);
+
+	/* Spin until the handler reports completion via the global flag */
+	while (sgi_handled == 0)
+		;
+
+	/* The handler must have observed the IRQ ID we sent */
+	if (sgi_data.irq_id != sgi_id) {
+		tftf_testcase_printf("Wrong IRQ ID, expected %u, got %u\n",
+				sgi_id, sgi_data.irq_id);
+		result = TEST_RESULT_FAIL;
+	}
+
+	tftf_irq_disable(sgi_id);
+	tftf_irq_unregister_handler(sgi_id);
+
+	return result;
+}
diff --git a/tftf/tests/fwu_tests/test_fwu_auth.c b/tftf/tests/fwu_tests/test_fwu_auth.c
new file mode 100644
index 000000000..ac7d71f49
--- /dev/null
+++ b/tftf/tests/fwu_tests/test_fwu_auth.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <smccc.h>
+#include <status.h>
+#include <tftf_lib.h>
+
/*
 * @Test_Aim@ Validate the FWU AUTH failure case.
 * The Firmware Update feature implemented in Trusted Firmware-A
 * code needs to be tested to check if FWU process gets started
 * when watchdog resets the system due to Authentication
 * failure of an image in BL1/BL2 stage.
 * Test SUCCESS in case Firmware Update was done.
 * Test FAIL in case Firmware Update was not done.
 *
 * The test spans a reboot and therefore runs in two passes:
 *  - pass 1 (not yet rebooted): corrupt the FIP, record the backup FIP
 *    address, then request a PSCI SYSTEM_RESET;
 *  - pass 2 (after reboot): verify that the FWU process updated the FIP.
 */
test_result_t test_fwu_auth(void)
{
	STATUS status;
	unsigned int flag;
	smc_args args = { SMC_PSCI_SYSTEM_RESET };
	smc_ret_values ret = {0};

	if (tftf_is_rebooted()) {
		/*
		 * Second pass: check if the FIP update is done.
		 */
		status = fwu_nvm_read(FWU_TFTF_TESTCASE_BUFFER_OFFSET, &flag, 4);
		if (status != STATUS_SUCCESS) {
			tftf_testcase_printf("Failed to read NVM (%d)\n", status);
			return TEST_RESULT_FAIL;
		}

		if (flag != FIP_IMAGE_UPDATE_DONE_FLAG) {
			tftf_testcase_printf("FIP was not updated\n");
			return TEST_RESULT_FAIL;
		}

		return TEST_RESULT_SUCCESS;
	}

	/*
	 * First pass: corrupt the flash offset so that image authentication
	 * fails in BL1/BL2 and triggers the FWU process.
	 */
	flag = 0xdeadbeef;
	status = fwu_nvm_write(FIP_CORRUPT_OFFSET, &flag, 4);
	if (status != STATUS_SUCCESS) {
		tftf_testcase_printf("Failed to corrupt FIP (%d)\n", status);
		return TEST_RESULT_SKIPPED;
	}

	/*
	 * Provide the backup FIP address for the FWU process to restore from.
	 */
	flag = FIP_BKP_ADDRESS;
	status = fwu_nvm_write(FWU_TFTF_TESTCASE_BUFFER_OFFSET, &flag, 4);
	if (status != STATUS_SUCCESS) {
		tftf_testcase_printf("Failed to update backup FIP address (%d)\n",
			status);
		return TEST_RESULT_SKIPPED;
	}

	/* Notify that we are rebooting now. */
	tftf_notify_reboot();

	/* Request PSCI system reset. */
	ret = tftf_smc(&args);

	/* The PSCI SYSTEM_RESET call is not supposed to return */
	tftf_testcase_printf("System didn't reboot properly (%d)\n",
			(unsigned int)ret.ret0);

	return TEST_RESULT_FAIL;
}
diff --git a/tftf/tests/fwu_tests/test_fwu_toc.c b/tftf/tests/fwu_tests/test_fwu_toc.c
new file mode 100644
index 000000000..6d220aa1b
--- /dev/null
+++ b/tftf/tests/fwu_tests/test_fwu_toc.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <firmware_image_package.h>
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <psci.h>
+#include <smccc.h>
+#include <status.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Validate the FWU ToC invalid case.
+ * The Firmware Update feature implemented in Trusted Firmware-A
+ * code needs to be tested to check if FWU process gets started
+ * or not when the ToC header value in fip.bin is invalid.
+ * Test SUCCESS in case ToC is found valid.
+ * Test FAIL in case ToC is found invalid.
+ */
+test_result_t test_fwu_toc(void)
+{
+ STATUS status;
+ unsigned int toc_header;
+ smc_args args = { SMC_PSCI_SYSTEM_RESET };
+ smc_ret_values ret = {0};
+
+ if (tftf_is_rebooted()) {
+ /*
+ * Check whether we successfully resumed from the
+ * Firmware Update process. If we have, then the
+ * ToC header value will have been repaired.
+ */
+ status = fwu_nvm_read(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to read NVM (%d)\n", status);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (toc_header != TOC_HEADER_NAME) {
+ tftf_testcase_printf("ToC is Invalid (%u)\n", toc_header);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+ }
+
+ /* Corrupt the TOC in fip.bin. */
+ toc_header = 0xdeadbeef;
+ status = fwu_nvm_write(0, &toc_header, 4);
+ if (status != STATUS_SUCCESS) {
+ tftf_testcase_printf("Failed to overwrite the ToC header (%d)\n",
+ status);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Notify that we are rebooting now. */
+ tftf_notify_reboot();
+
+ /* Request PSCI system reset. */
+ ret = tftf_smc(&args);
+
+ /* The PSCI SYSTEM_RESET call is not supposed to return */
+ tftf_testcase_printf("System didn't reboot properly (%d)\n",
+ (unsigned int)ret.ret0);
+
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c b/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c
new file mode 100644
index 000000000..78289bbe6
--- /dev/null
+++ b/tftf/tests/misc_tests/boot_req_tests/test_cntfrq.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static test_result_t cntfrq_check(void)
+{
+ u_register_t cntfrq_el0, ns_cntfrq;
+ cntfrq_el0 = read_cntfrq_el0();
+
+ ns_cntfrq = mmio_read_32(SYS_CNT_BASE1 + CNTBASEN_CNTFRQ);
+
+ if (cntfrq_el0 != ns_cntfrq) {
+ tftf_testcase_printf("CNTFRQ read from sys_reg = %llx and NS timer = %llx differs/n",
+ (unsigned long long)cntfrq_el0,
+ (unsigned long long)ns_cntfrq);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
/*
 * The ARM ARM says that the cntfrq_el0, cntfrq memory mapped register and
 * the RO views in NS timer frames must all be initialized by the firmware.
 * (See I3.6.7 and D7.5.1 section in ARM ARM).
 * This tests the same on all the CPUs in the system: every secondary CPU
 * is powered on with cntfrq_check() as its entry point, and the lead CPU
 * runs the same check itself.
 * Returns:
 *   TEST_RESULT_SUCCESS: if all the cntfrq values match
 *   TEST_RESULT_FAIL: if any of the cntfrq value mismatch
 */
test_result_t test_cntfrq_check(void)
{
	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
	unsigned int cpu_node, cpu_mpid;
	/* Holds the PSCI return code first, then the lead CPU's test result */
	int rc;

	/* Bring every CPU online */
	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		/* Skip lead CPU as it is already on */
		if (cpu_mpid == lead_mpid)
			continue;

		rc = tftf_cpu_on(cpu_mpid,
				(uintptr_t) cntfrq_check,
				0);
		if (rc != PSCI_E_SUCCESS) {
			tftf_testcase_printf(
				"Failed to power on CPU 0x%x (%d)\n",
				cpu_mpid, rc);
			return TEST_RESULT_FAIL;
		}
	}

	/* Run the check on the lead CPU as well */
	rc = cntfrq_check();

	/* Wait for the CPUs to turn OFF */
	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);

		/* Wait for all non lead CPUs to turn OFF before returning */
		if (cpu_mpid == lead_mpid)
			continue;

		/* Wait for the target CPU to turn OFF */
		while (tftf_psci_affinity_info(cpu_mpid,
				MPIDR_AFFLVL0) != PSCI_STATE_OFF)
			;
	}

	return rc;
}
diff --git a/tftf/tests/misc_tests/inject_serror.S b/tftf/tests/misc_tests/inject_serror.S
new file mode 100644
index 000000000..0d7dbf2d7
--- /dev/null
+++ b/tftf/tests/misc_tests/inject_serror.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <sdei.h>
+
+
+#ifdef AARCH64
+ .globl inject_serror
+ .globl inject_uncontainable
+ .globl serror_sdei_event_handler
+
/*
 * Program the fault injection registers for one error record, start its
 * count-down timer, and optionally spin until a handler signals that the
 * resulting fault has been observed.
 * Note that Trusted Firmware must be compiled for ARMv8.4 along with
 * FAULT_INJECTION_SUPPORT=1 for this to work. Besides, the model has to be
 * launched with fault inject support.
 *
 * x0: Fault record number to program
 * x1: Injected fault properties
 * x2: Type of error to be generated
 * x3: Memory location to wait for, or 0 if no waiting is required
 */
func inject_serror_record
	/* Select the error record supplied in x0 */
	msr	ERRSELR_EL1, x0
	isb

	/* Enable error reporting for the selected record */
	orr	x1, x1, #ERXCTLR_ED_BIT
	msr	ERXCTLR_EL1, x1

	/* Program count down timer to 1 */
	mov	x0, #1
	msr	ERXPFGCDN_EL1, x0

	/* Start count down to generate error */
	orr	x2, x2, #ERXPFGCTL_CDEN_BIT
	msr	ERXPFGCTL_EL1, x2
	isb

	/* Skip the wait loop when no wait address was supplied */
	cbz	x3, 2f

	/* Clear SError received flag */
	str	xzr, [x3, #0]
	sevl

1:
	/* Spin until the flag at [x3] becomes non-zero */
	wfe
	ldr	x0, [x3, #0]
	cbz	x0, 1b

2:
	ret
endfunc inject_serror_record
+
/*
 * Inject an Unrecoverable error through fault record 0. Wait until
 * serror_received is set by the SDEI handler in response to receiving the
 * event.
 */
func inject_serror
	/* Inject fault into record 0 */
	mov	x0, #0

	/*
	 * Enable error reporting.
	 * NOTE(review): ERXCTLR_EL1 is written here before ERRSELR_EL1 is
	 * programmed inside inject_serror_record; presumably record 0 is
	 * already selected at this point — confirm.
	 */
	mov	x1, #ERXCTLR_UE_BIT
	msr	ERXCTLR_EL1, x1

	/* Injected fault control: Unrecoverable error */
	mov	x2, #ERXPFGCTL_UEU_BIT

	/* Wait address: &serror_received */
	adrp	x3, serror_received
	add	x3, x3, :lo12:serror_received

	/* Tail call: inject_serror_record's ret returns to our caller */
	b	inject_serror_record
endfunc inject_serror
+
/*
 * Inject Uncontainable error through fault record 0. This function doesn't
 * wait for a completion flag, as the handling is terminal in EL3.
 */
func inject_uncontainable
	/* Inject fault into record 0 */
	mov	x0, #0

	/* No additional fault properties */
	mov	x1, xzr

	/* Injected fault control: Uncontainable error */
	mov	x2, #ERXPFGCTL_UC_BIT

	/* Nothing to wait for */
	mov	x3, xzr

	/* Tail call: inject_serror_record's ret returns to our caller */
	b	inject_serror_record
endfunc inject_uncontainable
+
/*
 * SDEI event handler for SErrors.
 * Calls the C-level serror_handler(), then issues an SDEI_EVENT_COMPLETE
 * SMC. The trailing branch-to-self is a safety net; the COMPLETE call is
 * not expected to return here.
 */
func serror_sdei_event_handler
	stp	x29, x30, [sp, #-16]!
	bl	serror_handler
	ldp	x29, x30, [sp], #16
	mov_imm	x0, SDEI_EVENT_COMPLETE
	mov	x1, xzr
	smc	#0
	b	.
endfunc serror_sdei_event_handler
+#endif
diff --git a/tftf/tests/misc_tests/test_single_fault.c b/tftf/tests/misc_tests/test_single_fault.c
new file mode 100644
index 000000000..e6522118e
--- /dev/null
+++ b/tftf/tests/misc_tests/test_single_fault.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <sdei.h>
+#include <tftf_lib.h>
+
+#ifndef AARCH32
+
/* Set to 1 by serror_handler() once the SError SDEI event is received. */
uint64_t serror_received;

/* Implemented in inject_serror.S. */
extern void inject_serror(void);

/*
 * C-level SDEI handler, invoked from serror_sdei_event_handler (assembly).
 * Records that the event arrived and reports it through the framework.
 */
int serror_handler(int ev, uint64_t arg)
{
	serror_received = 1;
	tftf_testcase_printf("SError SDEI event received.\n");

	return 0;
}
+
/* Assembly SDEI entry point (inject_serror.S) wrapping serror_handler(). */
extern int serror_sdei_event_handler(int ev, uint64_t arg);

/*
 * Register and enable SDEI event 5000 routed to this PE, unmask SDEI on
 * the PE, then inject an SError. inject_serror() spins until the handler
 * has set serror_received, so reaching the final return means the event
 * was delivered.
 *
 * NOTE(review): the SDEI event is not disabled/unregistered on exit;
 * confirm whether subsequent tests rely on a clean SDEI state.
 */
test_result_t test_single_fault(void)
{
	int64_t ret;
	const int event_id = 5000;

	/* Register SDEI handler */
	ret = sdei_event_register(event_id, serror_sdei_event_handler, 0,
			SDEI_REGF_RM_PE, read_mpidr_el1());
	if (ret < 0) {
		tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
				ret);
		return TEST_RESULT_FAIL;
	}

	ret = sdei_event_enable(event_id);
	if (ret < 0) {
		tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
		return TEST_RESULT_FAIL;
	}

	ret = sdei_pe_unmask();
	if (ret < 0) {
		tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
		return TEST_RESULT_FAIL;
	}

	/* Blocks until the SDEI handler has observed the SError */
	inject_serror();

	return TEST_RESULT_SUCCESS;
}
+
+#else
+
/* AArch32 build: fault injection is not implemented here, so skip. */
test_result_t test_single_fault(void)
{
	tftf_testcase_printf("Not supported on AArch32.\n");
	return TEST_RESULT_SKIPPED;
}
+
+#endif
diff --git a/tftf/tests/misc_tests/test_uncontainable.c b/tftf/tests/misc_tests/test_uncontainable.c
new file mode 100644
index 000000000..79c903114
--- /dev/null
+++ b/tftf/tests/misc_tests/test_uncontainable.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+
+#ifndef AARCH32
+
/* Implemented in inject_serror.S; injects an Uncontainable error. */
extern void inject_uncontainable(void);

/*
 * Inject an Uncontainable error.
 * NOTE(review): per inject_serror.S, handling of this error is terminal
 * in EL3, so the SUCCESS return below may never actually be reached.
 */
test_result_t test_uncontainable(void)
{
	inject_uncontainable();

	return TEST_RESULT_SUCCESS;
}
+
+#else
+
/* AArch32 build: fault injection is not implemented here, so skip. */
test_result_t test_uncontainable(void)
{
	tftf_testcase_printf("Not supported on AArch32.\n");
	return TEST_RESULT_SKIPPED;
}
+
+#endif
diff --git a/tftf/tests/performance_tests/smc_latencies.c b/tftf/tests/performance_tests/smc_latencies.c
new file mode 100644
index 000000000..fe226790e
--- /dev/null
+++ b/tftf/tests/performance_tests/smc_latencies.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains tests that measure the round trip latency of an SMC.
+ * The SMC calls used are simple ones (PSCI_VERSION and the Standard Service
+ * UID) that involve almost no handling on the EL3 firmware's side so that we
+ * come close to measuring the overhead of the SMC itself.
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <string.h>
+#include <tftf_lib.h>
+#include <utils_def.h>
+
/* Number of SMC round trips measured per test run. */
#define ITERATIONS_CNT 1000
/* Per-iteration cycle counts, dumped via NOTICE() after each run. */
static unsigned long long raw_results[ITERATIONS_CNT];

/* Latency information in nano-seconds */
struct latency_info {
	unsigned long long min;
	unsigned long long max;
	unsigned long long avg;
};

/* Convert a generic-counter cycle count to nanoseconds using CNTFRQ_EL0. */
static inline unsigned long long cycles_to_ns(unsigned long long cycles)
{
	unsigned long long freq = read_cntfrq_el0();

	/*
	 * NOTE(review): cycles * 10^9 can overflow 64 bits for very large
	 * cycle counts (above ~1.8e10 cycles); acceptable for the short
	 * durations measured here — confirm before reusing elsewhere.
	 */
	return (cycles * 1000000000) / freq;
}
+
+/*
+ * Send the given SMC 'ITERATIONS_CNT' times, measure the time it takes to
+ * return back from the SMC call each time, and gather some statistics across
+ * the whole series.
+ *
+ * The statistics consist of:
+ * - minimum latency (i.e the shortest duration across the whole series);
+ * - maximum latency (i.e the longest duration across the whole series);
+ * - average latency.
+ *
+ * These statistics are stored in the latency_info structure whose address
+ * is passed as an argument.
+ *
+ * This function also prints some additional, intermediate information, like the
+ * number of cycles for each SMC and the average number of cycles for an SMC
+ * round trip.
+ */
+static void test_measure_smc_latency(const smc_args *smc_args,
+ struct latency_info *latency)
+{
+ unsigned long long cycles;
+ unsigned long long min_cycles;
+ unsigned long long max_cycles;
+ unsigned long long avg_cycles;
+ unsigned long long cycles_sum = 0;
+
+ min_cycles = UINT64_MAX;
+ max_cycles = 0;
+ memset(raw_results, 0, sizeof(raw_results));
+
+ for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
+ cycles = read_cntpct_el0();
+ tftf_smc(smc_args);
+ cycles = read_cntpct_el0() - cycles;
+
+ min_cycles = MIN(min_cycles, cycles);
+ max_cycles = MAX(max_cycles, cycles);
+
+ cycles_sum += cycles;
+
+ raw_results[i] = cycles;
+ }
+
+ avg_cycles = cycles_sum / ITERATIONS_CNT;
+ tftf_testcase_printf("Average number of cycles: %llu\n",
+ (unsigned long long) avg_cycles);
+ latency->min = cycles_to_ns(min_cycles);
+ latency->max = cycles_to_ns(max_cycles);
+ latency->avg = cycles_to_ns(avg_cycles);
+
+ NOTICE("Raw results:\n");
+ for (unsigned int i = 0; i < ITERATIONS_CNT; ++i) {
+ NOTICE("%llu cycles\t%llu ns\n",
+ raw_results[i], cycles_to_ns(raw_results[i]));
+ }
+}
+
+/*
+ * Measure the latency of the PSCI_VERSION SMC and print the result.
+ * This test always succeed.
+ */
+test_result_t smc_psci_version_latency(void)
+{
+ struct latency_info latency;
+ smc_args args = { SMC_PSCI_VERSION };
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Measure the latency of the Standard Service Call UID SMC and print the
+ * result.
+ * This test always succeed.
+ */
+test_result_t smc_std_svc_call_uid_latency(void)
+{
+ struct latency_info latency;
+ smc_args args = { SMC_STD_SVC_UID };
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t smc_arch_workaround_1(void)
+{
+ struct latency_info latency;
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_1 is implemented */
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_1;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ printf("SMCCC_ARCH_WORKAROUND_1 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.arg0 = SMCCC_ARCH_WORKAROUND_1;
+
+ test_measure_smc_latency(&args, &latency);
+ tftf_testcase_printf(
+ "Average time: %llu ns (ranging from %llu to %llu)\n",
+ (unsigned long long) latency.avg,
+ (unsigned long long) latency.min,
+ (unsigned long long) latency.max);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c
new file mode 100644
index 000000000..4a45ad4d8
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_1.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef AARCH64
/* MIDRs of the CPUs covered by the expectation table below. */
#define CORTEX_A57_MIDR 0x410FD070
#define CORTEX_A72_MIDR 0x410FD080
#define CORTEX_A73_MIDR 0x410FD090
#define CORTEX_A75_MIDR 0x410FD0A0

static int cortex_a57_test(void);
static int csv2_test(void);

/*
 * Table mapping a CPU MIDR to a callback that reports whether
 * SMCCC_ARCH_WORKAROUND_1 is expected to be required on that CPU
 * (1 = required, 0 = not required).
 */
static struct ent {
	unsigned int midr;
	int (*wa_required)(void);
} entries[] = {
	{ .midr = CORTEX_A57_MIDR, .wa_required = cortex_a57_test },
	{ .midr = CORTEX_A72_MIDR, .wa_required = csv2_test },
	{ .midr = CORTEX_A73_MIDR, .wa_required = csv2_test },
	{ .midr = CORTEX_A75_MIDR, .wa_required = csv2_test },
};
+
/* Cortex-A57: the workaround is always expected to be required. */
static int cortex_a57_test(void)
{
	return 1;
}
+
+static int csv2_test(void)
+{
+ uint64_t pfr0;
+
+ pfr0 = read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT;
+ if ((pfr0 & ID_AA64PFR0_CSV2_MASK) == 1)
+ return 0;
+ return 1;
+}
+
/*
 * Per-CPU test routine: query SMCCC_ARCH_FEATURES for
 * SMCCC_ARCH_WORKAROUND_1 and cross-check the firmware's answer against
 * the per-MIDR expectation table above, then invoke the workaround once
 * to make sure it is callable.
 */
static test_result_t test_smccc_entrypoint(void)
{
	smc_args args;
	smc_ret_values ret;
	int32_t expected_ver;
	unsigned int my_midr, midr_mask;
	int wa_required;
	size_t i;

	/* Check if SMCCC version is at least v1.1 */
	expected_ver = MAKE_SMCCC_VERSION(1, 1);
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_VERSION;
	ret = tftf_smc(&args);
	if ((int32_t)ret.ret0 < expected_ver) {
		tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
		       (int)ret.ret0);
		return TEST_RESULT_SKIPPED;
	}

	/* Check if SMCCC_ARCH_WORKAROUND_1 is required or not */
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_ARCH_FEATURES;
	args.arg1 = SMCCC_ARCH_WORKAROUND_1;
	ret = tftf_smc(&args);
	if ((int)ret.ret0 == -1) {
		tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_1 is not implemented\n");
		return TEST_RESULT_SKIPPED;
	}

	/* If the call returns 0, it means the workaround is required */
	if ((int)ret.ret0 == 0)
		wa_required = 1;
	else
		wa_required = 0;

	/* Check if the SMC return value matches our expectations */
	my_midr = (unsigned int)read_midr_el1();
	/* Compare only the implementer and part number fields of the MIDR */
	midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
		(MIDR_PN_MASK << MIDR_PN_SHIFT);
	for (i = 0; i < ARRAY_SIZE(entries); i++) {
		struct ent *entp = &entries[i];

		if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
			if (entp->wa_required() != wa_required)
				return TEST_RESULT_FAIL;
			break;
		}
	}
	/* A CPU missing from the table must not need the workaround */
	if (i == ARRAY_SIZE(entries) && wa_required) {
		tftf_testcase_printf("TFTF workaround table out of sync with TF\n");
		return TEST_RESULT_FAIL;
	}

	/* Invoke the workaround to make sure nothing nasty happens */
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_ARCH_WORKAROUND_1;
	tftf_smc(&args);
	return TEST_RESULT_SUCCESS;
}
+
/*
 * Run test_smccc_entrypoint() on every CPU: power each secondary on with
 * the entry point as its start routine, wait for it to power back off,
 * then run the same check on the lead CPU and return its result.
 */
test_result_t test_smccc_arch_workaround_1(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	lead_mpid = read_mpidr_el1() & MPID_MASK;

	/* Power on all the non-lead cores. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
		    (uintptr_t)test_smccc_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
			    (unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
		/*
		 * Wait for test_smccc_entrypoint to return
		 * and the CPU to power down
		 */
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
			PSCI_STATE_OFF)
			continue;
	}

	return test_smccc_entrypoint();
}
+#else
/* AArch32 build: this check is AArch64-only, so skip. */
test_result_t test_smccc_arch_workaround_1(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}
+#endif
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c
new file mode 100644
index 000000000..dd0542ca3
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_2.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef AARCH64
/* MIDR of the only CPU covered by the expectation table below. */
#define CORTEX_A76_MIDR 0x410fd0b0

static int cortex_a76_test(void);

/*
 * Table mapping a CPU MIDR to a callback that reports whether
 * SMCCC_ARCH_WORKAROUND_2 is expected to be required on that CPU
 * (1 = required, 0 = not required).
 */
static struct ent {
	unsigned int midr;
	int (*wa_required)(void);
} entries[] = {
	{ .midr = CORTEX_A76_MIDR, .wa_required = cortex_a76_test },
};

/* Cortex-A76: the workaround is always expected to be required. */
static int cortex_a76_test(void)
{
	return 1;
}
+
/*
 * Per-CPU test routine: query SMCCC_ARCH_FEATURES for
 * SMCCC_ARCH_WORKAROUND_2 and cross-check the firmware's answer against
 * the per-MIDR expectation table above, then invoke the workaround once
 * to make sure it is callable.
 */
static test_result_t test_smccc_entrypoint(void)
{
	smc_args args;
	smc_ret_values ret;
	int32_t expected_ver;
	unsigned int my_midr, midr_mask;
	int wa_required;
	size_t i;

	/* Check if SMCCC version is at least v1.1 */
	expected_ver = MAKE_SMCCC_VERSION(1, 1);
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_VERSION;
	ret = tftf_smc(&args);
	if ((int32_t)ret.ret0 < expected_ver) {
		tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
		       (int)ret.ret0);
		return TEST_RESULT_SKIPPED;
	}

	/* Check if SMCCC_ARCH_WORKAROUND_2 is required or not */
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_ARCH_FEATURES;
	args.arg1 = SMCCC_ARCH_WORKAROUND_2;
	ret = tftf_smc(&args);
	if ((int)ret.ret0 == -1) {
		tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_2 is not implemented\n");
		return TEST_RESULT_SKIPPED;
	}

	/* If the call returns 0, it means the workaround is required */
	if ((int)ret.ret0 == 0)
		wa_required = 1;
	else
		wa_required = 0;

	/* Check if the SMC return value matches our expectations */
	my_midr = (unsigned int)read_midr_el1();
	/* Compare only the implementer and part number fields of the MIDR */
	midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
		(MIDR_PN_MASK << MIDR_PN_SHIFT);
	for (i = 0; i < ARRAY_SIZE(entries); i++) {
		struct ent *entp = &entries[i];

		if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
			if (entp->wa_required() != wa_required)
				return TEST_RESULT_FAIL;
			break;
		}
	}
	/* A CPU missing from the table must not need the workaround */
	if (i == ARRAY_SIZE(entries) && wa_required) {
		tftf_testcase_printf("TFTF workaround table out of sync with TF\n");
		return TEST_RESULT_FAIL;
	}

	/* Invoke the workaround to make sure nothing nasty happens */
	memset(&args, 0, sizeof(args));
	args.arg0 = SMCCC_ARCH_WORKAROUND_2;
	tftf_smc(&args);
	return TEST_RESULT_SUCCESS;
}
+
/*
 * Run test_smccc_entrypoint() on every CPU: power each secondary on with
 * the entry point as its start routine, wait for it to power back off,
 * then run the same check on the lead CPU and return its result.
 */
test_result_t test_smccc_arch_workaround_2(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	lead_mpid = read_mpidr_el1() & MPID_MASK;

	/* Power on all the non-lead cores. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
		    (uintptr_t)test_smccc_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
			    (unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
		/*
		 * Wait for test_smccc_entrypoint to return
		 * and the CPU to power down
		 */
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
			PSCI_STATE_OFF)
			continue;
	}

	return test_smccc_entrypoint();
}
+#else
/* AArch32 build: this check is AArch64-only, so skip. */
test_result_t test_smccc_arch_workaround_2(void)
{
	INFO("%s skipped on AArch32\n", __func__);
	return TEST_RESULT_SKIPPED;
}
+#endif
diff --git a/tftf/tests/runtime_services/secure_service/secure_service_helpers.c b/tftf/tests/runtime_services/secure_service/secure_service_helpers.c
new file mode 100644
index 000000000..8675daeea
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/secure_service_helpers.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <secure_partition.h>
+#include <string.h>
+
+
+secure_partition_request_info_t *create_sps_request(uint32_t id,
+ const void *data,
+ uint64_t data_size)
+{
+ secure_partition_request_info_t *sps_request
+ = (void *) ARM_SECURE_SERVICE_BUFFER_BASE;
+ sps_request->id = id;
+ sps_request->data_size = data_size;
+ if (data_size != 0)
+ memcpy(sps_request->data, data, data_size);
+ return sps_request;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c b/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c
new file mode 100644
index 000000000..ce4dd5c2a
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_secure_service_handle.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <mm_svc.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <spm_svc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+
/* One completion event per core, signalled when its test routine is done. */
static event_t cpu_has_finished_test[PLATFORM_CORE_COUNT];

/* Test routine for test_secure_partition_secondary_cores_seq() */
static test_result_t test_secure_partition_secondary_cores_seq_fn(void)
{
	test_result_t result = TEST_RESULT_SUCCESS;
	u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
	unsigned int core_pos = platform_get_core_pos(cpu_mpid);

	/* Build an SPS_CHECK_ALIVE request in the NS<->SP shared buffer */
	secure_partition_request_info_t *sps_request =
		create_sps_request(SPS_CHECK_ALIVE, NULL, 0);

	INFO("Sending MM_COMMUNICATE_AARCH64 from CPU %u\n",
	     platform_get_core_pos(read_mpidr_el1() & MPID_MASK));

	smc_args mm_communicate_smc = {
		MM_COMMUNICATE_AARCH64,
		0,
		(u_register_t) sps_request,
		0
	};

	smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);

	/* A non-zero return from the Secure Partition counts as failure */
	if ((uint32_t)smc_ret.ret0 != 0) {
		tftf_testcase_printf("Cactus returned: 0x%x\n",
				     (uint32_t)smc_ret.ret0);

		result = TEST_RESULT_FAIL;
	}

	/* Tell the lead CPU this core has finished its run */
	tftf_send_event(&cpu_has_finished_test[core_pos]);

	return result;
}
+
/*
 * @Test_Aim@ This tests that secondary CPUs can access SPM services
 * sequentially: the lead CPU runs the request first, then each secondary
 * is powered on in turn and must complete its own request before the next
 * one starts.
 */
test_result_t test_secure_partition_secondary_cores_seq(void)
{
	int psci_ret;
	u_register_t lead_mpid, cpu_mpid;
	unsigned int cpu_node, core_pos;
	test_result_t result = TEST_RESULT_SUCCESS;

	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);

	SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);

	VERBOSE("Mapping NS<->SP shared buffer\n");

	int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
					 ARM_SECURE_SERVICE_BUFFER_BASE,
					 ARM_SECURE_SERVICE_BUFFER_SIZE,
					 MT_MEMORY | MT_RW | MT_NS);
	if (rc != 0) {
		tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
				     __LINE__, rc);
		result = TEST_RESULT_FAIL;
		goto exit;
	}

	lead_mpid = read_mpidr_el1() & MPID_MASK;

	INFO("Lead CPU is CPU %u\n", platform_get_core_pos(lead_mpid));

	/* Run the request on the lead CPU first */
	if (test_secure_partition_secondary_cores_seq_fn() != TEST_RESULT_SUCCESS) {
		result = TEST_RESULT_FAIL;
		goto exit_unmap;
	}

	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		/* Skip lead CPU, we have already tested it */
		if (cpu_mpid == lead_mpid) {
			continue;
		}

		core_pos = platform_get_core_pos(cpu_mpid);

		tftf_init_event(&cpu_has_finished_test[core_pos]);

		VERBOSE("Powering on CPU %u\n", core_pos);

		psci_ret = tftf_cpu_on(cpu_mpid,
			(uintptr_t)test_secure_partition_secondary_cores_seq_fn, 0);
		if (psci_ret != PSCI_E_SUCCESS) {
			tftf_testcase_printf(
				"Failed to power on CPU %d (rc = %d)\n",
				core_pos, psci_ret);
			result = TEST_RESULT_FAIL;
			goto exit_unmap;
		}

		/* Sequential: wait for this CPU before starting the next */
		tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
	}

exit_unmap:
	VERBOSE("Unmapping NS<->SP shared buffer\n");

	mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
				   ARM_SECURE_SERVICE_BUFFER_SIZE);

exit:
	return result;
}
+
/******************************************************************************/

/* One start-gate event per core, signalled by the lead CPU to begin. */
static event_t cpu_can_start_test[PLATFORM_CORE_COUNT];

/* Test routine for test_secure_partition_secondary_core() */
static test_result_t test_secure_partition_secondary_cores_sim_fn(void)
{
	test_result_t result = TEST_RESULT_SUCCESS;
	u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
	unsigned int core_pos = platform_get_core_pos(cpu_mpid);

	/* Build an SPS_CHECK_ALIVE request in the NS<->SP shared buffer */
	secure_partition_request_info_t *sps_request =
		create_sps_request(SPS_CHECK_ALIVE, NULL, 0);

	smc_args mm_communicate_smc = {
		MM_COMMUNICATE_AARCH64,
		0,
		(u_register_t) sps_request,
		0
	};

	/* Block until the lead CPU releases all cores at once */
	tftf_wait_for_event(&cpu_can_start_test[core_pos]);

	/*
	 * Invoke SMCs for some time to make sure that all CPUs are doing it at
	 * the same time during the test.
	 */
	for (int i = 0; i < 100; i++) {
		smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);

		if ((uint32_t)smc_ret.ret0 != 0) {
			tftf_testcase_printf("Cactus returned 0x%x at CPU %d\n",
					     (uint32_t)smc_ret.ret0, core_pos);
			result = TEST_RESULT_FAIL;
			break;
		}
	}

	/* Tell the lead CPU this core has finished its run */
	tftf_send_event(&cpu_has_finished_test[core_pos]);

	return result;
}
+
/*
 * @Test_Aim@ This tests that secondary CPUs can access SPM services
 * simultaneously: every core is powered on first, then all of them are
 * released at once and hammer the SPM with requests concurrently.
 */
test_result_t test_secure_partition_secondary_cores_sim(void)
{
	int psci_ret;
	u_register_t lead_mpid, cpu_mpid;
	unsigned int cpu_node, core_pos;
	test_result_t result = TEST_RESULT_SUCCESS;

	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);

	SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);

	VERBOSE("Mapping NS<->SP shared buffer\n");

	int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
					 ARM_SECURE_SERVICE_BUFFER_BASE,
					 ARM_SECURE_SERVICE_BUFFER_SIZE,
					 MT_MEMORY | MT_RW | MT_NS);
	if (rc != 0) {
		tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
				     __LINE__, rc);
		result = TEST_RESULT_FAIL;
		goto exit;
	}

	lead_mpid = read_mpidr_el1() & MPID_MASK;

	INFO("Lead CPU is CPU %u\n", platform_get_core_pos(lead_mpid));

	/* Arm the start gate for every core before powering any of them on */
	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		core_pos = platform_get_core_pos(cpu_mpid);
		tftf_init_event(&cpu_can_start_test[core_pos]);
	}

	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		/* Skip lead CPU as it is already powered on */
		if (cpu_mpid == lead_mpid) {
			continue;
		}

		core_pos = platform_get_core_pos(cpu_mpid);

		VERBOSE("Powering on CPU %u\n", core_pos);

		psci_ret = tftf_cpu_on(cpu_mpid,
			(uintptr_t)test_secure_partition_secondary_cores_sim_fn, 0);
		if (psci_ret != PSCI_E_SUCCESS) {
			tftf_testcase_printf(
				"Failed to power on CPU %d (rc = %d)\n",
				core_pos, psci_ret);
			result = TEST_RESULT_FAIL;
			goto exit_unmap;
		}
	}

	/* Release every core at the same time */
	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		core_pos = platform_get_core_pos(cpu_mpid);
		tftf_send_event(&cpu_can_start_test[core_pos]);
	}

	/* The lead CPU takes part in the concurrent run as well */
	result = test_secure_partition_secondary_cores_sim_fn();

	/* Wait until all CPUs have finished to unmap the NS<->SP buffer */
	for_each_cpu(cpu_node) {
		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
		core_pos = platform_get_core_pos(cpu_mpid);
		tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
	}
exit_unmap:
	VERBOSE("Unmapping NS<->SP shared buffer\n");

	mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
				   ARM_SECURE_SERVICE_BUFFER_SIZE);
exit:
	return result;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c b/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c
new file mode 100644
index 000000000..50c3df670
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_secure_service_interrupts.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mm_svc.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <spm_svc.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+#include <xlat_tables_v2.h>
+
+/* Set to 1 by timer_handler(); polled by the test after the SMC returns */
+static volatile int timer_irq_received;
+
+/*
+ * ISR for the timer interrupt.
+ * Just update a global variable to prove it has been called.
+ */
+static int timer_handler(void *data)
+{
+	/* The test resets the flag before arming the timer, so a second
+	 * invocation without a reset in between indicates a spurious IRQ. */
+	assert(timer_irq_received == 0);
+	timer_irq_received = 1;
+	return 0;
+}
+
+
+/*
+ * @Test_Aim@ Test that non-secure interrupts do not interrupt secure service
+ * requests.
+ *
+ * 1. Register a handler for the non-secure timer interrupt.
+ *
+ * 2. Program the non-secure timer to fire in 500 ms.
+ *
+ * 3. Make a long-running (> 500 ms) fast secure service request.
+ * This is achieved by requesting the timer sleep service in Cactus
+ * with a 1 second sleep delay.
+ *
+ * 4. While servicing the timer sleep request, the non-secure timer should
+ * fire but not interrupt Cactus.
+ *
+ * 5. Once back in TFTF, check the response from Cactus, which shows whether the
+ * secure service indeed ran to completion.
+ *
+ * 6. Also check whether the pending non-secure timer interrupt successfully got
+ * handled in TFTF.
+ */
+test_result_t test_secure_partition_interrupt_by_ns(void)
+{
+	secure_partition_request_info_t *sps_request;
+	test_result_t result = TEST_RESULT_FAIL;
+
+	SKIP_TEST_IF_MM_VERSION_LESS_THAN(1, 0);
+
+	VERBOSE("Mapping NS<->SP shared buffer\n");
+
+	/* Identity-map the buffer used to exchange data with the SP */
+	int rc = mmap_add_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+			ARM_SECURE_SERVICE_BUFFER_BASE,
+			ARM_SECURE_SERVICE_BUFFER_SIZE,
+			MT_MEMORY | MT_RW | MT_NS);
+	if (rc != 0) {
+		tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n",
+				__LINE__, rc);
+		/* Nothing mapped or registered yet; no cleanup needed here */
+		return TEST_RESULT_FAIL;
+	}
+
+	timer_irq_received = 0;
+	tftf_timer_register_handler(timer_handler);
+
+	NOTICE("Programming the timer...\n");
+	/* Non-secure timer interrupt due in 500 ms, i.e. while Cactus is
+	 * still servicing the 1-second sleep request below. */
+	rc = tftf_program_timer(500);
+	if (rc < 0) {
+		tftf_testcase_printf("Failed to program timer (%d)\n", rc);
+		goto exit_test;
+	}
+
+	INFO("Sending MM_COMMUNICATE_AARCH64 to Cactus\n");
+
+	/* Ask the Cactus timer sleep service to sleep for 1 second */
+	uint8_t timer_delay = 1;
+	sps_request = create_sps_request(SPS_TIMER_SLEEP,
+			&timer_delay, sizeof(timer_delay));
+	smc_args mm_communicate_smc = {
+		MM_COMMUNICATE_AARCH64,
+		0, /* cookie, MBZ */
+		(uintptr_t) sps_request,
+		0
+	};
+
+	smc_ret_values smc_ret = tftf_smc(&mm_communicate_smc);
+
+	INFO("Returned from Cactus, MM_COMMUNICATE_AARCH64 handling complete\n");
+
+	/*
+	 * If MM_COMMUNICATE gets interrupted, SPM will return SPM_QUEUED, which
+	 * is normally not a valid return value for MM_COMMUNICATE.
+	 */
+	if ((uint32_t) smc_ret.ret0 != SPM_SUCCESS) {
+		tftf_testcase_printf("Cactus returned: 0x%x\n",
+				(uint32_t) smc_ret.ret0);
+		goto exit_test;
+	}
+
+	/* Cactus writes its response code back into the shared buffer */
+	uint32_t cactus_response;
+	memcpy(&cactus_response, sps_request->data, sizeof(cactus_response));
+	if (cactus_response != CACTUS_FAST_REQUEST_SUCCESS) {
+		tftf_testcase_printf("Error code from the timer secure service: 0x%x\n",
+				cactus_response);
+		goto exit_test;
+	}
+
+	/*
+	 * If the timer interrupt is still pending, make sure it is taken right
+	 * now.
+	 */
+	isb();
+
+	/* Success only if the NS timer IRQ was eventually handled in TFTF */
+	if (timer_irq_received == 1)
+		result = TEST_RESULT_SUCCESS;
+
+exit_test:
+	tftf_cancel_timer();
+	tftf_timer_unregister_handler();
+
+	VERBOSE("Unmapping NS<->SP shared buffer\n");
+
+	mmap_remove_dynamic_region(ARM_SECURE_SERVICE_BUFFER_BASE,
+			ARM_SECURE_SERVICE_BUFFER_SIZE);
+
+	return result;
+}
diff --git a/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c b/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c
new file mode 100644
index 000000000..120a99ee0
--- /dev/null
+++ b/tftf/tests/runtime_services/sip_service/test_exec_state_switch.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This test suite validates the execution state switch of a non-secure EL (from
+ * AArch32 to AArch64, and vice versa) by issuing ARM SiP service SMC with
+ * varying parameters. A cookie is shared between both states. A field in the
+ * cookie is updated from the other state to signal that state switch did indeed
+ * happen.
+ *
+ * Note that the suite is not AArch32-ready. All test cases will report as
+ * skipped.
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/* Definitions from TF-A arm_sip_svc.h */
+#define ARM_SIP_SVC_VERSION 0x8200ff03
+#define ARM_SIP_SVC_EXE_STATE_SWITCH 0x82000020
+
+/* State switch error codes from SiP service */
+#define STATE_SW_E_PARAM (-2)
+#define STATE_SW_E_DENIED (-3)
+
+#define SIP_VERSION_OK 1
+
+#define HI32(val) (((u_register_t) (val)) >> 32)
+#define LO32(val) ((uint32_t) (u_register_t) (val))
+
+/* A cookie shared between states for information exchange */
+typedef struct {
+	uint32_t pc_hi;		/* Upper half of AArch64 re-entry PC (offset 0) */
+	uint32_t pc_lo;		/* Lower half of AArch64 re-entry PC (offset 4) */
+	uint64_t sp;		/* Stack pointer to restore on switch-back (offset 8) */
+	uint32_t success;	/* Set to 1 by the AArch32 code (offset 16) */
+} state_switch_cookie_t;
+
+/* Not static: also accessed by name from test_exec_state_switch_asm.S */
+state_switch_cookie_t state_switch_cookie;
+static event_t secondary_booted __unused;
+
+/*
+ * SiP service version check. Also a signal for test cases to execute or skip
+ * altogether.
+ */
+static int sip_version_check __unused;
+
+/* AArch32 instructions to switch state back to AArch64, stored as data */
+extern void *state_switch_a32_entry;
+
+extern int do_state_switch(void *);
+
+/*
+ * @Test_Aim@ Issue a system reset to initiate state switch SMC call that's part
+ * of ARM SiP service. System reset is required because the state switch SMC
+ * requires that no secondaries have been brought up since booting.
+ */
+test_result_t test_exec_state_switch_reset_before(void)
+{
+#ifdef AARCH64
+	int version;
+	smc_args sip_version_smc = { ARM_SIP_SVC_VERSION };
+	smc_args reset = { SMC_PSCI_SYSTEM_RESET };
+	smc_ret_values smc_ret;
+
+#if NEW_TEST_SESSION
+	/*
+	 * This tests suite must start with a system reset. Following a reset,
+	 * we expect TFTF to proceed with the rest of test cases. With
+	 * NEW_TEST_SESSION set when built, TFTF will run this test case again
+	 * after reset. Thus we'll continue resetting forever.
+	 *
+	 * If NEW_TEST_SESSION is set, skip this test case. sip_version_check
+	 * won't be set to SIP_VERSION_OK, thereby skipping rest of test cases
+	 * as well.
+	 */
+	tftf_testcase_printf("This suite needs TFTF built with NEW_TEST_SESSION=0\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+
+	/*
+	 * Query system ARM SiP service version. State switch is available since
+	 * version 0.2.
+	 */
+	smc_ret = tftf_smc(&sip_version_smc);
+	if (((int) smc_ret.ret0) >= 0) {
+		/* Encode version as (major << 8) | minor; 0.2 encodes to 0x02 */
+		version = (smc_ret.ret0 << 8) | (smc_ret.ret1 & 0xff);
+		if (version >= 0x02)
+			sip_version_check = SIP_VERSION_OK;
+	} else {
+		tftf_testcase_printf("Test needs SiP service version 0.2 or later\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/*
+	 * This test will be continuously re-entered after reboot, until it
+	 * returns success.
+	 */
+	if (tftf_is_rebooted())
+		return TEST_RESULT_SUCCESS;
+
+	tftf_testcase_printf("Issuing system reset before state switch\n");
+
+	tftf_notify_reboot();
+	tftf_smc(&reset);
+
+	/* System reset is not expected to return */
+	return TEST_RESULT_FAIL;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Request execution state switch with invalid entry point. Expect
+ * parameter error when switching from AArch64 to AArch32.
+ */
+test_result_t test_exec_state_switch_invalid_pc(void)
+{
+#ifdef AARCH64
+	int ret;
+
+	/* arg1 (PC hi) deliberately invalid: all ones instead of zero */
+	smc_args args = {
+		.arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+		.arg1 = (u_register_t) -1,
+		.arg2 = LO32(&state_switch_a32_entry),
+		.arg3 = HI32(&state_switch_cookie),
+		.arg4 = LO32(&state_switch_cookie)
+	};
+
+	if (sip_version_check != SIP_VERSION_OK)
+		return TEST_RESULT_SKIPPED;
+
+	/* Expect E_PARAM and no switch (success flag must stay clear) */
+	state_switch_cookie.success = 0;
+	ret = do_state_switch(&args);
+	if (state_switch_cookie.success || (ret != STATE_SW_E_PARAM))
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Request execution state switch with context_hi, and upper part of
+ * context_lo set. Expect failure as they're not supposed to be set when
+ * switching from AArch64 to AArch32.
+ */
+test_result_t test_exec_state_switch_invalid_ctx(void)
+{
+#ifdef AARCH64
+	int ret;
+
+	/* arg3 (context hi) deliberately non-zero: invalid for a 64->32 switch */
+	smc_args args = {
+		.arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+		.arg1 = HI32(&state_switch_a32_entry),
+		.arg2 = LO32(&state_switch_a32_entry),
+		.arg3 = -1,
+		.arg4 = LO32(&state_switch_cookie)
+	};
+
+	if (sip_version_check != SIP_VERSION_OK)
+		return TEST_RESULT_SKIPPED;
+
+	/* Expect E_PARAM and no switch (success flag must stay clear) */
+	state_switch_cookie.success = 0;
+	ret = do_state_switch(&args);
+	if (state_switch_cookie.success || (ret != STATE_SW_E_PARAM))
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Perform execution state switch, and back. We don't expect any
+ * failures.
+ */
+test_result_t test_exec_state_switch_valid(void)
+{
+#ifdef AARCH64
+	int ret;
+
+	smc_args args = {
+		.arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+		.arg1 = HI32(&state_switch_a32_entry),
+		.arg2 = LO32(&state_switch_a32_entry),
+		.arg3 = HI32(&state_switch_cookie),
+		.arg4 = LO32(&state_switch_cookie)
+	};
+
+	if (sip_version_check != SIP_VERSION_OK)
+		return TEST_RESULT_SKIPPED;
+
+	/* Make sure that we've a 32-bit PC to enter AArch32 */
+	if (HI32(&state_switch_a32_entry)) {
+		tftf_testcase_printf("AArch32 PC wider than 32 bits. Test skipped; needs re-link\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/*
+	 * Perform a state switch to 32 and back. Expect success field in the
+	 * cookie to be set and return code zero.
+	 */
+	state_switch_cookie.success = 0;
+	ret = do_state_switch(&args);
+	if (!state_switch_cookie.success || (ret != 0))
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * Entry point for the secondary CPU. Send an event to the caller and returns
+ * immediately.
+ */
+/* NOTE(review): the address of this function is taken at the tftf_cpu_on()
+ * call site, which forces an out-of-line copy despite `inline`. */
+static inline test_result_t cpu_ping(void)
+{
+#ifdef AARCH64
+	/* Tell the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&secondary_booted);
+
+	/*
+	 * When returning from the function, the TFTF framework will power CPUs
+	 * down, without this test needing to do anything
+	 */
+	return TEST_RESULT_SUCCESS;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * @Test_Aim@ Power on any secondary and request a state switch. We expect the
+ * request to be denied because a secondary had been brought up.
+ */
+test_result_t test_exec_state_switch_after_cpu_on(void)
+{
+#ifdef AARCH64
+	u_register_t other_mpidr, my_mpidr;
+	int ret;
+
+	smc_args args = {
+		.arg0 = ARM_SIP_SVC_EXE_STATE_SWITCH,
+		.arg1 = HI32(&state_switch_a32_entry),
+		.arg2 = LO32(&state_switch_a32_entry),
+		.arg3 = HI32(&state_switch_cookie),
+		.arg4 = LO32(&state_switch_cookie)
+	};
+
+	if (sip_version_check != SIP_VERSION_OK)
+		return TEST_RESULT_SKIPPED;
+
+	/* Make sure that we've a 32-bit PC to enter AArch32 */
+	if (HI32(&state_switch_a32_entry)) {
+		tftf_testcase_printf("AArch32 PC wider than 32 bits. Test skipped; needs re-link\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	tftf_init_event(&secondary_booted);
+
+	/* Find a valid CPU to power on */
+	my_mpidr = read_mpidr_el1() & MPID_MASK;
+	other_mpidr = tftf_find_any_cpu_other_than(my_mpidr);
+	if (other_mpidr == INVALID_MPID) {
+		tftf_testcase_printf("Couldn't find a valid other CPU\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Power on the other CPU */
+	ret = tftf_cpu_on(other_mpidr, (uintptr_t) cpu_ping, 0);
+	if (ret != PSCI_E_SUCCESS) {
+		INFO("powering on %llx failed", (unsigned long long)
+				other_mpidr);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Wait for flag to proceed */
+	tftf_wait_for_event(&secondary_booted);
+
+	/*
+	 * Request a state switch to 32 and back. Expect failure since we've
+	 * powered a secondary on.
+	 */
+	state_switch_cookie.success = 0;
+	ret = do_state_switch(&args);
+	if ((state_switch_cookie.success != 0) || (ret != STATE_SW_E_DENIED))
+		return TEST_RESULT_FAIL;
+	else
+		return TEST_RESULT_SUCCESS;
+#else
+	tftf_testcase_printf("Test not ported to AArch32\n");
+	return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S b/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S
new file mode 100644
index 000000000..577f89f7a
--- /dev/null
+++ b/tftf/tests/runtime_services/sip_service/test_exec_state_switch_asm.S
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros_common.S>
+
+#define COOKIE_SIZE 20
+
+#ifdef AARCH64
+/* int do_state_switch(void *) */
+	.globl do_state_switch
+func do_state_switch
+	/* Temporarily save beginning of stack */
+	mov	x7, sp
+
+	/*
+	 * When asking to switch execution state, we can't expect general
+	 * purpose registers hold their values. EL3 might clear them all; even
+	 * if EL3 were to preserve them, the register width shrinks and then
+	 * expands, leaving the upper part unknown. So save them before and
+	 * restore after call to switch.
+	 */
+	stp	x8, x9, [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+	stp	x18, x19, [sp, #-16]!
+	stp	x20, x21, [sp, #-16]!
+	stp	x22, x23, [sp, #-16]!
+	stp	x24, x25, [sp, #-16]!
+	stp	x26, x27, [sp, #-16]!
+	stp	x28, x29, [sp, #-16]!
+
+	/*
+	 * State switch effectively means a soft reset; so SCTLR will lose its
+	 * value too.
+	 */
+	mrs	x1, CurrentEL
+	cmp	x1, #(2 << 2)		/* Running at EL2? */
+	b.ne	1f
+	mrs	x1, sctlr_el2
+	b	2f
+1:
+	mrs	x1, sctlr_el1
+2:
+	/* LR and the saved SCTLR form the last (lowest) pair on the stack */
+	stp	x30, x1, [sp, #-16]!
+
+	/* Store the PC in the cookie when switching back to AArch64 */
+	ldr	x4, =state_switch_cookie
+	adr	x2, do_switch_back
+	mov	w1, w2
+	lsr	x1, x1, #32
+	str	w1, [x4, #0]	/* PC hi */
+	str	w2, [x4, #4]	/* PC lo */
+
+	/* Store valid stack pointer in cookie */
+	mov	x8, sp
+	str	x8, [x4, #8]
+
+	/* Stash stack and LR before calling functions */
+	mov	x28, x7
+	mov	x29, x30
+
+	/* x10 = pointer to the caller-supplied five SMC arguments */
+	mov	x10, x0
+
+	/*
+	 * Clean and invalidate cookie memory as it's going to be accessed with
+	 * MMU off in the new state.
+	 */
+	mov	x0, x4
+	ldr	x1, =COOKIE_SIZE
+	bl	flush_dcache_range
+
+	/*
+	 * Flush stack context saved on stack as it'll be accessed immediately
+	 * after switching back, with MMU off.
+	 */
+	mov	x0, x8
+	sub	x1, x28, x8
+	bl	flush_dcache_range
+
+	/* Prepare arguments for state switch SMC */
+	ldr	x0, [x10], #8
+	ldr	x1, [x10], #8
+	ldr	x2, [x10], #8
+	ldr	x3, [x10], #8
+	ldr	x4, [x10], #8
+
+	/* Switch state */
+	smc	#0
+
+	/*
+	 * We reach here only if the SMC failed. If so, restore previously
+	 * modified callee-saved registers, rewind stack, and return to caller
+	 * with the error code from SMC.
+	 */
+	mov	x1, x28
+	mov	x2, x29
+	/* x28/x29 sit just above the final x30/SCTLR pair, at [sp, #16] */
+	ldp	x28, x29, [sp, #16]
+	mov	sp, x1
+	ret	x2
+
+restore_context:
+	/* Pop the context saved above, in reverse push order */
+	ldp	x30, x1, [sp], #16
+	ldp	x28, x29, [sp], #16
+	ldp	x26, x27, [sp], #16
+	ldp	x24, x25, [sp], #16
+	ldp	x22, x23, [sp], #16
+	ldp	x20, x21, [sp], #16
+	ldp	x18, x19, [sp], #16
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9, [sp], #16
+
+	/* Reinstate the SCTLR value saved before the switch */
+	dsb	sy
+	mrs	x0, CurrentEL
+	cmp	x0, #(2 << 2)
+	b.ne	1f
+	msr	sctlr_el2, x1
+	b	2f
+1:
+	msr	sctlr_el1, x1
+2:
+	isb
+
+	/* Successful round trip: return 0 */
+	mov	x0, #0
+	ret
+endfunc do_state_switch
+
+/* AArch64 entry point when switching back from AArch32 */
+do_switch_back:
+	/* w0 and w1 have the cookie */
+	lsl	x0, x0, #32
+	orr	x0, x1, x0
+
+	/* Reload the stack pointer saved in the cookie (offset 8) */
+	ldr	x1, [x0, #8]
+	mov	sp, x1
+
+	b	restore_context
+
+	.section .data, "aw"
+
+/* AArch32 instructions to switch state back to AArch64, stored as data */
+	.align	2
+	.globl	state_switch_a32_entry
+state_switch_a32_entry:
+	/* Use the same context when switching back */
+	.word	0xe1a03000	/* mov	r3, r0 */
+	.word	0xe1a04001	/* mov	r4, r1 */
+
+	/* Set success flag in cookie */
+	.word	0xe3a00001	/* mov	r0, #1 */
+	.word	0xe5810010	/* str	r0, [r1, #16] */
+
+	/* Setup arguments for SMC */
+	.word	0xe3a00020	/* mov	r0, #0x0020 */
+	.word	0xe3480200	/* movt	r0, #0x8200 */
+
+	.word	0xe5912004	/* ldr	r2, [r1, #4] */
+	.word	0xe5911000	/* ldr	r1, [r1, #0] */
+	.word	0xe1600070	/* smc	#0x0 */
+	.word	0xeafffffe	/* b	. */
+
+#else /* !AARCH64 */
+
+/* Not supported on AArch32 yet: always report failure (-1) */
+func do_state_switch
+	mov	r0, #-1
+	bx	lr
+endfunc do_state_switch
+
+#endif /* AARCH64 */
diff --git a/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c b/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
new file mode 100644
index 000000000..46b5a920d
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <pmf.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/* Indices of the runtime-instrumentation timestamps captured per CPU */
+#define TOTAL_IDS	6
+#define ENTER_PSCI	0
+#define EXIT_PSCI	1
+#define ENTER_HW_LOW_PWR	2
+#define EXIT_HW_LOW_PWR	3
+#define ENTER_CFLUSH	4
+#define EXIT_CFLUSH	5
+
+/* Protects cpu_count; CPUs rendezvous in wait_for_participating_cpus() */
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+/* One row of TOTAL_IDS timestamps per core, filled by get_ts() */
+static u_register_t timestamps[PLATFORM_CORE_COUNT][TOTAL_IDS];
+/* Power level targeted by the suspend tests; set by the test entry points */
+static unsigned int target_pwrlvl;
+
+/* Helper function to wait for CPUs participating in the test. */
+static void wait_for_participating_cpus(void)
+{
+	assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+	/* Check in under the lock... */
+	spin_lock(&cpu_count_lock);
+	cpu_count++;
+	spin_unlock(&cpu_count_lock);
+
+	assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+	/* ...then busy-wait until every participating CPU has checked in */
+	while (cpu_count != participating_cpu_count)
+		continue;
+}
+
+/*
+ * Perform an SMC call into TF-A to collect timestamp specified by `tid`
+ * and pass it as a parameter back to the caller.
+ */
+/* Returns the SMC status (ret0); the timestamp itself is written to *v */
+static u_register_t pmf_get_ts(u_register_t tid, u_register_t *v)
+{
+	smc_args args = { 0 };
+	smc_ret_values ret;
+
+	args.arg0 = PMF_SMC_GET_TIMESTAMP;
+	args.arg1 = tid;
+	/* Timestamps are per-CPU: identify the caller by its MPIDR */
+	args.arg2 = read_mpidr_el1();
+	ret = tftf_smc(&args);
+	*v = ret.ret1;
+	return ret.ret0;
+}
+
+/*
+ * Convert `cycles` counted at `freq` Hz into nanoseconds in *ns.
+ * Returns 0 on success, -ERANGE if the multiplication would overflow
+ * or freq is zero.
+ */
+static int cycles_to_ns(uint64_t cycles, uint64_t freq, uint64_t *ns)
+{
+	if (cycles > UINT64_MAX / 1000000000 || freq == 0)
+		return -ERANGE;
+	*ns = cycles * 1000000000 / freq;
+	return 0;
+}
+
+/* Return the calling core's row of the per-CPU timestamps array */
+static u_register_t *get_core_timestamps(void)
+{
+	unsigned int pos = platform_get_core_pos(read_mpidr_el1());
+
+	assert(pos < PLATFORM_CORE_COUNT);
+	return timestamps[pos];
+}
+
+/* Check timestamps for the suspend/cpu off tests. */
+static test_result_t check_pwr_down_ts(void)
+{
+	u_register_t *ts;
+
+	ts = get_core_timestamps();
+	/* Power-down path must be ordered:
+	 * PSCI entry <= HW low-power entry <= HW low-power exit <= PSCI exit */
+	if (!(ts[ENTER_PSCI] <= ts[ENTER_HW_LOW_PWR] &&
+	      ts[ENTER_HW_LOW_PWR] <= ts[EXIT_HW_LOW_PWR] &&
+	      ts[EXIT_HW_LOW_PWR] <= ts[EXIT_PSCI])) {
+		tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Cache flush must start no later than it ends */
+	if (ts[ENTER_CFLUSH] > ts[EXIT_CFLUSH]) {
+		tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Capture all runtime instrumentation timestamps for the current
+ * CPU and store them into the timestamps array.
+ */
+static test_result_t get_ts(void)
+{
+	u_register_t tid, *ts;
+	int i;
+
+	ts = get_core_timestamps();
+	for (i = 0; i < TOTAL_IDS; i++) {
+		/* Compose the PMF timestamp id: implementation | service | index */
+		tid = PMF_ARM_TIF_IMPL_ID << PMF_IMPL_ID_SHIFT;
+		tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT | i;
+		if (pmf_get_ts(tid, &ts[i]) != 0) {
+			ERROR("Failed to capture PMF timestamp\n");
+			return TEST_RESULT_FAIL;
+		}
+	}
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Dump suspend statistics for the suspend/cpu off test.
+ *
+ * For every core, print the PSCI-entry-to-low-power, low-power-to-PSCI-exit
+ * and cache-flush durations in nanoseconds, tagged with `msg`.
+ * Returns TEST_RESULT_SUCCESS / TEST_RESULT_FAIL (callers treat the int
+ * return as a test_result_t).
+ */
+static int dump_suspend_stats(const char *msg)
+{
+	u_register_t *ts;
+	u_register_t target_mpid;
+	uint64_t freq, cycles[3], period[3];
+	int cpu_node, ret;
+	unsigned int pos;
+
+	freq = read_cntfrq_el0();
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		pos = platform_get_core_pos(target_mpid);
+		assert(pos < PLATFORM_CORE_COUNT);
+		ts = timestamps[pos];
+
+		/* Time from PSCI entry until the hardware low-power entry */
+		cycles[0] = ts[ENTER_HW_LOW_PWR] - ts[ENTER_PSCI];
+		ret = cycles_to_ns(cycles[0], freq, &period[0]);
+		if (ret < 0) {
+			ERROR("cycles_to_ns: out of range\n");
+			return TEST_RESULT_FAIL;
+		}
+
+		/* Time from hardware low-power exit until PSCI exit */
+		cycles[1] = ts[EXIT_PSCI] - ts[EXIT_HW_LOW_PWR];
+		ret = cycles_to_ns(cycles[1], freq, &period[1]);
+		if (ret < 0) {
+			ERROR("cycles_to_ns: out of range\n");
+			return TEST_RESULT_FAIL;
+		}
+
+		/* Duration of the cache flush */
+		cycles[2] = ts[EXIT_CFLUSH] - ts[ENTER_CFLUSH];
+		ret = cycles_to_ns(cycles[2], freq, &period[2]);
+		if (ret < 0) {
+			ERROR("cycles_to_ns: out of range\n");
+			return TEST_RESULT_FAIL;
+		}
+
+		/* `pos` is unsigned: use %u (was %d, a specifier mismatch) */
+		printf("<RT_INSTR:%s\t%u\t%02llu\t%02llu\t%02llu/>\n", msg, pos,
+			(unsigned long long)period[0],
+			(unsigned long long)period[1],
+			(unsigned long long)period[2]);
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Dump statistics for a PSCI version call.
+ *
+ * For every core, print the PSCI-entry-to-PSCI-exit duration in nanoseconds,
+ * tagged with `msg`. Returns TEST_RESULT_SUCCESS / TEST_RESULT_FAIL (callers
+ * treat the int return as a test_result_t).
+ */
+static int dump_psci_version_stats(const char *msg)
+{
+	u_register_t *ts;
+	u_register_t target_mpid;
+	uint64_t freq, cycles, period;
+	int cpu_node, ret;
+	unsigned int pos;
+
+	freq = read_cntfrq_el0();
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		pos = platform_get_core_pos(target_mpid);
+		assert(pos < PLATFORM_CORE_COUNT);
+		ts = timestamps[pos];
+
+		/* Round-trip time through the PSCI_VERSION SMC */
+		cycles = ts[EXIT_PSCI] - ts[ENTER_PSCI];
+		ret = cycles_to_ns(cycles, freq, &period);
+		if (ret < 0) {
+			ERROR("cycles_to_ns: out of range\n");
+			return TEST_RESULT_FAIL;
+		}
+
+		/* `pos` is unsigned: use %u (was %d, a specifier mismatch) */
+		printf("<RT_INSTR:%s\t%u\t%02llu/>\n", msg, pos,
+			(unsigned long long)period);
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Dummy entry point to turn core off for the CPU off test. */
+static test_result_t dummy_entrypoint(void)
+{
+	/* Rendezvous, then return so the framework powers the core off */
+	wait_for_participating_cpus();
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Entrypoint to collect timestamps for CPU off test. */
+static test_result_t collect_ts_entrypoint(void)
+{
+	wait_for_participating_cpus();
+
+	/* Capture this core's timestamps and validate their ordering */
+	if (get_ts() != TEST_RESULT_SUCCESS ||
+	    check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Suspend current core to power level specified by `target_pwrlvl`. */
+static test_result_t suspend_current_core(void)
+{
+	unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+	unsigned int pwrlvl, susp_type, state_id, power_state;
+	int ret;
+
+	INIT_PWR_LEVEL_INDEX(pstateid_idx);
+
+	/* Build the deepest power state available at `target_pwrlvl` */
+	tftf_set_deepest_pstate_idx(target_pwrlvl, pstateid_idx);
+	tftf_get_pstate_vars(&pwrlvl, &susp_type, &state_id, pstateid_idx);
+
+	power_state = tftf_make_psci_pstate(pwrlvl, susp_type, state_id);
+
+	/* Arm a wake-up timer, then suspend; resumes on timer expiry */
+	ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+			power_state, NULL, NULL);
+	if (ret != 0) {
+		ERROR("Failed to program timer or suspend CPU: 0x%x\n", ret);
+		return TEST_RESULT_FAIL;
+	}
+
+	tftf_cancel_timer();
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* This entrypoint is used for all suspend tests. */
+static test_result_t suspend_core_entrypoint(void)
+{
+	wait_for_participating_cpus();
+
+	/* Suspend, then collect and validate this core's timestamps */
+	if (suspend_current_core() != TEST_RESULT_SUCCESS ||
+	    get_ts() != TEST_RESULT_SUCCESS ||
+	    check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Entrypoint used for the PSCI version test. */
+static test_result_t psci_version_entrypoint(void)
+{
+	u_register_t *ts;
+	int version;
+
+	wait_for_participating_cpus();
+
+	version = tftf_get_psci_version();
+	if (!tftf_is_valid_psci_version(version)) {
+		tftf_testcase_printf(
+			"Wrong PSCI version:0x%08x\n", version);
+		return TEST_RESULT_FAIL;
+	}
+
+	if (get_ts() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	/* Check timestamp order: PSCI entry must not be after PSCI exit. */
+	ts = get_core_timestamps();
+	if (ts[ENTER_PSCI] > ts[EXIT_PSCI]) {
+		tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/* Check if runtime instrumentation is enabled in TF. */
+/* Check if runtime instrumentation is enabled in TF: probe timestamp 0 */
+static int is_rt_instr_supported(void)
+{
+	u_register_t tid, dummy;
+
+	tid = PMF_ARM_TIF_IMPL_ID << PMF_IMPL_ID_SHIFT;
+	tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT;
+	/* Non-zero SMC status means the service is absent */
+	return !pmf_get_ts(tid, &dummy);
+}
+
+/*
+ * This test powers on all of the non-lead cores and brings
+ * them and the lead core to a common synchronization point.
+ * Then a suspend to the deepest power level supported on the
+ * platform is initiated on all cores in parallel.
+ */
+static test_result_t test_rt_instr_susp_parallel(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, ret;
+
+	if (is_rt_instr_supported() == 0)
+		return TEST_RESULT_SKIPPED;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	participating_cpu_count = tftf_get_total_cpus_count();
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	/* Power on all the non-lead cores. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+				(uintptr_t)suspend_core_entrypoint, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+					(unsigned long long)target_mpid);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* The lead core takes part in the parallel suspend too */
+	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	/* Wait for the non-lead cores to power down. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		/* Un-count each core once it is observed OFF */
+		cpu_count--;
+	}
+
+	/* Account for the lead core itself; the count must balance out */
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	return dump_suspend_stats(__func__);
+}
+
+/*
+ * This test powers on each non-lead core in sequence and
+ * suspends it to the deepest power level supported on the platform.
+ * It then waits for the core to power off. Each core in
+ * the non-lead cluster will bring the entire cluster down when it
+ * powers off because it will be the only core active in the cluster.
+ * The lead core will also be suspended in a similar fashion.
+ */
+static test_result_t test_rt_instr_susp_serial(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, ret;
+
+	if (is_rt_instr_supported() == 0)
+		return TEST_RESULT_SKIPPED;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	/* Serial test: each core runs the rendezvous alone */
+	participating_cpu_count = 1;
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	/* Suspend one core at a time. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+				(uintptr_t)suspend_core_entrypoint, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+					(unsigned long long)target_mpid);
+			return TEST_RESULT_FAIL;
+		}
+		/* Wait for the core to go OFF before starting the next one */
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	assert(cpu_count == 0);
+
+	/* Suspend lead core as well. */
+	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	return dump_suspend_stats(__func__);
+}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in parallel.
+ *
+ * This test should exercise contention in TF-A as all the cores initiate
+ * a CPU suspend call in parallel.
+ */
+test_result_t test_rt_instr_susp_deep_parallel(void)
+{
+	/* Target the deepest power level the platform supports */
+	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+	return test_rt_instr_susp_parallel();
+}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in parallel.
+ *
+ * Suspend all cores in parallel to target power level 0.
+ * Cache associated with power domain level 0 is flushed. For
+ * Juno, the L1 cache is flushed.
+ */
+test_result_t test_rt_instr_cpu_susp_parallel(void)
+{
+	/* Target power level 0 (CPU-level suspend only) */
+	target_pwrlvl = 0;
+	return test_rt_instr_susp_parallel();
+}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in sequence.
+ *
+ * Each core in the non-lead cluster brings down the entire cluster when
+ * it goes down.
+ */
+test_result_t test_rt_instr_susp_deep_serial(void)
+{
+	/* Target the deepest power level the platform supports */
+	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+	return test_rt_instr_susp_serial();
+}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in sequence.
+ *
+ * Cache associated with level 0 power domain are flushed. For
+ * Juno, the L1 cache is flushed.
+ */
+test_result_t test_rt_instr_cpu_susp_serial(void)
+{
+	/* Target power level 0 (CPU-level suspend only) */
+	target_pwrlvl = 0;
+	return test_rt_instr_susp_serial();
+}
+
+/*
+ * @Test_Aim@ CPU off on all non-lead cores in sequence and
+ * suspend lead to deepest power level.
+ *
+ * The test sequence is as follows:
+ *
+ * 1) Turn on and turn off each non-lead core in sequence.
+ * 2) Program wake up timer and suspend the lead core to deepest power level.
+ * 3) Turn on each secondary core and get the timestamps from each core.
+ *
+ * All cores in the non-lead cluster bring the cluster
+ * down when they go down. Core 4 brings the big cluster down
+ * when it goes down.
+ */
+test_result_t test_rt_instr_cpu_off_serial(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, ret;
+
+	if (is_rt_instr_supported() == 0)
+		return TEST_RESULT_SKIPPED;
+
+	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	participating_cpu_count = 1;
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	/* Turn core on/off one at a time. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+				(uintptr_t)dummy_entrypoint, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+					(unsigned long long)target_mpid);
+			return TEST_RESULT_FAIL;
+		}
+		/* Wait for the core to go OFF before continuing */
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	assert(cpu_count == 0);
+
+	/* Suspend lead core as well. */
+	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	/* Turn core on one at a time and collect timestamps. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+				(uintptr_t)collect_ts_entrypoint, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+					(unsigned long long)target_mpid);
+			return TEST_RESULT_FAIL;
+		}
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	assert(cpu_count == 0);
+
+	return dump_suspend_stats(__func__);
+}
+
+/*
+ * @Test_Aim@ PSCI version call on all cores in parallel.
+ */
+test_result_t test_rt_instr_psci_version_parallel(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, ret;
+
+	if (is_rt_instr_supported() == 0)
+		return TEST_RESULT_SKIPPED;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	participating_cpu_count = tftf_get_total_cpus_count();
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	/* Power on all the non-lead cores. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+				(uintptr_t)psci_version_entrypoint, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+					(unsigned long long)target_mpid);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* The lead core performs the PSCI version call too */
+	if (psci_version_entrypoint() != TEST_RESULT_SUCCESS)
+		return TEST_RESULT_FAIL;
+
+	/* Wait for the non-lead cores to power down. */
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node);
+		if (lead_mpid == target_mpid)
+			continue;
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	/* Account for the lead core; the count must balance out */
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	return dump_psci_version_stats(__func__);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c
new file mode 100644
index 000000000..5abb7f0d7
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/* Special value used to terminate the array of expected return values */
+#define END_OF_EXPECTED_VALUE 0xDEADBEEF
+/*
+ * Event used by test test_affinity_info_level0_powerdown() to synchronise
+ * CPUs
+ */
+static event_t cpu_about_to_suspend;
+static unsigned int psci_version;
+
+/*
+ * `expected_values` should contain an array of expected return values
+ * terminated by `END_OF_EXPECTED_VALUE`. If 'actual_value' exists in
+ * one of 'expected_values' then return a test success.
+ * Otherwise, print an error message in the test report and report a test
+ * failure.
+ */
+static test_result_t get_test_result(const int *expected_values, int actual_value)
+{
+ const int *expected_val_list;
+
+ expected_val_list = expected_values;
+ while (*expected_val_list != END_OF_EXPECTED_VALUE) {
+ if (*expected_val_list == actual_value)
+ return TEST_RESULT_SUCCESS;
+ expected_val_list++;
+ }
+
+ expected_val_list = expected_values;
+ tftf_testcase_printf("Unexpected return value: %i Expected values are:",
+ actual_value);
+ while (*expected_val_list != END_OF_EXPECTED_VALUE) {
+ tftf_testcase_printf("%i ", *expected_val_list);
+ expected_val_list++;
+ }
+ tftf_testcase_printf("\n");
+
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on online CPU
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 0 on lead CPU.
+ * Expect the PSCI implementation to report that the affinity instance is on.
+ */
+test_result_t test_affinity_info_level0_on(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ int32_t aff_info;
+ int expected_values[] = {PSCI_STATE_ON, END_OF_EXPECTED_VALUE};
+
+ aff_info = tftf_psci_affinity_info(mpid, MPIDR_AFFLVL0);
+ return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on offline CPU
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 0 on all non-lead CPUs.
+ * Expect the PSCI implementation to report that the affinity instances are off.
+ *
+ * This test needs 2 CPUs to run. It will be skipped on a single core platform.
+ */
+test_result_t test_affinity_info_level0_off(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid, target_node;
+ int32_t aff_info;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+ int expected_values[] = {PSCI_STATE_OFF, END_OF_EXPECTED_VALUE};
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU, as it is powered on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0);
+ if (get_test_result(expected_values, aff_info)
+ == TEST_RESULT_FAIL) {
+ ret = TEST_RESULT_FAIL;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 1 on online cluster
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 1 on the lead cluster
+ * (i.e. the cluster to which the lead CPU belongs).
+ * PSCI implementations prior to PSCI 1.0 must report that the cluster is on;
+ * later versions may also return INVALID_PARAMETERS.
+ */
+test_result_t test_affinity_info_level1_on(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid;
+ int32_t aff_info;
+ int expected_values[3];
+
+ /*
+ * The minimum supported PSCI version is 0.2; use this info to decide if
+ * tftf_get_psci_version() needs to be called or not.
+ */
+ if (!psci_version)
+ psci_version = tftf_get_psci_version();
+
+ /*
+ * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+ * track affinity levels greater than zero.
+ */
+ if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+ expected_values[0] = PSCI_STATE_ON;
+ expected_values[1] = END_OF_EXPECTED_VALUE;
+ } else {
+ expected_values[0] = PSCI_STATE_ON;
+ expected_values[1] = PSCI_E_INVALID_PARAMS;
+ expected_values[2] = END_OF_EXPECTED_VALUE;
+ }
+
+ /*
+ * Build an MPID corresponding to the lead cluster. Set the affinity
+ * level0 bits to some arbitrary value that doesn't correspond to any
+ * CPU on the platform. The PSCI implementation should ignore the
+ * affinity 0 field.
+ */
+ target_mpid = (lead_mpid & MPIDR_CLUSTER_MASK) | 0xE1;
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL1);
+ return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 1 on offline cluster
+ *
+ * Call PSCI AFFINITY_INFO targeted at affinity level 1 on a non-lead cluster
+ * (i.e. a cluster other than the one to which the lead CPU belongs).
+ * PSCI implementations prior to PSCI 1.0 must report that the cluster is OFF;
+ * later versions may also return INVALID_PARAMETERS.
+ *
+ * This test needs 2 clusters to run. It will be skipped on a single cluster
+ * platform.
+ */
+test_result_t test_affinity_info_level1_off(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid;
+ int32_t aff_info;
+ unsigned int cluster_id;
+ int expected_values[3];
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ for (cluster_id = 0;
+ cluster_id < tftf_get_total_clusters_count();
+ ++cluster_id) {
+ if (cluster_id != MPIDR_CLUSTER_ID(lead_mpid))
+ break;
+ }
+ assert(cluster_id != tftf_get_total_clusters_count());
+
+
+ /*
+ * The minimum supported PSCI version is 0.2; use this info to decide if
+ * tftf_get_psci_version() needs to be called or not.
+ */
+ if (!psci_version)
+ psci_version = tftf_get_psci_version();
+
+ /*
+ * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+ * track affinity levels greater than zero.
+ */
+ if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+ expected_values[0] = PSCI_STATE_OFF;
+ expected_values[1] = END_OF_EXPECTED_VALUE;
+ } else {
+ expected_values[0] = PSCI_STATE_OFF;
+ expected_values[1] = PSCI_E_INVALID_PARAMS;
+ expected_values[2] = END_OF_EXPECTED_VALUE;
+ }
+
+ /*
+ * Build an MPID corresponding to a non-lead cluster. Set the affinity
+ * level0 bits to some arbitrary value that doesn't correspond to any
+ * CPU on the platform. The PSCI implementation should ignore the
+ * affinity 0 field.
+ */
+ target_mpid = make_mpid(cluster_id, 0xE1);
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL1);
+ return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 2
+ *
+ * For PSCI implementations prior to PSCI 1.0, the expected return value
+ * depends on the maximum affinity level that the power management
+ * operations can apply to on this platform.
+ * - If the platform doesn't have an affinity level 2 then expect the PSCI
+ * implementation to report that it received invalid parameters.
+ * - If affinity level 2 exists then expect the PSCI implementation to report
+ * that the affinity instance is on.
+ *
+ * From PSCI 1.0 onwards, it can also return INVALID_PARAMETERS
+ */
+test_result_t test_affinity_info_level2(void)
+{
+ int expected_values[3];
+ unsigned int target_mpid;
+ int32_t aff_info;
+
+ /*
+ * The minimum supported PSCI version is 0.2; use this info to decide if
+ * tftf_get_psci_version() needs to be called or not.
+ */
+ if (!psci_version)
+ psci_version = tftf_get_psci_version();
+
+ expected_values[0] = (PLATFORM_MAX_AFFLVL >= 2)
+ ? PSCI_STATE_ON
+ : PSCI_E_INVALID_PARAMS;
+
+ /*
+ * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+ * track affinity levels greater than zero.
+ */
+ if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+ expected_values[1] = END_OF_EXPECTED_VALUE;
+ } else {
+ expected_values[1] = PSCI_E_INVALID_PARAMS;
+ expected_values[2] = END_OF_EXPECTED_VALUE;
+ }
+
+ /*
+ * Build an MPID corresponding to the lead affinity instance at level 2.
+ * Set the affinity level0 & level1 bits to some arbitrary values that
+ * don't correspond to any affinity instance on the platform. The PSCI
+ * implementation should ignore the affinity 0 & 1 fields.
+ */
+ target_mpid = read_mpidr_el1() & (MPIDR_AFFLVL_MASK << MPIDR_AFF_SHIFT(2));
+ target_mpid |= 0xAB << MPIDR_AFF1_SHIFT;
+ target_mpid |= 0xE1 << MPIDR_AFF0_SHIFT;
+
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL2);
+ return get_test_result(expected_values, aff_info);
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 3
+ *
+ * For PSCI implementations prior to PSCI 1.0, the expected return value
+ * depends on the maximum affinity level that the power management
+ * operations can apply to on this platform.
+ * - If the platform doesn't have an affinity level 3 then expect the PSCI
+ * implementation to report that it received invalid parameters.
+ * - If affinity level 3 exists then expect the PSCI implementation to report
+ * that the affinity instance is on.
+ *
+ * From PSCI 1.0 onwards, it can also return INVALID_PARAMETERS
+ */
+test_result_t test_affinity_info_level3(void)
+{
+#ifndef AARCH32
+ int expected_values[3];
+ uint64_t target_mpid;
+ int32_t aff_info;
+
+ /*
+ * The minimum supported PSCI version is 0.2; use this info to decide if
+ * tftf_get_psci_version() needs to be called or not.
+ */
+ if (!psci_version)
+ psci_version = tftf_get_psci_version();
+
+ expected_values[0] = (PLATFORM_MAX_AFFLVL == 3)
+ ? PSCI_STATE_ON
+ : PSCI_E_INVALID_PARAMS;
+
+ /*
+ * From PSCI version 1.0 onwards, Trusted Firmware-A may or may not
+ * track affinity levels greater than zero.
+ */
+ if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+ expected_values[1] = END_OF_EXPECTED_VALUE;
+ } else {
+ expected_values[1] = PSCI_E_INVALID_PARAMS;
+ expected_values[2] = END_OF_EXPECTED_VALUE;
+ }
+
+ /*
+ * Build an MPID corresponding to the lead affinity instance at level 3.
+ * Set the affinity level0/level1/level2 bits to some arbitrary values
+ * that don't correspond to any affinity instance on the platform. The
+ * PSCI implementation should ignore the affinity 0, 1 & 2 fields.
+ */
+ target_mpid = read_mpidr_el1();
+ target_mpid &= ((uint64_t) MPIDR_AFFLVL_MASK) << MPIDR_AFF_SHIFT(3);
+ target_mpid |= 0xD2 << MPIDR_AFF2_SHIFT;
+ target_mpid |= 0xAB << MPIDR_AFF1_SHIFT;
+ target_mpid |= 0xE1 << MPIDR_AFF0_SHIFT;
+
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL3);
+ return get_test_result(expected_values, aff_info);
+#else
+ return TEST_RESULT_SKIPPED;
+#endif
+}
+
+/*
+ * Suspend to powerdown the calling CPU.
+ *
+ * 1) Enable SGI #0. This SGI will be sent by the lead CPU to wake this CPU.
+ * 2) Suspend the CPU.
+ * 3) Report success/failure of the suspend operation.
+ */
+static test_result_t suspend_to_powerdown(void)
+{
+ uint32_t power_state, stateid;
+ int psci_ret, expected_return_val;
+
+ /*
+ * Enable reception of SGI 0 on the calling CPU.
+ * SGI 0 will serve as the wake-up event to come out of suspend.
+ */
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ expected_return_val = tftf_psci_make_composite_state_id(
+ PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN, &stateid);
+
+ /* Need at least 1 power down state defined at level 0 */
+ if (expected_return_val != PSCI_E_SUCCESS)
+ return TEST_RESULT_SKIPPED;
+
+ /*
+ * Suspend the calling CPU to the desired affinity level and power state
+ */
+ power_state = tftf_make_psci_pstate(PSTATE_AFF_LVL_0,
+ PSTATE_TYPE_POWERDOWN,
+ stateid);
+
+ /*
+ * Notify the lead CPU that the calling CPU is about to suspend itself
+ */
+ tftf_send_event(&cpu_about_to_suspend);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to suspend (%i)\n", psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test PSCI AFFINITY_INFO targeted at affinity level 0 on a
+ * suspended CPU
+ *
+ * A CPU that has been physically powered down as a result of a call to
+ * CPU_SUSPEND must be reported as ON by the AFFINITY_INFO call. This test
+ * aims at verifying this behaviour.
+ *
+ * This test needs 2 CPUs to run. It will be skipped on a single core platform.
+ * It will also be skipped if an error is encountered during the bring-up of the
+ * non-lead CPU.
+ */
+test_result_t test_affinity_info_level0_powerdown(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid, target_core_pos;
+ int psci_ret;
+ int32_t aff_info;
+ test_result_t ret;
+ int expected_values[] = {PSCI_STATE_ON, END_OF_EXPECTED_VALUE};
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /*
+ * Preparation step:
+ * Find another CPU than the lead CPU and power it on.
+ */
+ target_mpid = tftf_find_any_cpu_other_than(lead_mpid);
+ assert(target_mpid != INVALID_MPID);
+ target_core_pos = platform_get_core_pos(target_mpid);
+
+ psci_ret = tftf_cpu_on(target_mpid, (uintptr_t) suspend_to_powerdown, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n",
+ target_mpid, psci_ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Wait for the other CPU to initiate the suspend operation */
+ tftf_wait_for_event(&cpu_about_to_suspend);
+
+ /* Wait a bit for the CPU to really enter suspend state */
+ waitms(PLAT_SUSPEND_ENTRY_TIME);
+
+ /* Request status of the non-lead CPU while it is suspended */
+ aff_info = tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0);
+ ret = get_test_result(expected_values, aff_info);
+
+ /* Wake up non-lead CPU */
+ tftf_send_sgi(IRQ_NS_SGI_0, target_core_pos);
+
+ return ret;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c
new file mode 100644
index 000000000..73ac9513f
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+
+static test_result_t test_cpu_booted(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ VERBOSE("Hello from core 0x%x\n", mpid);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+
+/*
+ * @Test_Aim@ Test CPU hotplug support
+ *
+ * This test powers on all CPUs using the PSCI CPU_ON API and checks whether the
+ * operation succeeded.
+ */
+test_result_t test_psci_cpu_hotplug(void)
+{
+ test_result_t ret = TEST_RESULT_SUCCESS;
+ unsigned int cpu_node, cpu_mpid;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos;
+ int psci_ret;
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_cpu_booted,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * The lead CPU needs to wait for all other CPUs to enter the test.
+ * This is because the test framework declares the end of a test when no
+ * CPU is in the test. Therefore, if the lead CPU goes ahead and exits
+ * the test then potentially there could be no CPU executing the test at
+ * this time because none of them have entered the test yet, hence the
+ * framework will be misled in thinking the test is finished.
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ return ret;
+}
+
+/* Verify context ID passed by lead CPU */
+static test_result_t test_context_ids_non_lead_cpu(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ u_register_t ctx_id = tftf_get_cpu_on_ctx_id(core_pos);
+
+ if (ctx_id != (u_register_t)(mpid + core_pos)) {
+ tftf_testcase_printf("Failed to get context ID in CPU %d\n",
+ core_pos);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify the value of context ID from tftf_cpu_on().
+ *
+ * This test powers on all the secondary CPUs and sends different context IDs
+ * when doing so. All CPUs must receive the correct value without it having
+ * been overwritten during the boot process.
+ */
+test_result_t test_context_ids(void)
+{
+ int i;
+ unsigned int lead_mpid;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ int psci_ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+ tftf_init_event(&cpu_booted[i]);
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ /* Pass as context ID something that the CPU can verify */
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_context_ids_non_lead_cpu,
+ (u_register_t)(cpu_mpid + core_pos));
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for non-lead CPUs to enter the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c
new file mode 100644
index 000000000..8d8758b20
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file implements test cases exercising invalid scenarios of the CPU
+ * hotplug API. It checks that the PSCI implementation responds as per the
+ * PSCI specification.
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <tftf_lib.h>
+
+/*
+ * Event data structures used by non-lead CPUs to tell the lead CPU they entered
+ * the test.
+ */
+static event_t entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * If 'real_value' == 'expected_value' then return a test success.
+ * Otherwise, print an error message in the test report and report a test
+ * failure.
+ */
+static test_result_t report_result(int expected_value, int real_value)
+{
+ if (real_value != expected_value) {
+ tftf_testcase_printf(
+ "Wrong return value, expected %i, got %i\n",
+ expected_value, real_value);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t reissue_cpu_hotplug(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ int psci_ret;
+
+ tftf_send_event(&entered_test[core_pos]);
+
+ /*
+ * This time, we can't use tftf_cpu_on() to issue the power on request
+ * because this would go through too much test framework logic. E.g. the
+ * framework would figure out that the the CPU is already powered on by
+ * looking at the CPU state information it keeps, hence it would report
+ * an error.
+ *
+ * Here we need to bypass the framework and issue the SMC call directly
+ * from the test case itself. tftf_psci_cpu_on() is a simple wrapper
+ * over the SMC call.
+ *
+ * Entry point address argument can be any valid address.
+ */
+ psci_ret = tftf_psci_cpu_on(mpid, (uintptr_t)reissue_cpu_hotplug, 0);
+
+ return report_result(PSCI_E_ALREADY_ON, psci_ret);
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU which is already powered on
+ *
+ * 1) Power on all CPUs.
+ * 2) Each CPU re-issues the PSCI CPU_ON request on itself. This is expected to
+ * fail and the PSCI implementation is expected to report that CPUs are
+ * already powered on.
+ *
+ * The test is skipped if an error is encountered during the bring-up of
+ * non-lead CPUs.
+ */
+test_result_t test_psci_cpu_hotplug_plugged(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ int psci_ret;
+ unsigned int core_pos;
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) reissue_cpu_hotplug, 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_SKIPPED;
+
+ /* Wait for the CPU to enter the test */
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&entered_test[core_pos]);
+ }
+
+ return reissue_cpu_hotplug();
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU that doesn't exist
+ *
+ * Such a hotplug request is expected to fail and the PSCI implementation is
+ * expected to report that the parameters are invalid.
+ */
+test_result_t test_psci_cpu_hotplug_invalid_cpu(void)
+{
+ int psci_ret;
+
+ /*
+ * 0xFFFFFFFF is an invalid MPID.
+ * Pass a valid entry point address to make sure that the call does not
+ * fail for the wrong reason.
+ */
+ psci_ret = tftf_psci_cpu_on(0xFFFFFFFF,
+ (uintptr_t) test_psci_cpu_hotplug_invalid_cpu, 0);
+
+ return report_result(PSCI_E_INVALID_PARAMS, psci_ret);
+}
+
+/*
+ * @Test_Aim@ Hotplug request on a CPU with invalid entrypoint address
+ *
+ * Such a hotplug request is expected to fail and the PSCI implementation is
+ * expected to report that the entrypoint is an invalid address, for PSCI 1.0
+ * onwards
+ */
+test_result_t test_psci_cpu_hotplug_invalid_ep(void)
+{
+ int psci_ret;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int psci_version;
+
+ psci_version = tftf_get_psci_version();
+
+ if (!(psci_version & PSCI_MAJOR_VER_MASK)) {
+ tftf_testcase_printf(
+ "PSCI Version is less then 1.0\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ /*
+ * Here we need to bypass the framework and issue the SMC call
+ * directly from the test case itself as tftf_cpu_on calls SMC
+ * calls with hotplug as entry point. tftf_psci_cpu_on() is a
+ * simple wrapper over the SMC call.
+ *
+ * Entry point address argument can be any invalid address.
+ */
+
+ psci_ret = tftf_psci_cpu_on(cpu_mpid, 0, 0);
+ if (psci_ret != PSCI_E_INVALID_ADDRESS) {
+ tftf_testcase_printf("CPU:0x%x Expected: %i Actual: %i\n",
+ cpu_mpid,
+ PSCI_E_INVALID_ADDRESS,
+ psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
new file mode 100644
index 000000000..9e9998c9c
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/*
+ * Desired affinity level and state type (standby or powerdown) for the next
+ * CPU_SUSPEND operation. We need these shared variables because there is no way
+ * to pass arguments to non-lead CPUs...
+ */
+static unsigned int test_aff_level;
+static unsigned int test_suspend_type;
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+/*
+ * Variable used by the non-lead CPUs to tell the lead CPU they
+ * were woken up by IRQ_WAKE_SGI
+ */
+static event_t event_received_wake_irq[PLATFORM_CORE_COUNT];
+
+/* Variable used to confirm the CPU is woken up by IRQ_WAKE_SGI or Timer IRQ */
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+
+static int requested_irq_handler(void *data)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+#if ENABLE_ASSERTIONS
+ unsigned int irq_id = *(unsigned int *) data;
+#endif
+
+ assert(irq_id == IRQ_WAKE_SGI || irq_id == tftf_get_timer_irq());
+ assert(requested_irq_received[core_pos] == 0);
+
+ requested_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * Suspend the calling (non-lead) CPU.
+ * 1) Program a wake-up event to come out of suspend state
+ * 2) Suspend the CPU to the desired affinity level and power state (standby or
+ * powerdown)
+ * 3) Report success/failure of the suspend operation
+ */
+static test_result_t suspend_non_lead_cpu(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ uint32_t power_state, stateid;
+ int rc, expected_return_val;
+ u_register_t flags;
+
+ tftf_timer_register_handler(requested_irq_handler);
+
+ /* Tell the lead CPU that the calling CPU is about to suspend itself */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* IRQs need to be disabled prior to programming the timer */
+ /* Preserve DAIF flags*/
+ flags = read_daif();
+ disable_irq();
+
+ rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ if (rc != 0) {
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+ ERROR("Timer programming failed with error %d\n", rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
+ test_suspend_type, &stateid);
+
+ /*
+ * Suspend the calling CPU to the desired affinity level and power state
+ */
+ power_state = tftf_make_psci_pstate(test_aff_level,
+ test_suspend_type,
+ stateid);
+ rc = tftf_cpu_suspend(power_state);
+
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+
+ /* Wait until the IRQ wake interrupt is received */
+ while (!requested_irq_received[core_pos])
+ ;
+
+ tftf_send_event(&event_received_wake_irq[core_pos]);
+ tftf_timer_unregister_handler();
+
+ if (rc == expected_return_val)
+ return TEST_RESULT_SUCCESS;
+
+ tftf_testcase_printf("Wrong value: expected %i, got %i\n",
+ expected_return_val, rc);
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * CPU suspend test to the desired affinity level and power state
+ *
+ * 1) Power on all cores
+ * 2) Each core registers a wake-up event to come out of suspend state
+ * 3) Each core tries to enter suspend state
+ *
+ * The test is skipped if an error occurs during the bring-up of non-lead CPUs.
+ */
+static test_result_t test_psci_suspend(unsigned int aff_level,
+ unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int target_mpid, target_node;
+ unsigned int core_pos;
+ uint32_t power_state, stateid;
+ int rc, expected_return_val;
+ u_register_t flags;
+
+ if (aff_level > MPIDR_MAX_AFFLVL)
+ return TEST_RESULT_SKIPPED;
+
+ assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
+ (suspend_type == PSTATE_TYPE_STANDBY));
+
+ /* Export these variables for the non-lead CPUs */
+ test_aff_level = aff_level;
+ test_suspend_type = suspend_type;
+
+ /*
+ * All testcases in this file use the same cpu_ready[] array so it needs
+ * to be re-initialised each time.
+ */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+ tftf_init_event(&cpu_ready[i]);
+ tftf_init_event(&event_received_wake_irq[i]);
+ requested_irq_received[i] = 0;
+ }
+ /* Ensure the above writes are seen before any read */
+ dmbsy();
+
+ /*
+ * Preparation step: Power on all cores.
+ */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ rc = tftf_cpu_on(target_mpid,
+ (uintptr_t) suspend_non_lead_cpu,
+ 0);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ target_mpid, rc);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* IRQs need to be disabled prior to programming the timer */
+ /* Preserve DAIF flags*/
+ flags = read_daif();
+ disable_irq();
+
+ /*
+ * Program the timer, this will serve as the
+ * wake-up event to come out of suspend state.
+ */
+ rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ if (rc) {
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+ ERROR("Timer programming failed with error %d\n", rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
+ test_suspend_type, &stateid);
+
+ /*
+ * Suspend the calling CPU to the desired affinity level and power state
+ */
+ power_state = tftf_make_psci_pstate(test_aff_level,
+ test_suspend_type,
+ stateid);
+ if (test_aff_level >= PSTATE_AFF_LVL_2)
+ rc = tftf_cpu_suspend_save_sys_ctx(power_state);
+ else
+ rc = tftf_cpu_suspend(power_state);
+
+ /* Restore previous DAIF flags */
+ write_daif(flags);
+ isb();
+
+ /*
+ * Cancel the timer set up by lead CPU in case we have returned early
+ * due to invalid parameters or it will lead to spurious wake-up later.
+ */
+ tftf_cancel_timer();
+
+ /*
+ * Wait for all non-lead CPUs to receive IRQ_WAKE_SGI. This will also
+ * ensure that the lead CPU has received the system timer IRQ
+ * because SGI #IRQ_WAKE_SGI is sent only after that.
+ */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&event_received_wake_irq[core_pos]);
+ }
+
+ if (rc == expected_return_val)
+ return TEST_RESULT_SUCCESS;
+
+ tftf_testcase_printf("Wrong value: expected %i, got %i\n",
+ expected_return_val, rc);
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 0
+ */
+test_result_t test_psci_suspend_powerdown_level0(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 0
+ */
+test_result_t test_psci_suspend_standby_level0(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 1
+ */
+test_result_t test_psci_suspend_powerdown_level1(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 1
+ */
+test_result_t test_psci_suspend_standby_level1(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 2
+ */
+test_result_t test_psci_suspend_powerdown_level2(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 2
+ */
+test_result_t test_psci_suspend_standby_level2(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 3
+ */
+test_result_t test_psci_suspend_powerdown_level3(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 3
+ */
+test_result_t test_psci_suspend_standby_level3(void)
+{
+ return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c
new file mode 100644
index 000000000..35c26e8ac
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+
+#define SENTINEL 0x55
+#define MEM_PROT_ENABLED 1
+#define MEM_PROT_DISABLED 0
+/*
+ * Test to verify that mem_protect is executed in next boot after calling
+ * the PSCI mem_protect function
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : when after rebooting mem_protect is activated
+ * and the sentinel is detected to have been reset.
+ * TEST_RESULT_FAIL : when some of the calls to mem_protect fails or
+ * sentinel is not cleared after resetting.
+ */
+static test_result_t test_mem_protect_helper(void *arg)
+{
+ int ret;
+ unsigned char value;
+ unsigned char *sentinel = arg;
+
+ assert(sentinel != NULL);
+
+ if (tftf_is_rebooted()) {
+ value = *sentinel;
+ if (value != 0 && value != SENTINEL) {
+ tftf_testcase_printf("Sentinel address modified out of mem_protect:%d\n",
+ value);
+ return TEST_RESULT_FAIL;
+ }
+ if (value == SENTINEL) {
+ tftf_testcase_printf("Sentinel address not cleared by mem_protect\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ret = psci_mem_protect(MEM_PROT_DISABLED);
+ if (ret != MEM_PROT_ENABLED && ret != MEM_PROT_DISABLED) {
+ INFO("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* mem_protect mechanism should be disabled at this point */
+ ret = psci_mem_protect(MEM_PROT_ENABLED);
+ if (ret != MEM_PROT_DISABLED) {
+ tftf_testcase_printf("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* mem_protect mechanism should be enabled at this point */
+ ret = psci_mem_protect(MEM_PROT_ENABLED);
+ if (ret != MEM_PROT_ENABLED) {
+ tftf_testcase_printf("Mem_protect failed %d\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ *sentinel = SENTINEL;
+
+ /* Notify that we are rebooting now. */
+ tftf_notify_reboot();
+
+ psci_system_reset();
+ /*
+ * psci_reset shouldn't return
+ */
+ return TEST_RESULT_FAIL;
+}
+
+test_result_t test_mem_protect(void)
+{
+ map_args_unmap_t args;
+ unsigned char *sentinel;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("Mem_protect is not supported %d\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sentinel = psci_mem_prot_get_sentinel();
+ if (sentinel == NULL) {
+ tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ args.addr = (uintptr_t) sentinel & ~PAGE_SIZE_MASK;
+ args.size = PAGE_SIZE;
+ args.attr = MT_RW_DATA;
+ args.arg = sentinel;
+
+ return map_test_unmap(&args, test_mem_protect_helper);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c
new file mode 100644
index 000000000..68d15f706
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
/*
 * Ask PSCI whether the range [addr, addr + size) may be overwritten and
 * compare the answer against 'expected'.
 *
 * Fixes vs. original: this is a boolean helper (callers test it with '!'),
 * so it returns plain int rather than being mistyped as test_result_t;
 * the diagnostic message also spelt the API name "MEM_PROTEC_CHECK".
 *
 * Returns 1 when the firmware answer matches 'expected', 0 (after logging
 * the offending range) otherwise.
 */
static int mem_prot_check(uintptr_t addr, size_t size, int expected)
{
	int ret;

	ret = psci_mem_protect_check(addr, size);
	if (ret != expected) {
		tftf_testcase_printf("MEM_PROTECT_CHECK failed in (%llx,%llx)\n",
				(unsigned long long) addr,
				(unsigned long long) size);
		return 0;
	}
	return 1;
}
+
+static test_result_t test_region(const mem_region_t *region)
+{
+ uintptr_t max_addr = region->addr + region->size;
+
+ if (!mem_prot_check(region->addr, 0, PSCI_E_DENIED))
+ return 0;
+ if (!mem_prot_check(region->addr, SIZE_MAX, PSCI_E_DENIED))
+ return 0;
+ if (!mem_prot_check(region->addr, 1, PSCI_E_SUCCESS))
+ return 0;
+ if (!mem_prot_check(region->addr, 1, PSCI_E_SUCCESS))
+ return 0;
+ if (!mem_prot_check(region->addr, region->size - 1, PSCI_E_SUCCESS))
+ return 0;
+ if (!mem_prot_check(max_addr - 1, 1, PSCI_E_SUCCESS))
+ return 0;
+ return 1;
+}
+
+/*
+ * Test to verify that mem_protect_check_range returns correct answer
+ * for known memory locations.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : When all the checks return the expected value
+ * TEST_RESULT_FAIL : when some check fails or return an unepected value
+ */
+test_result_t test_mem_protect_check(void)
+{
+ int ret, nregions;
+ const mem_region_t *regions;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT_CHECK);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("MEM_PROTECT_CHECK is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ regions = plat_get_prot_regions(&nregions);
+ if (nregions <= 0) {
+ tftf_testcase_printf("Platform doesn't define testcases for MEM_PROTECT_CHECK\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!mem_prot_check(UINTPTR_MAX, 1, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(1, SIZE_MAX, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(UINTPTR_MAX, 0, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ if (!mem_prot_check(0, 1, PSCI_E_DENIED))
+ return TEST_RESULT_FAIL;
+
+ while (nregions-- > 0) {
+ if (!test_region(regions++))
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c
new file mode 100644
index 000000000..73a10489f
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+#include <trusted_os.h>
+#include <tsp.h>
+#include <uuid_utils.h>
+
+/*
+ * @Test_Aim@ Exercise PSCI MIGRATE_INFO_TYPE API
+ *
+ * This test exercises the PSCI MIGRATE_INFO_TYPE API in the following 2
+ * scenarios:
+ *
+ * == No Trusted OS is present ==
+ * In this case,
+ * a) either the EL3 firmware doesn't implement the MIGRATE_INFO_TYPE call
+ * b) or the MIGRATE_INFO_TYPE call should report that the Trusted OS is
+ * not present.
+ * In both cases, the MIGRATE call should not be supported.
+ *
+ * == A Trusted OS is present and it is the TSP ==
+ * In this case, the MIGRATE_INFO_TYPE call should report that the TSP is
+ * MP-capable and hence the MIGRATE call should not be supported.
+ *
+ * This test doesn't support any other Trusted OS than the TSP. It will be
+ * skipped for any other TOS.
+ */
+test_result_t test_migrate_info_type(void)
+{
+ uuid_t tos_uuid;
+ char tos_uuid_str[UUID_STR_SIZE];
+ smc_args args;
+ smc_ret_values ret;
+ int32_t mp_support;
+ int32_t migrate_ret;
+
+ /* Identify the level of multicore support present in the Trusted OS */
+ args.arg0 = SMC_PSCI_MIG_INFO_TYPE;
+ ret = tftf_smc(&args);
+ mp_support = (int32_t) ret.ret0;
+
+ if (is_trusted_os_present(&tos_uuid)) {
+ /* The only Trusted OS that this test supports is the TSP */
+ if (!uuid_equal(&tos_uuid, &tsp_uuid)) {
+ tftf_testcase_printf("Trusted OS is not the TSP, "
+ "its UUID is: %s\n",
+ uuid_to_str(&tos_uuid, tos_uuid_str));
+ return TEST_RESULT_SKIPPED;
+ }
+
+ INFO("TSP detected\n");
+
+ if (mp_support != TSP_MIGRATE_INFO) {
+ tftf_testcase_printf(
+ "Wrong return value for MIGRATE_INFO_TYPE: "
+ "expected %i, got %i\n",
+ TSP_MIGRATE_INFO, mp_support);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ INFO("No Trusted OS detected\n");
+
+ if ((mp_support != PSCI_E_NOT_SUPPORTED) &&
+ (mp_support != PSCI_TOS_NOT_PRESENT_MP)) {
+ tftf_testcase_printf(
+ "Wrong return value for MIGRATE_INFO_TYPE: "
+ "expected %i or %i, got %i\n",
+ PSCI_E_NOT_SUPPORTED,
+ PSCI_TOS_NOT_PRESENT_MP,
+ mp_support);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /*
+ * Either there is no Trusted OS or the Trusted OS is the TSP.
+ * In both cases, the MIGRATE call should not be supported.
+ */
+ args.arg0 = SMC_PSCI_MIG;
+ /*
+ * Pass a valid MPID so that the MIGRATE call doesn't fail because of
+ * invalid parameters
+ */
+ args.arg1 = read_mpidr_el1() & MPID_MASK;
+ ret = tftf_smc(&args);
+ migrate_ret = (int32_t) ret.ret0;
+
+ if (migrate_ret != PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("Wrong return value for MIGRATE: "
+ "expected %i, got %i\n",
+ PSCI_E_NOT_SUPPORTED, migrate_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c
new file mode 100644
index 000000000..c7b17026d
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Check the list of PSCI functions for PSCI support
+ *
+ * Call PSCI_FEATURES for each PSCI function ID.
+ * - If a PSCI function is mandatory (as per the PSCI specification) then check
+ * the validity of the return flags.
+ * - If a PSCI function is optional (as per the PSCI specification) and
+ * implemented, check the validity of the feature flags.
+ */
+test_result_t test_psci_features(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+ int32_t ret_flag;
+ const psci_function_t *psci_fn;
+
+ for (unsigned int i = 0; i < PSCI_NUM_CALLS; i++) {
+ psci_fn = &psci_functions[i];
+
+ ret_flag = tftf_get_psci_feature_info(psci_fn->id);
+
+ if (!psci_fn->mandatory) {
+ /*
+ * If the PSCI function is optional then the PSCI
+ * implementation is allowed to not implement it.
+ */
+ if (ret_flag == PSCI_E_NOT_SUPPORTED)
+ continue;
+
+ INFO("%s non-mandatory function is SUPPORTED\n",
+ psci_fn->str);
+ } else {
+ /* Check mandatory PSCI call is supported */
+ if (ret_flag == PSCI_E_NOT_SUPPORTED) {
+ result = TEST_RESULT_FAIL;
+ tftf_testcase_printf(
+ "%s mandatory function is NOT SUPPORTED\n",
+ psci_fn->str);
+ continue;
+ }
+ }
+
+ /* Check the feature flags for CPU_SUSPEND PSCI calls */
+ if ((psci_fn->id == SMC_PSCI_CPU_SUSPEND_AARCH32) ||
+ (psci_fn->id == SMC_PSCI_CPU_SUSPEND_AARCH64)) {
+ if ((ret_flag & ~CPU_SUSPEND_FEAT_VALID_MASK) != 0) {
+ result = TEST_RESULT_FAIL;
+ tftf_testcase_printf(
+ "Invalid feature flags for CPU_SUSPEND: 0x%x\n",
+ ret_flag);
+ }
+ } else {
+ /* Check the feature flags for other PSCI calls */
+ if (ret_flag != PSCI_FEATURE_FLAGS_ZERO) {
+ result = TEST_RESULT_FAIL;
+ tftf_testcase_printf(
+ "Wrong feature flags for %s\n, "
+ "expected 0x%08x, got 0x%08x\n",
+ psci_fn->str,
+ PSCI_FEATURE_FLAGS_ZERO, ret_flag);
+ }
+ }
+ }
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Check invalid PSCI function ids (Negative Test).
+ */
+test_result_t test_psci_features_invalid_id(void)
+{
+ /* Invalid function ids for negative testing */
+ uint32_t invalid_psci_func = 0xc400a011;
+ uint32_t ret_flag = tftf_get_psci_feature_info(invalid_psci_func);
+
+ if (ret_flag == PSCI_E_NOT_SUPPORTED)
+ return TEST_RESULT_SUCCESS;
+
+ tftf_testcase_printf("ERROR: Invalid PSCI function is SUPPORTED\n");
+
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
new file mode 100644
index 000000000..d39cf5bdf
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/* Invoke _func and verifies return value is TEST_RESULT_SUCCESS */
+#define TEST_FUNC(_func) { \
+ int ret = (_func)(); \
+ if (ret != TEST_RESULT_SUCCESS) { \
+ INFO("test_node_hw_state: function " #_func " failed!\n"); \
+ return ret; \
+ } \
+}
+
+/* Enable messages for debugging purposes */
+#if 0
+# define DBGMSG(...) INFO(__VA_ARGS__)
+#else
+# define DBGMSG(...)
+#endif
+
+#define INVALID_POWER_LEVEL (PLAT_MAX_PWR_LEVEL + 1)
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static event_t cpu_continue[PLATFORM_CORE_COUNT];
+
+/* MPIDRs for CPU belonging to both own and foreign clusters */
+static u_register_t native_peer = INVALID_MPID;
+static u_register_t foreign_peer = INVALID_MPID;
+
+static test_result_t cpu_ping(void)
+{
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait for flag to proceed */
+ tftf_wait_for_event(&cpu_continue[core_pos]);
+
+ /*
+ * When returning from the function, the TFTF framework will power CPUs
+ * down, without this test needing to do anything
+ */
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Helper function to detect support for PSCI NODE_HW_STATE */
+static int is_psci_node_hw_state_supported(void)
+{
+ return (tftf_get_psci_feature_info(SMC_PSCI_CPU_HW_STATE64) ==
+ PSCI_E_NOT_SUPPORTED) ? 0 : 1;
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for the current CPU and make sure it returns
+ * PSCI_HW_STATE_ON
+ */
+static test_result_t test_self_cpu(void)
+{
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), 0) != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for the current cluster and make sure it
+ * returns PSCI_HW_STATE_ON
+ */
+static test_result_t test_self_cluster(void)
+{
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), 1) != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for a foreign CPU that's currently off. Make
+ * sure it returns PSCI_HW_STATE_OFF
+ */
+static test_result_t test_offline_cpu(void)
+{
+ assert(foreign_peer != INVALID_MPID);
+ if (tftf_psci_node_hw_state(foreign_peer, 0) != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE for a cluster that's currently off. Make sure
+ * it returns PSCI_HW_STATE_OFF
+ */
+static test_result_t test_offline_cluster(void)
+{
+ assert(foreign_peer != INVALID_MPID);
+ if (tftf_psci_node_hw_state(foreign_peer, 1) != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE with an invalid MPIDR. Make sure it returns
+ * invalid parameters
+ */
+static test_result_t test_invalid_mpidr(void)
+{
+ if (tftf_psci_node_hw_state(INVALID_MPID, 0) != PSCI_E_INVALID_PARAMS) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE with an invalid power_level. Make sure it
+ * returns invalid parameters
+ */
+static test_result_t test_invalid_power_level(void)
+{
+ if (tftf_psci_node_hw_state(read_mpidr_el1(), INVALID_POWER_LEVEL) !=
+ PSCI_E_INVALID_PARAMS) {
+ DBGMSG("%s: failed\n", __func__);
+ return TEST_RESULT_FAIL;
+ } else {
+ return TEST_RESULT_SUCCESS;
+ }
+}
+
+/*
+ * @Test_Aim@ Call NODE_HW_STATE on all powered-down CPUs on the system. Verify
+ * that the state was PSCI_HW_STATE_OFF before, but is PSCI_HW_STATE_ON
+ * afterwards
+ */
+static test_result_t test_online_all(void)
+{
+ int cpu_node, pos, state, ret, i;
+ u_register_t mpidr, my_mpidr;
+
+ /* Initialize all events */
+ for (i = 0; i < ARRAY_SIZE(cpu_booted); i++)
+ tftf_init_event(&cpu_booted[i]);
+ for (i = 0; i < ARRAY_SIZE(cpu_continue); i++)
+ tftf_init_event(&cpu_continue[i]);
+
+ DBGMSG("%s: powering cores on...\n", __func__);
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ DBGMSG("%s: my mpidr: %llx\n", __func__,
+ (unsigned long long) my_mpidr);
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Verify that the other CPU is turned off */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: before: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long) mpidr,
+ state, PSCI_HW_STATE_OFF);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Power on the CPU and wait for its event */
+ pos = platform_get_core_pos(mpidr);
+ ret = tftf_cpu_on(mpidr, (uintptr_t) cpu_ping, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ DBGMSG("%s: powering on %llx failed", __func__,
+ (unsigned long long)mpidr);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_wait_for_event(&cpu_booted[pos]);
+
+ /* Verify that the other CPU is turned on */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_ON) {
+ DBGMSG("%s: after: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long)mpidr,
+ state, PSCI_HW_STATE_ON);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Allow to the CPU to proceed to power down */
+ tftf_send_event(&cpu_continue[pos]);
+ }
+
+ /* Wait for other CPUs to power down */
+ INFO("%s: waiting for all other CPUs to power down\n", __func__);
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Loop until other CPU is powered down */
+ while (tftf_psci_affinity_info(mpidr, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ tftf_timer_sleep(10);
+ }
+
+ /* Now verify that all CPUs have powered off */
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == my_mpidr)
+ continue;
+
+ /* Verify that the other CPU is turned off */
+ state = tftf_psci_node_hw_state(mpidr, 0);
+ if (state != PSCI_HW_STATE_OFF) {
+ DBGMSG("%s: mpidr %llx: state %u, expected %u\n",
+ __func__, (unsigned long long)mpidr,
+ state, PSCI_HW_STATE_OFF);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Find a peer CPU in the system. The 'foreign' argument specifies where to
+ * locate the peer CPU: value zero finds a CPU in the same cluster; non-zero
+ * argument finds CPU from a different cluster.
+ */
+static u_register_t find_peer(int foreign)
+{
+ int dmn, cpu;
+ u_register_t mpidr, my_mpidr;
+
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ dmn = PWR_DOMAIN_INIT;
+ cpu = PWR_DOMAIN_INIT;
+ do {
+ dmn = tftf_get_next_peer_domain(dmn, foreign);
+ if (foreign) {
+ cpu = tftf_get_next_cpu_in_pwr_domain(dmn,
+ PWR_DOMAIN_INIT);
+ } else {
+ cpu = dmn;
+ }
+
+ assert(cpu != PWR_DOMAIN_INIT);
+ mpidr = tftf_get_mpidr_from_node(cpu);
+ assert(mpidr != INVALID_MPID);
+ } while (mpidr == my_mpidr && dmn != PWR_DOMAIN_INIT);
+
+ return mpidr;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI NODE_HW_STATE API
+ */
+test_result_t test_psci_node_hw_state(void)
+{
+ DBGMSG("%s: begin\n", __func__);
+ if (!is_psci_node_hw_state_supported()) {
+ tftf_testcase_printf("PSCI NODE_HW_STATE is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ TEST_FUNC(test_invalid_mpidr);
+ TEST_FUNC(test_invalid_power_level);
+ TEST_FUNC(test_self_cpu);
+ TEST_FUNC(test_self_cluster);
+ TEST_FUNC(test_online_all);
+
+ DBGMSG("%s: end\n", __func__);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI NODE_HW_STATE API in systems with more than one
+ * cluster
+ */
+test_result_t test_psci_node_hw_state_multi(void)
+{
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ DBGMSG("%s: begin\n", __func__);
+ if (!is_psci_node_hw_state_supported()) {
+ tftf_testcase_printf("PSCI NODE_HW_STATE is not supported\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Initialize peer MPDIRs */
+ native_peer = find_peer(0);
+ foreign_peer = find_peer(1);
+ DBGMSG("native=%x foreign=%x\n", native_peer, foreign_peer);
+
+ TEST_FUNC(test_offline_cpu);
+ TEST_FUNC(test_offline_cluster);
+
+ DBGMSG("%s: end\n", __func__);
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
new file mode 100644
index 000000000..64f6c81e6
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <cassert.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <math_utils.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <spinlock.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+typedef struct psci_stat_data {
+ u_register_t count;
+ u_register_t residency;
+} psci_stat_data_t;
+
+/* Assuming 3 power levels as maximum */
+#define MAX_STAT_STATES (PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL)
+
+CASSERT(PLAT_MAX_PWR_LEVEL <= 2, assert_maximum_defined_stat_array_size_exceeded);
+
+/*
+ * The data structure holding stat information as queried by each CPU.
+ * We don't worry about cache line thrashing.
+ */
+static psci_stat_data_t stat_data[PLATFORM_CORE_COUNT][PLAT_MAX_PWR_LEVEL + 1]
+ [MAX_STAT_STATES];
+
+/*
+ * Synchronization event for stat tests. A 2-D event array is used to
+ * signal every CPU by each CPU. This caters for the fact some
+ * CPUs may have higher performance than others and will not
+ * cause the synchronization to fail.
+ */
+static event_t stat_sync[PLATFORM_CORE_COUNT][PLATFORM_CORE_COUNT];
+
+/* Global variables to synchronize participating CPUs on wake-up */
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+
+/* Helper function to wait for CPUs participating in the test */
+static void wait_for_participating_cpus(void)
+{
+ assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+ spin_lock(&cpu_count_lock);
+ cpu_count++;
+ spin_unlock(&cpu_count_lock);
+
+ assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+ while (cpu_count != participating_cpu_count)
+ ;
+}
+
+/* Helper function to detect support for PSCI STAT APIs in firmware */
+static int is_psci_stat_supported(void)
+{
+ int ret_stat_count, ret_stat_res;
+
+ ret_stat_count = tftf_get_psci_feature_info(SMC_PSCI_STAT_COUNT64);
+ ret_stat_res = tftf_get_psci_feature_info(SMC_PSCI_STAT_RESIDENCY64);
+
+ if (ret_stat_count == PSCI_E_NOT_SUPPORTED ||
+ ret_stat_res == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("PSCI STAT APIs are not supported"
+ " in EL3 firmware\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Utility function to get the `stat` index into the `psci_stat_data_t` array
+ * (which is the index into the 3rd dimension in the array).
+ */
+static int get_stat_idx(unsigned int pstateid_idx[], unsigned int lvl)
+{
+ int i, stat_idx;
+ /* Calculate the stat_idx */
+ for (stat_idx = 0, i = lvl; i >= 0; i--) {
+ assert((pstateid_idx[i] != PWR_STATE_INIT_INDEX) &&
+ (pstateid_idx[i] < PLAT_MAX_PWR_STATES_PER_LVL));
+ stat_idx += (pstateid_idx[i] * pow(PLAT_MAX_PWR_STATES_PER_LVL, i));
+ }
+
+ assert(stat_idx >= 0 && stat_idx < MAX_STAT_STATES);
+ return stat_idx;
+}
+
+/*
+ * Return the pointer the `psci_stat_data_t` array corresponding to
+ * cpu index, power level and `stat` index (which is computed from
+ * the pstateid_idx[]).
+ */
+static psci_stat_data_t *get_psci_stat_data(int cpu_idx,
+ unsigned int pwrlvl,
+ unsigned int pstateid_idx[])
+{
+ int stat_idx;
+
+ /* Calculate the stat_idx */
+ stat_idx = get_stat_idx(pstateid_idx, pwrlvl);
+ return &stat_data[cpu_idx][pwrlvl][stat_idx];
+}
+
+/*
+ * This function validates the current stat results against a previous
+ * snapshot of stat information gathered in `stat_data` by
+ * populate_all_stats_all_lvls() function. It does 2 kinds of validation:
+ *
+ * 1. Precise validation:
+ * This ensures that the targeted power state as indicated by `pstateid_idx`
+ * has incremented according to expectation. If it hasn't incremented,
+ * then the targeted power state was downgraded by the platform (due to various
+ * reasons) and in this case the queried stats should be equal to previous
+ * stats.
+ *
+ * This validation is done for the targeted power level and all lower levels
+ * for the given power state.
+ *
+ * 2. Imprecise validation:
+ *
+ * Iterate over all the power states and ensure that the previous stats for the
+ * power state are never higher than the current one for power levels <=
+ * targeted power level. For power states at higher power levels than targeted
+ * power level, it should remain the same.
+ */
+static int validate_stat_result(unsigned int pstateid_idx[],
+ unsigned int target_pwrlvl)
+{
+ unsigned long my_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_idx = platform_get_core_pos(my_mpid);
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ int ret;
+ psci_stat_data_t current_stat_data;
+ psci_stat_data_t *pstat_data;
+ unsigned int local_pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ assert(pstateid_idx[0] != PWR_STATE_INIT_INDEX);
+
+ /* Create a local copy of pstateid_idx */
+ memcpy(local_pstateid_idx, pstateid_idx, sizeof(local_pstateid_idx));
+
+ /* First do the precise validation */
+ do {
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ local_pstateid_idx);
+ assert(ret == PSCI_E_SUCCESS);
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ /* Get current stat values for the power state */
+ current_stat_data.residency = tftf_psci_stat_residency(my_mpid, power_state);
+ current_stat_data.count = tftf_psci_stat_count(my_mpid, power_state);
+
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, local_pstateid_idx);
+ if ((pstat_data->residency == current_stat_data.residency) &&
+ (pstat_data->count == current_stat_data.count)) {
+ /*
+ * Targeted power state has been downgraded and the
+ * queried stats should be equal to previous stats
+ */
+ WARN("The power state 0x%x at pwrlvl %d has been"
+ " downgraded by platform\n",
+ power_state, pwrlvl);
+ } else if ((pstat_data->residency > current_stat_data.residency) ||
+ (pstat_data->count + 1 != current_stat_data.count)) {
+ /*
+ * The previous residency is greater than current or the
+ * stat count has not incremented by 1 for the targeted
+ * power state. Return error in this case.
+ */
+ ERROR("Precise validation failed. Stats for CPU %d at"
+ " pwrlvl %d for power state 0x%x : Prev"
+ " stats 0x%llx 0x%llx, current stats"
+ " 0x%llx 0x%llx\n",
+ cpu_idx, pwrlvl, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ } else {
+ /*
+ * The stats are as expected for the targeted power state
+ * i.e previous residency <= current residency and
+ * previous stat count + 1 == current stat count.
+ */
+ INFO("The difference in programmed time and residency"
+ " time in us = %lld at power level %d\n",
+ (unsigned long long)
+ ((current_stat_data.residency - pstat_data->residency)
+ - (PLAT_SUSPEND_ENTRY_TIME * 1000)), pwrlvl);
+ }
+
+ local_pstateid_idx[pwrlvl] = PWR_STATE_INIT_INDEX;
+ } while (pwrlvl);
+
+ INIT_PWR_LEVEL_INDEX(local_pstateid_idx);
+
+ /* Imprecise validation */
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, local_pstateid_idx);
+ if (local_pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ local_pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, local_pstateid_idx);
+
+ current_stat_data.residency = tftf_psci_stat_residency(my_mpid,
+ power_state);
+ current_stat_data.count = tftf_psci_stat_count(my_mpid,
+ power_state);
+ if (pwrlvl <= target_pwrlvl) {
+ /*
+ * For all power states that target power domain level
+ * <= `target_pwrlvl, the previous residency and count
+ * should never be greater than current.
+ */
+ if ((pstat_data->residency > current_stat_data.residency) ||
+ (pstat_data->count > current_stat_data.count)) {
+ ERROR("Imprecise validation failed for"
+ " pwrlvl <= target_pwrlvl. Stats for"
+ " CPU %d for power state 0x%x. Prev"
+ " stats 0x%llx 0x%llx, current stats 0x%llx"
+ " 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ }
+
+ } else {
+ /*
+ * For all power states that target power domain level
+ * > `target_pwrlvl, the previous residency and count
+ * should never be equal to current.
+ */
+ if ((pstat_data->residency != current_stat_data.residency) ||
+ (pstat_data->count != current_stat_data.count)) {
+ ERROR("Imprecise validation failed for pwrlvl >"
+ " target_pwrlvl. Stats for CPU"
+ " %d for power state 0x%x. Prev"
+ " stats 0x%llx 0x%llx, current stats"
+ " 0x%llx 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)current_stat_data.residency,
+ (unsigned long long)current_stat_data.count);
+ return -1;
+ }
+ }
+ } while (1);
+
+ return 0;
+}
+
+/*
+ * This function populates the stats for all power states at all power domain
+ * levels for the current CPU in the global `stat_data` array.
+ */
+static void populate_all_stats_all_lvls(void)
+{
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ int ret;
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ psci_stat_data_t *pstat_data;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+ if (pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, pstateid_idx);
+ pstat_data->residency = tftf_psci_stat_residency(mpidr,
+ power_state);
+ pstat_data->count = tftf_psci_stat_count(mpidr, power_state);
+ } while (1);
+}
+
+/*
+ * The core function executed by all CPUs when the `test_psci_stat_all_power_states`
+ * test is executed. Each CPU queries the next valid power state using the
+ * `power state` helpers and assumes that the power state progresses from lower
+ * power levels to higher levels. It also assumes that the number of applicable
+ * low power states are same across Big - Little clusters. In future this
+ * assumption may not be true and this test may need to be reworked to have
+ * `power domain` awareness. The sequence executed by the test is as follows:
+ *
+ * 1. Populate the stats for all power states at all power domain levels for
+ * the current CPU.
+ *
+ * 2. Each CPU queries the next applicable low power state using the
+ * `power state` helpers.
+ *
+ * 3. A synchronization point is created for all the CPUs taking part in the
+ * test using TFTF events. This is needed because low power states at higher
+ * power domain levels (like cluster) can only be achieved if all the CPUs
+ * within the power domain request the same low power state at the (nearly)
+ * same time.
+ *
+ * 4. Validate the current stats with the previously cached stats (done in
+ * Step 1).
+ *
+ * 5. Iterate for the next applicable power state.
+ */
+static test_result_t test_psci_stat(void)
+{
+ int ret;
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, state_id, power_state, susp_type, cpu_node;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ /* Initialize the per-cpu synchronization event */
+ for_each_cpu(cpu_node) {
+ u_register_t target_mpid;
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ tftf_init_event(&stat_sync[platform_get_core_pos(target_mpid)][cpu_idx]);
+ }
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+
+ do {
+ /* Get the next power state */
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+ if (pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Populate the PSCI STATs for all power levels and all states */
+ populate_all_stats_all_lvls();
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl, &susp_type, &state_id,
+ pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ power_state = tftf_make_psci_pstate(pwrlvl, susp_type, state_id);
+
+ /*
+ * Create a synchronization point. A 2-D event array is used to
+ * signal every CPU by each CPU. This caters for the fact some
+ * CPUs may have higher performance than others and will not
+ * cause the sychronization to fail.
+ */
+ for_each_cpu(cpu_node) {
+ unsigned int target_idx;
+ target_idx = platform_get_core_pos(
+ tftf_get_mpidr_from_node(cpu_node));
+ tftf_send_event(&stat_sync[target_idx][cpu_idx]);
+ tftf_wait_for_event(&stat_sync[cpu_idx][target_idx]);
+ }
+
+ /*
+ * Initialize the cpu_count to zero for synchronizing
+ * participating CPUs.
+ */
+ spin_lock(&cpu_count_lock);
+ if (cpu_count == participating_cpu_count)
+ cpu_count = 0;
+ spin_unlock(&cpu_count_lock);
+
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+ tftf_cancel_timer();
+ if (ret) {
+ ERROR("PSCI-STAT: Suspend failed. "
+ "mpidr:0x%llx pwr_lvl:0x%x powerstate:0x%x\n",
+ (unsigned long long)mpidr,
+ pwrlvl, power_state);
+ return TEST_RESULT_FAIL;
+ }
+
+
+ INFO("PSCI-STAT: mpidr:0x%llx pwr_lvl:0x%x powerstate:0x%x\n",
+ (unsigned long long)mpidr,
+ pwrlvl, power_state);
+
+ wait_for_participating_cpus();
+
+ ret = validate_stat_result(pstateid_idx, pwrlvl);
+ if (ret)
+ return TEST_RESULT_FAIL;
+ } while (1);
+
+ /*
+ * Populate the PSCI STATs for all power levels and all states
+ * for final validation from lead CPU
+ */
+ populate_all_stats_all_lvls();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This function validates the stats for all secondary CPUs from the lead
+ * CPU. It queries the stats for the power states for other CPUs and compares
+ * it against the stats previously cached by them.
+ */
+static int validate_stat_result_from_lead(u_register_t target_cpu)
+{
+ unsigned int pwrlvl, susp_type, state_id, power_state, cpu_idx;
+ int ret;
+ psci_stat_data_t target_stat_data;
+ psci_stat_data_t *pstat_data;
+ unsigned int local_pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ cpu_idx = platform_get_core_pos(target_cpu);
+ INIT_PWR_LEVEL_INDEX(local_pstateid_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, local_pstateid_idx);
+ /* An INIT index at level 0 means all states have been visited */
+ if (local_pstateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ local_pstateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ assert(pwrlvl <= PLAT_MAX_PWR_LEVEL);
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ /* Get target CPU stat values for the power state */
+ target_stat_data.residency = tftf_psci_stat_residency(target_cpu, power_state);
+ target_stat_data.count = tftf_psci_stat_count(target_cpu, power_state);
+
+ /* Compare against the stats the target CPU cached for itself */
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, local_pstateid_idx);
+ if ((pstat_data->residency != target_stat_data.residency) ||
+ (pstat_data->count != target_stat_data.count)) {
+ INFO("Stats for CPU %d for power state 0x%x :"
+ " Recorded stats 0x%llx 0x%llx,"
+ " Target stats 0x%llx 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)target_stat_data.residency,
+ (unsigned long long)target_stat_data.count);
+ return -1;
+ }
+ } while (1);
+
+ return 0;
+}
+
+
+/*
+ * @Test_Aim@ Verify if PSCI Stat Count and Residency are updated
+ * correctly for all valid suspend states for every power domain at
+ * various power levels. The `test_psci_stat()` is invoked by all the CPUs
+ * participating in the test. The final stats are also validated from the
+ * lead CPU to ensure that stats queried from current CPU and another (lead)
+ * CPU are the same.
+ */
+test_result_t test_psci_stat_all_power_states(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Initialize participating CPU count */
+ participating_cpu_count = tftf_get_total_cpus_count();
+ init_spinlock(&cpu_count_lock);
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ ret = tftf_cpu_on(target_mpid, (uintptr_t) test_psci_stat, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ if (test_psci_stat() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ INFO("Validating stats from lead CPU\n");
+
+ /* Validate the stat results from the lead CPU */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ /* Busy-wait until the target CPU has powered itself off */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+
+ ret = validate_stat_result_from_lead(target_mpid);
+ if (ret)
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Helper function for the secondary CPUs to boot and populate its stats
+ * and power OFF.
+ *
+ * Waits for all participating CPUs before caching the stats so every
+ * secondary records them at (nearly) the same point in the test.
+ */
+static test_result_t update_stats_and_power_off(void)
+{
+ wait_for_participating_cpus();
+
+ populate_all_stats_all_lvls();
+ return TEST_RESULT_SUCCESS;
+}
+
+/* The target level for the stats validation */
+static int verify_stats_target_lvl;
+
+/*
+ * This is lighter validation of stat results than `validate_stat_result()`.
+ * This function iterates over all the power states of type
+ * `PSTATE_TYPE_POWERDOWN` for the current CPU and ensures that stats
+ * corresponding to at least a single power state targeted to a power level
+ * <= `verify_stats_target_lvl` have incremented as expected. If the stats
+ * haven't incremented corresponding to a power state, then they must be
+ * equal to the previous stats or else return FAILURE.
+ */
+static test_result_t verify_powerdown_stats(void)
+{
+ int ret;
+ unsigned int stateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ psci_stat_data_t *pstat_data, curr_stat_data;
+ u_register_t mpidr = read_mpidr_el1() & MPID_MASK;
+ int cpu_idx = platform_get_core_pos(mpidr);
+
+ test_result_t result = TEST_RESULT_FAIL;
+
+ INIT_PWR_LEVEL_INDEX(stateid_idx);
+
+ wait_for_participating_cpus();
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, stateid_idx);
+ /* An INIT index at level 0 means all states have been visited */
+ if (stateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid; skip non-powerdown states */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ stateid_idx);
+ if ((ret != PSCI_E_SUCCESS) ||
+ (susp_type != PSTATE_TYPE_POWERDOWN))
+ continue;
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+ pstat_data = get_psci_stat_data(cpu_idx, pwrlvl, stateid_idx);
+ curr_stat_data.residency = tftf_psci_stat_residency(mpidr,
+ power_state);
+ curr_stat_data.count = tftf_psci_stat_count(mpidr, power_state);
+
+ if ((curr_stat_data.count == (pstat_data->count + 1)) &&
+ (curr_stat_data.residency >= pstat_data->residency)) {
+ /*
+ * If the stats for at least a single power state
+ * targeted to a pwrlvl <= `verify_stats_target_lvl`
+ * satisfy the condition then set result to SUCCESS
+ */
+ if (verify_stats_target_lvl >= pwrlvl)
+ result = TEST_RESULT_SUCCESS;
+ } else if ((curr_stat_data.count != pstat_data->count) ||
+ (curr_stat_data.residency != pstat_data->residency)) {
+
+ /*
+ * If the stats haven't incremented, then they should be
+ * equal to previous.
+ */
+ ERROR("Stats for CPU %d for power state 0x%x :"
+ " Recorded stats 0x%llx 0x%llx,"
+ " current stats 0x%llx 0x%llx\n",
+ cpu_idx, power_state,
+ (unsigned long long)pstat_data->residency,
+ (unsigned long long)pstat_data->count,
+ (unsigned long long)curr_stat_data.residency,
+ (unsigned long long)curr_stat_data.count);
+
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats after calling CPU_OFF on each secondary core.
+ * The test sequence is as follows:
+ *
+ * 1. Invoke CPU_ON on all secondary cores which wakeup and update
+ * the stats corresponding to all power states.
+ *
+ * 2. After secondaries have turned OFF, suspend the lead CPU for a short time
+ * duration.
+ *
+ * 3. On wakeup from suspend, turn ON all secondaries which then validate the
+ * stats by invoking `verify_powerdown_stats()`.
+ */
+test_result_t test_psci_stats_cpu_off(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, off_cpu_count = 0, ret;
+ unsigned int power_state, stateid;
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /*
+ * The primary CPU is an external observer in this test.
+ * Count it out of the participating CPUs pool.
+ */
+ participating_cpu_count = tftf_get_total_cpus_count() - 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Turn on each secondary and update the stats. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ /*
+ * cpu_count will be updated by the secondary CPUs when they
+ * execute `update_stats_and_power_off` function.
+ */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) update_stats_and_power_off, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ off_cpu_count++;
+ }
+
+ assert(off_cpu_count == participating_cpu_count);
+ cpu_count = 0;
+
+ ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, &stateid);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("Failed to construct composite state\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Suspend the lead CPU briefly in a CPU-level standby state */
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_STANDBY, stateid);
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+ if (ret != 0) {
+ ERROR("Failed to program timer or suspend CPU: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_cancel_timer();
+
+ /* The target level for CPU OFF is 0, (CPU power domain level) */
+ verify_stats_target_lvl = 0;
+
+ /* Now turn on each secondary and verify the stats */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) verify_powerdown_stats, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats after SYSTEM SUSPEND.
+ * The test sequence is as follows:
+ *
+ * 1. Invoke CPU_ON on all secondary cores which wakeup and update
+ * the stats corresponding to all power states.
+ *
+ * 2. After secondaries have turned OFF, invoke SYSTEM SUSPEND on the lead CPU.
+ *
+ * 3. On wakeup from suspend, turn ON all secondaries which then validate the
+ * stats by invoking `verify_powerdown_stats()`.
+ */
+test_result_t test_psci_stats_system_suspend(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, off_cpu_count = 0;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND64);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("SYSTEM_SUSPEND not supported"
+ " in EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Initialize participating CPU count. The lead CPU is excluded in the count */
+ participating_cpu_count = tftf_get_total_cpus_count() - 1;
+ init_spinlock(&cpu_count_lock);
+ cpu_count = 0;
+
+ /* Turn on each secondary and update the stats. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ /*
+ * cpu_count will be updated by the secondary CPUs when they
+ * execute `update_stats_and_power_off` function.
+ */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) update_stats_and_power_off, 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the secondaries to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+
+ off_cpu_count++;
+ }
+
+ assert(off_cpu_count == participating_cpu_count);
+ cpu_count = 0;
+
+ /* Update the stats corresponding to the lead CPU as well */
+ populate_all_stats_all_lvls();
+
+ /* Program timer to fire after delay and issue system suspend */
+ ret = tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ NULL, NULL);
+ tftf_cancel_timer();
+ if (ret)
+ return TEST_RESULT_FAIL;
+
+ /* The target level for SYSTEM SUSPEND is PLAT_MAX_PWR_LEVEL */
+ verify_stats_target_lvl = PLAT_MAX_PWR_LEVEL;
+
+ /* Now turn on each secondary CPU and verify the stats */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (lead_mpid == target_mpid)
+ continue;
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) verify_powerdown_stats, 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the secondary CPUs to turn OFF */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid)
+ continue;
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF)
+ ;
+ }
+
+ /* Increment the participating CPU count to include the lead CPU as well */
+ participating_cpu_count++;
+
+ /* Verify the stats on the lead CPU as well */
+ return verify_powerdown_stats();
+}
+
+/*
+ * This function verifies the stats for all power states after a cold boot.
+ * Basically the stats should be zero immediately after cold boot.
+ *
+ * As part of its initialization, the test framework puts the primary
+ * CPU in CPU standby state (in order to detect the power state format).
+ * This directly affects the PSCI statistics for the primary CPU.
+ * This means that, when entering this test function, the CPU Standby
+ * state statistics for the primary CPU may no longer be zero.
+ * Thus, we just ignore it and don't test it.
+ */
+static test_result_t verify_psci_stats_cold_boot(void)
+{
+ int ret, cpu_node;
+ unsigned int stateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ u_register_t target_mpid, lead_cpu, stat_count, stat_residency;
+
+ lead_cpu = read_mpidr_el1() & MPID_MASK;
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ INIT_PWR_LEVEL_INDEX(stateid_idx);
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, stateid_idx);
+ if (stateid_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ stateid_idx);
+ if (ret != PSCI_E_SUCCESS)
+ continue;
+
+ /* Skip the lead CPU's standby state (see comment above) */
+ if ((target_mpid == lead_cpu) && (pwrlvl == 0) &&
+ (susp_type == PSTATE_TYPE_STANDBY))
+ continue;
+
+ power_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+ stat_residency = tftf_psci_stat_residency(target_mpid, power_state);
+ stat_count = tftf_psci_stat_count(target_mpid, power_state);
+ /* NOTE(review): %lld is paired with unsigned long long casts
+ * below; %llu would be the matching conversion specifier. */
+ if (stat_count || stat_residency) {
+ ERROR("mpid = %lld, power_state = %x, "
+ "stat count = %lld, residency = %lld\n",
+ (unsigned long long) target_mpid,
+ power_state,
+ (unsigned long long) stat_count,
+ (unsigned long long) stat_residency);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats for each valid composite
+ * power state after system shutdown
+ */
+test_result_t test_psci_stats_after_shutdown(void)
+{
+ smc_args args = { SMC_PSCI_SYSTEM_OFF };
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ if (tftf_is_rebooted()) {
+ /*
+ * Successfully resumed from system off. Verify cold
+ * boot stats.
+ */
+ return verify_psci_stats_cold_boot();
+ }
+
+ /* Record the reboot intent, then shut the system down */
+ tftf_notify_reboot();
+ tftf_smc(&args);
+
+ /* The PSCI SYSTEM_OFF call is not supposed to return */
+ tftf_testcase_printf("System didn't shutdown properly\n");
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Validate PSCI stats for each valid composite
+ * power state after system reset
+ */
+test_result_t test_psci_stats_after_reset(void)
+{
+ smc_args args = { SMC_PSCI_SYSTEM_RESET };
+
+ if (!is_psci_stat_supported())
+ return TEST_RESULT_SKIPPED;
+
+ if (tftf_is_rebooted()) {
+ /*
+ * Successfully resumed from system reset. Verify cold
+ * boot stats.
+ */
+ return verify_psci_stats_cold_boot();
+ }
+
+ /* Record the reboot intent, then reset the system */
+ tftf_notify_reboot();
+ tftf_smc(&args);
+
+ /* The PSCI SYSTEM_RESET call is not supposed to return */
+ tftf_testcase_printf("System didn't reset properly\n");
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c
new file mode 100644
index 000000000..27f2a0ae0
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Check the version of PSCI implemented
+ *
+ * This test relies on the PSCI_VERSION SMC call.
+ * It expects versions 0.1, 0.2, 1.0, 1.1
+ */
+test_result_t test_psci_version(void)
+{
+ int version;
+
+ /* Fail if the reported version is not one the framework recognizes */
+ version = tftf_get_psci_version();
+ if (!tftf_is_valid_psci_version(version)) {
+ tftf_testcase_printf(
+ "Wrong PSCI version:0x%08x\n", version);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c
new file mode 100644
index 000000000..dfc1b35dd
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/reset2/reset2.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#define SENTINEL 0x55
+#define INVALID_ARCH_RESET 0x00000001
+#define INVALID_VENDOR_RESET 0x80000002
+#define MEM_PROTECT_ENABLE 1
+#define MEM_PROTECT_DISABLE 0
+
+
+/*
+ * Test warm reset using PSCI RESET2 call (parameter 0)
+ * Returns:
+ * TEST_RESULT_SUCCESS: The system resets after calling RESET2
+ * TEST_RESULT_FAIL: The RESET2 PSCI call failed
+ *
+ * `arg` is the address of the sentinel byte; after a warm reset the
+ * sentinel is expected to retain the value written before the reset.
+ */
+static test_result_t reset2_warm_helper(void *arg)
+{
+ smc_args args = { SMC_PSCI_RESET2, 0};
+ unsigned char *sentinel = arg;
+ unsigned char value;
+
+ assert(sentinel != NULL);
+
+ if (tftf_is_rebooted()) {
+ /* Post-reset path: the sentinel must have survived */
+ value = *sentinel;
+ if (value != SENTINEL) {
+ tftf_testcase_printf("Sentinel address modified\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+ }
+ *sentinel = SENTINEL;
+
+ tftf_notify_reboot();
+ tftf_smc(&args);
+
+ /* The PSCI RESET2 call is not supposed to return */
+ tftf_testcase_printf("System didn't shutdown properly\n");
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Entry point for the RESET2 warm-reset test: locates a sentinel byte,
+ * maps its page read-write, and runs `reset2_warm_helper` with the
+ * mapping in place.
+ */
+test_result_t reset2_warm(void)
+{
+ map_args_unmap_t args;
+ unsigned char *sentinel;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_RESET2);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("PSCI RESET2 is not supported %d\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sentinel = psci_mem_prot_get_sentinel();
+ if (sentinel == NULL) {
+ tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Map the page containing the sentinel as RW data */
+ args.addr = (uintptr_t) sentinel & ~PAGE_SIZE_MASK;
+ args.size = PAGE_SIZE;
+ args.attr = MT_RW_DATA;
+ args.arg = sentinel;
+
+ return map_test_unmap(&args, reset2_warm_helper);
+}
+
+/*
+ * Test correct error handling
+ * Returns:
+ * TEST_RESULT_SUCCESS: If the system catches all the wrong calls
+ * TEST_RESULT_FAIL: Some PSCI call failed
+ */
+test_result_t reset2_test_invalid(void)
+{
+ smc_args args = {SMC_PSCI_RESET2};
+ smc_ret_values ret_vals;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_RESET2);
+ if (ret == PSCI_E_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ /* An invalid vendor reset type must return a negative error code */
+ args.arg1 = INVALID_VENDOR_RESET;
+ ret_vals = tftf_smc(&args);
+ ret = ret_vals.ret0;
+
+ if (ret >= 0)
+ return TEST_RESULT_FAIL;
+
+ /* An invalid architectural reset type must also be rejected */
+ args.arg1 = INVALID_ARCH_RESET;
+ ret_vals = tftf_smc(&args);
+ ret = ret_vals.ret0;
+
+ if (ret >= 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test combination of reset2 and mem_protect
+ * Returns:
+ * TEST_RESULT_SUCCESS: if the system is reset and mem_protect
+ * is disabled.
+ * TEST_RESULT_FAIL: Some PSCI call failed or mem_protect wasn't
+ * disabled
+ *
+ * `arg` is the address of the sentinel byte, which must survive the
+ * warm reset unmodified.
+ */
+static test_result_t reset2_mem_protect_helper(void *arg)
+{
+ int ret;
+ unsigned char value;
+ smc_args args = { SMC_PSCI_RESET2, 0};
+ unsigned char *sentinel = arg;
+
+ assert(sentinel != NULL);
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_MEM_PROTECT);
+ if (ret == PSCI_E_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ if (tftf_is_rebooted()) {
+ /*
+ * A warm reset (RESET2) must leave mem_protect disabled.
+ * BUG FIX: the original returned TEST_RESULT_SUCCESS here,
+ * contradicting both the printed message and the documented
+ * contract above — this condition is a failure.
+ */
+ if (psci_mem_protect(0) != MEM_PROTECT_DISABLE) {
+ tftf_testcase_printf("mem_protect is not disabled");
+ return TEST_RESULT_FAIL;
+ }
+ value = *sentinel;
+ if (value != SENTINEL) {
+ tftf_testcase_printf("Sentinel address modified\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+ }
+
+ *sentinel = SENTINEL;
+
+ /* Enable-or-query mem_protect; any other return value is an error */
+ ret = psci_mem_protect(0);
+ if (ret != MEM_PROTECT_ENABLE && ret != MEM_PROTECT_DISABLE) {
+ tftf_testcase_printf("error calling mem_protect");
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_notify_reboot();
+ tftf_smc(&args);
+
+ /* The PSCI RESET2 call is not supposed to return */
+ tftf_testcase_printf("System didn't shutdown properly\n");
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Entry point for the RESET2 + MEM_PROTECT test: locates a sentinel byte,
+ * maps its page read-write, and runs `reset2_mem_protect_helper` with the
+ * mapping in place.
+ */
+test_result_t reset2_mem_protect(void)
+{
+ map_args_unmap_t args;
+ unsigned char *sentinel;
+ int ret;
+
+ ret = tftf_get_psci_feature_info(SMC_PSCI_RESET2);
+ if (ret == PSCI_E_NOT_SUPPORTED) {
+ tftf_testcase_printf("PSCI RESET2 is not supported %d\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sentinel = psci_mem_prot_get_sentinel();
+ if (sentinel == NULL) {
+ tftf_testcase_printf("Could not find a suitable address for the sentinel.\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Map the page containing the sentinel as RW data */
+ args.addr = (uintptr_t) sentinel & ~PAGE_SIZE_MASK;
+ args.size = PAGE_SIZE;
+ args.attr = MT_RW_DATA;
+ args.arg = sentinel;
+
+ return map_test_unmap(&args, reset2_mem_protect_helper);
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c
new file mode 100644
index 000000000..2ac6550fd
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Validate the SYSTEM_OFF call.
+ * Test SUCCESS in case of system shutdown.
+ * Test FAIL in case of execution not terminated.
+ */
+test_result_t test_system_off(void)
+{
+ smc_args args = { SMC_PSCI_SYSTEM_OFF };
+
+ if (tftf_is_rebooted()) {
+ /* Successfully resumed from system off */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ /* Record the reboot intent, then shut the system down */
+ tftf_notify_reboot();
+ tftf_smc(&args);
+
+ /* The PSCI SYSTEM_OFF call is not supposed to return */
+ tftf_testcase_printf("System didn't shutdown properly\n");
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c
new file mode 100644
index 000000000..04c5bf8f6
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define SUSPEND_TIME_3_SECS 3000
+#define SUSPEND_TIME_10_SECS 10000
+#define TEST_ITERATION_COUNT 0x5
+
+/* Helper macro to verify if system suspend API is supported */
+#define is_psci_sys_susp64_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND64) != \
+ PSCI_E_NOT_SUPPORTED)
+
+static unsigned int deepest_power_state;
+static unsigned int test_target_node = PWR_DOMAIN_INIT;
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static event_t sgi_received[PLATFORM_CORE_COUNT];
+static event_t waitq[PLATFORM_CORE_COUNT];
+
+static volatile int wakeup_irq_rcvd[PLATFORM_CORE_COUNT];
+static volatile unsigned int sgi_handled[PLATFORM_CORE_COUNT];
+static sgi_data_t sgi_data;
+static volatile int cpu_ref_count;
+
+extern unsigned long __RO_START__;
+#define TFTF_RO_START (unsigned long)(&__RO_START__)
+extern unsigned long __RO_END__;
+#define TFTF_RO_END (unsigned long)(&__RO_END__)
+
+/*
+ * Timer IRQ handler used as the wake-up source by the suspend tests.
+ * Sets the per-core wake-up flag that the tests busy-wait on. The
+ * assert guards against the handler firing twice before the flag is
+ * consumed (the tests clear it before arming the timer).
+ */
+static int suspend_wakeup_handler(void *data)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(wakeup_irq_rcvd[core_pos] == 0);
+ wakeup_irq_rcvd[core_pos] = 1;
+
+ return 0;
+}
+
+/*
+ * SGI handler: records the received SGI payload and flags reception
+ * for the handling core.
+ * NOTE(review): sgi_data is a single shared slot, not per-core; this
+ * is fine while only one core's SGI payload is inspected at a time
+ * (as in test_psci_sys_susp_pending_irq) — confirm no concurrent use.
+ */
+static int sgi_handler(void *data)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ sgi_data = *(sgi_data_t *) data;
+ sgi_handled[core_pos] = 1;
+ return 0;
+}
+
+/*
+ * Iterate over all cores and issue system suspend
+ * After returning from suspend, ensure that the core which entered
+ * suspend resumed from suspend.
+ */
+static test_result_t sys_suspend_from_all_cores(void)
+{
+ unsigned long long my_mpid = read_mpidr_el1() & MPID_MASK, target_mpid;
+ unsigned int core_pos = platform_get_core_pos(my_mpid);
+ int ret;
+ int psci_ret;
+
+ /* Increment the count of CPUs in the test */
+ cpu_ref_count++;
+ dsbsy();
+
+ /* Wait until all other CPUs are powered off before suspending */
+ while (!is_sys_suspend_state_ready())
+ ;
+
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /* Program timer to fire after delay */
+ ret = tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ NULL, NULL);
+
+ /* Wait until the IRQ wake interrupt is received */
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+
+ if (ret) {
+ tftf_testcase_printf("Failed to program timer or suspend "
+ "system from core %x\n", core_pos);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ /* Done with the suspend test. Decrement count */
+ cpu_ref_count--;
+ dsbsy();
+
+ /*
+ * Hand over to the next CPU in the topology: power it on with this
+ * same function as entry point, so each core runs the suspend test
+ * sequentially. The chain ends when the topology is exhausted.
+ */
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ if (test_target_node != PWR_DOMAIN_INIT) {
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) sys_suspend_from_all_cores,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test : Issue system suspend from all cores
+ * sequentially. This test ensures that system suspend can be issued
+ * from all cores and right core is resumed from system suspend.
+ */
+test_result_t test_system_suspend_from_all_cores(void)
+{
+ unsigned long long target_mpid, my_mpid;
+ int psci_ret;
+
+ test_target_node = PWR_DOMAIN_INIT;
+ my_mpid = read_mpidr_el1() & MPID_MASK;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* NOTE(review): cpu_ready is not waited on in this test path;
+ * the initialisation below looks redundant — confirm. */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+ tftf_init_event(&cpu_ready[i]);
+
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ assert(test_target_node != PWR_DOMAIN_INIT);
+
+ /* If the first CPU in the topology is the lead CPU, run inline */
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ if (target_mpid == my_mpid)
+ return sys_suspend_from_all_cores();
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) sys_suspend_from_all_cores,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Helper function to issue SYSTEM SUSPEND SMC with custom parameters.
+ */
+/*
+ * Issue the PSCI SYSTEM_SUSPEND SMC directly with the given entry point
+ * and context ID. Returns the PSCI status from x0 (narrowed to int).
+ * Used to pass deliberately invalid entry points that the framework
+ * wrapper would not allow.
+ */
+int sys_suspend_helper(uintptr_t entry_point_address,
+ u_register_t context_id)
+{
+ smc_args args = {
+ SMC_PSCI_SYSTEM_SUSPEND,
+ (uintptr_t)entry_point_address,
+ (u_register_t)context_id
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+
+ return ret_vals.ret0;
+}
+
+/*
+ * Function to issue system suspend with invalid entry-point on all cores
+ * sequentially.
+ */
+static test_result_t invalid_entrypoint_for_sys_suspend(void)
+{
+ unsigned long long target_mpid;
+ int psci_ret;
+
+ /* Increment the count of CPUs in the test */
+ cpu_ref_count++;
+ dsbsy();
+
+ /* Wait until all other CPUs are powered off before suspending */
+ while (!is_sys_suspend_state_ready())
+ ;
+
+ /*
+ * 0x1 is not a valid resume entry point; EL3 must reject it with
+ * PSCI_E_INVALID_ADDRESS rather than attempt the suspend.
+ */
+ psci_ret = sys_suspend_helper((uintptr_t) 0x1, 0);
+ if (psci_ret != PSCI_E_INVALID_ADDRESS) {
+ tftf_testcase_printf("Test failed with invalid entry addr %x\n",
+ psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Done with the suspend test. Decrement count */
+ cpu_ref_count--;
+ dsbsy();
+
+ /* Chain the test onto the next CPU in the topology, if any */
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ if (test_target_node != PWR_DOMAIN_INIT) {
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) invalid_entrypoint_for_sys_suspend,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API test: Issue system suspend with invalid entrypoint on all
+ * cores. It should return error.
+ */
+test_result_t test_system_suspend_invalid_entrypoint(void)
+{
+ unsigned long long target_mpid, my_mpid;
+ int psci_ret;
+
+ test_target_node = PWR_DOMAIN_INIT;
+ my_mpid = read_mpidr_el1() & MPID_MASK;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* NOTE(review): cpu_ready is not waited on in this test path;
+ * the initialisation below looks redundant — confirm. */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i)
+ tftf_init_event(&cpu_ready[i]);
+
+ test_target_node = tftf_topology_next_cpu(test_target_node);
+ assert(test_target_node != PWR_DOMAIN_INIT);
+
+ /* If the first CPU in the topology is the lead CPU, run inline */
+ target_mpid = tftf_get_mpidr_from_node(test_target_node);
+ if (target_mpid == my_mpid)
+ return invalid_entrypoint_for_sys_suspend();
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) invalid_entrypoint_for_sys_suspend,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d) \n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Wait for the target CPU to enter the test. The TFTF framework
+ * requires more than one CPU to be in the test to detect that the
+ * test has not finished.
+ */
+ while (!cpu_ref_count)
+ ;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Function to test Non lead CPU response to SGIs after multiple invocations
+ * of system suspend.
+ */
+static test_result_t non_lead_cpu_sgi_test(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int sgi_ret;
+
+ /* Register the local IRQ handler for the SGI */
+ sgi_ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+ if (sgi_ret != 0) {
+ tftf_testcase_printf("Failed to register IRQ %u (%d)",
+ sgi_id, sgi_ret);
+ return TEST_RESULT_FAIL;
+ }
+ /* Enable SGI */
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+
+ /* Signal to the lead CPU that we are ready to receive SGI */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /*
+ * Wait for SGI.
+ * NOTE(review): sgi_handled[core_pos] is not cleared here before
+ * waiting; this relies on it being zero for this core when the CPU
+ * enters the test — confirm no stale value from a prior test.
+ */
+ while (sgi_handled[core_pos] == 0)
+ ;
+ /* Send event to indicate reception of SGI */
+ tftf_send_event(&sgi_received[core_pos]);
+
+ /* Unregister SGI handler */
+ tftf_irq_disable(sgi_id);
+ tftf_irq_unregister_handler(sgi_id);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test: Issue system suspend multiple times with
+ * all non-lead cores as OFF. This test ensures invoking system suspend
+ * multiple times on lead core does not have any issue.
+ * Steps:
+ * - Register timer wakeup event and issue system suspend multiple
+ * times. Ensure that the system suspend succeeds.
+ * - Turn on all the non lead CPU and send SGIs to them to ensure that
+ * all the non lead CPU are responsive.
+ */
+test_result_t test_psci_sys_susp_multiple_iteration(void)
+{
+ unsigned int target_mpid, target_node;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(lead_mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int psci_ret;
+ int timer_ret;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+ tftf_init_event(&cpu_ready[i]);
+ tftf_init_event(&sgi_received[i]);
+ }
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /* Suspend/resume the system TEST_ITERATION_COUNT times in a row */
+ for (unsigned int i = 0; i < TEST_ITERATION_COUNT; i++) {
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /*
+ * Program the wakeup timer, this will serve as the wake-up event
+ * to come out of suspend state, and issue system suspend
+ */
+ tftf_program_timer_and_sys_suspend(
+ PLAT_SUSPEND_ENTRY_TIME, &timer_ret, &psci_ret);
+
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("System suspend failed with return value %i\n",
+ psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Timer programming failed with return value %i\n",
+ timer_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ tftf_cancel_timer();
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+
+ /* Turn on all cores after test to ensure all cores boot up */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ core_pos = platform_get_core_pos(target_mpid);
+
+ if (target_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) non_lead_cpu_sgi_test,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Send SGI to all non lead CPUs and ensure that they receive it */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_sgi(sgi_id, core_pos);
+ tftf_wait_for_event(&sgi_received[core_pos]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Functionality test : Issue system suspend with pending
+ * SGI on calling core. System suspend call should return prior to the
+ * programmed wake-up interval.
+ * Steps:
+ * - Mask the interrupts on lead CPU and send SGI to current CPU
+ * - Configure a wake-up timer and issue SYSTEM SUSPEND
+ * - Unmask the interrupt and verify that current CPU has woken
+ * prior to the wake-up timer firing.
+ */
+test_result_t test_psci_sys_susp_pending_irq(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ int sgi_ret;
+ int psci_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Initialize variables */
+ sgi_handled[core_pos] = 0;
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /* Register the local IRQ handler for the SGI */
+ sgi_ret = tftf_irq_register_handler(sgi_id, sgi_handler);
+ if (sgi_ret != 0) {
+ tftf_testcase_printf("Failed to register IRQ %u (%d)",
+ sgi_id, sgi_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Register for timer interrupt */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program the MB timer, for 3 secs to fire timer interrupt if
+ * system enters suspend state with pending IRQ
+ */
+ tftf_program_timer(SUSPEND_TIME_3_SECS);
+
+ /*
+ * Mask interrupts at the CPU before raising the SGI, so the SGI
+ * stays pending across the SYSTEM_SUSPEND call below.
+ */
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+ disable_irq();
+
+ /* Send the SGI to the lead CPU */
+ tftf_send_sgi(sgi_id, core_pos);
+
+ /* Check if system enters suspend state with pending IRQ or not */
+ psci_ret = tftf_system_suspend();
+
+ /* Un-mask the interrupt */
+ enable_irq();
+
+ /*
+ * If the wake-up timer has fired, then the pending interrupt did
+ * not have any effect on the SYSTEM SUSPEND which means the
+ * test case failed.
+ *
+ */
+ if (wakeup_irq_rcvd[core_pos]) {
+ tftf_testcase_printf("Timer irq received\n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* Wait for the SGI to be handled */
+ while (sgi_handled[core_pos] == 0)
+ ;
+
+ /* Verify the sgi data received by the SGI handler */
+ if (sgi_data.irq_id != sgi_id) {
+ tftf_testcase_printf("Wrong IRQ ID, expected %u, got %u\n",
+ sgi_id, sgi_data.irq_id);
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* The suspend itself must still report success to EL3's caller */
+ if (psci_ret != PSCI_E_SUCCESS)
+ ret = TEST_RESULT_FAIL;
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ /* Unregister SGI handler */
+ tftf_irq_disable(sgi_id);
+ tftf_irq_unregister_handler(sgi_id);
+
+ return ret;
+}
+
+/*
+ * Helper function to calculate a simple additive checksum of a given
+ * DRAM area. `size` is in bytes; any trailing bytes beyond a multiple
+ * of sizeof(unsigned int) are ignored.
+ * NOTE(review): the accumulator is unsigned int while the return type
+ * is unsigned long — the result is effectively 32-bit; harmless for
+ * this before/after comparison, but the types could be aligned.
+ */
+unsigned long check_data_integrity(unsigned int *addr, unsigned int size)
+{
+ unsigned int chksum = 0;
+ unsigned int i;
+
+ for (i = 0; i < (size/sizeof(unsigned int)); i++)
+ chksum += *(addr + i);
+ return chksum;
+}
+
+/*
+ * @Test_Aim@ Functionality Test: Ensure that RAM contents are preserved on
+ * resume from system suspend
+ * Steps:
+ * - Write a known pattern to the DRAM and calculate the hash.
+ * - Configure wake-up timer and issue SYSTEM SUSPEND on lead CPU.
+ * - Recalculate the hash of the DRAM and compare it with the previous
+ * value. Both the hash values should match.
+ *
+ */
+test_result_t test_psci_sys_susp_validate_ram(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned long prev_hash_val = 0;
+ unsigned long present_hash_val = 0;
+ int psci_ret;
+ int timer_ret;
+
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ wakeup_irq_rcvd[core_pos] = 0;
+
+ /*
+ * Check hash on known region of RAM before putting into suspend.
+ * The TFTF read-only section is used because it must not change
+ * while the test itself runs.
+ */
+ prev_hash_val = check_data_integrity((unsigned int *)TFTF_RO_START,
+ TFTF_RO_END - TFTF_RO_START);
+
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program timer to fire interrupt after timer expires and issue
+ * system suspend
+ */
+ tftf_program_timer_and_sys_suspend(SUSPEND_TIME_10_SECS,
+ &timer_ret, &psci_ret);
+
+ while (!wakeup_irq_rcvd[core_pos])
+ ;
+ if (psci_ret == PSCI_E_SUCCESS) {
+ /*
+ * Check hash on known region of RAM after returning
+ * from suspend
+ */
+ present_hash_val = check_data_integrity(
+ (unsigned int *)TFTF_RO_START,
+ TFTF_RO_END - TFTF_RO_START);
+ if (present_hash_val != prev_hash_val) {
+ tftf_testcase_printf("ERROR: RAM data not retained \n");
+ ret = TEST_RESULT_FAIL;
+ }
+ } else {
+ tftf_testcase_printf("Failed: system suspend to RAM \n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ if (timer_ret) {
+ tftf_testcase_printf("Failed: timer programming \n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ /* Unregister timer handler */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ return ret;
+}
+
+/*
+ * Helper function to get the deepest power state.
+ * Iterates over every valid composite power state at PLAT_MAX_PWR_LEVEL
+ * and returns the last one accepted by tftf_get_pstate_vars().
+ * NOTE(review): "deepest" relies on the iteration order of
+ * tftf_set_next_state_id_idx() yielding deeper states last — confirm.
+ */
+static unsigned int get_deepest_power_state(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ unsigned int power_state = 0;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+ int ret;
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ ret = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+ if (ret)
+ continue;
+
+ power_state = tftf_make_psci_pstate(power_level,
+ test_suspend_type,
+ suspend_state_id);
+
+ } while (1);
+
+ return power_state;
+}
+
+/*
+ * Entry point for non-lead CPUs: signal readiness to the lead CPU,
+ * then suspend to the deepest power state. The SGI is enabled first so
+ * the lead CPU can later wake this core out of suspend.
+ */
+static test_result_t suspend_non_lead_cpu(void)
+{
+ unsigned long long mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ int ret;
+
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ /* Tell the lead CPU that the calling CPU is about to suspend itself */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ ret = tftf_cpu_suspend(deepest_power_state);
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ if (ret) {
+ ERROR(" CPU suspend failed with error %x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API Test: Issue system suspend on a core while other
+ * cores are in suspend. This test ensures that system suspend will
+ * not be successful if cores other than core issuing suspend are not
+ * in OFF state.
+ * Steps:
+ * - Turn on non lead CPUs and suspend it to the deepest suspend
+ * power state.
+ * - Issue SYSTEM SUSPEND on primary CPU. The API should return
+ * error.
+ *
+ */
+test_result_t test_psci_sys_susp_with_cores_in_suspend(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ unsigned int target_mpid, target_node;
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ int psci_ret;
+ int timer_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; j++)
+ tftf_init_event(&cpu_ready[j]);
+
+ wakeup_irq_rcvd[core_pos] = 0;
+ deepest_power_state = get_deepest_power_state();
+
+ /* Suspend all cores other than lead core */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ if (target_mpid == lead_mpid)
+ continue;
+
+ /* Turn on the non lead CPU and suspend it. */
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) suspend_non_lead_cpu,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /*
+ * Wait for 10 ms to ensure all the secondaries have suspended.
+ * NOTE(review): this is a heuristic delay — the cpu_ready event is
+ * sent *before* the secondaries actually enter suspend.
+ */
+ waitms(10);
+
+ /*
+ * Register and program timer, then issue a system suspend
+ * when other cores are in suspend state
+ */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+ tftf_program_timer_and_sys_suspend(
+ PLAT_SUSPEND_ENTRY_TIME, &timer_ret, &psci_ret);
+
+ /* Wake all non-lead CPUs */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+ }
+
+ /*
+ * Check return value from the system suspend API: cores in
+ * suspend (not OFF) must cause SYSTEM_SUSPEND to be denied.
+ */
+ if (psci_ret != PSCI_E_DENIED) {
+ tftf_testcase_printf("Entered suspend with cores in suspend\n");
+ ret = TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Failed to program the timer\n");
+ ret = TEST_RESULT_FAIL;
+ }
+ /* Unregister and cancel timer */
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+
+ return ret;
+}
+
+/*
+ * This function holds the CPU until a `waitq` event is received,
+ * keeping the core in the ON (running) state for the duration.
+ */
+static test_result_t cpu_waitq(void)
+{
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Wait for event from primary cpu */
+ tftf_wait_for_event(&waitq[core_pos]);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ API TEST: Ensure that system suspend will not be successful
+ * if cores other than core issuing suspend are in running state
+ *
+ * Steps :
+ * - Turn on multiple cores on the non lead cluster
+ * - Issue SYSTEM SUSPEND. The API should return error.
+ */
+test_result_t test_psci_sys_susp_with_cores_on(void)
+{
+ unsigned int lead_cluster = MPIDR_CLUSTER_ID(read_mpidr_el1());
+ unsigned int core_pos;
+ unsigned int target_mpid, target_node;
+ int psci_ret;
+ int timer_ret;
+ test_result_t ret = TEST_RESULT_SUCCESS;
+
+ if (!is_psci_sys_susp64_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; j++) {
+ tftf_init_event(&waitq[j]);
+ tftf_init_event(&cpu_ready[j]);
+ wakeup_irq_rcvd[j] = 0;
+ }
+
+ /* Turn on cores in non-lead cluster and park them in cpu_waitq() */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ if (MPIDR_CLUSTER_ID(target_mpid) == lead_cluster)
+ continue;
+
+ psci_ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) cpu_waitq,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power "
+ "on CPU 0x%x (%d)\n",
+ (unsigned int)target_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(target_mpid);
+ /* Ensure that the core has booted */
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Register timer handler */
+ tftf_timer_register_handler(suspend_wakeup_handler);
+
+ /*
+ * Program timer to fire after delay and issue system suspend with
+ * other cores in ON state
+ */
+ tftf_program_timer_and_sys_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ &timer_ret, &psci_ret);
+
+ /* Send event to CPUs waiting for `waitq` event so they can exit. */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+
+ /* Skip lead cluster */
+ if (MPIDR_CLUSTER_ID(target_mpid) == lead_cluster)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_event(&waitq[core_pos]);
+ }
+
+ /*
+ * Check return value from the system suspend API: cores still ON
+ * must cause SYSTEM_SUSPEND to be denied.
+ */
+ if (psci_ret != PSCI_E_DENIED) {
+ tftf_testcase_printf("Test failed when suspending with return "
+ "value: %x \n", psci_ret);
+ ret = TEST_RESULT_FAIL;
+ }
+ if (timer_ret) {
+ tftf_testcase_printf("Test failed with return value when "
+ "programming the timer: %x \n", timer_ret);
+ ret = TEST_RESULT_FAIL;
+ }
+ tftf_timer_unregister_handler();
+ tftf_cancel_timer();
+ return ret;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c
new file mode 100644
index 000000000..55de1fa89
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <tftf_lib.h>
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static volatile unsigned int sgi_received[PLATFORM_CORE_COUNT];
+
+static test_result_t (*psci_validate_test_function)(void);
+
+/*
+ * Sets the per-core sgi_received flag to indicate the SGI has been
+ * processed, so the test can exit in a clean state.
+ */
+static int validate_pstate_sgi_handler(void *data)
+{
+ unsigned int core_pos;
+
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ sgi_received[core_pos] = 1;
+ return 0;
+}
+
+/*
+ * Gets the next possible composite state ID's combination and creates
+ * a composite ID from 0 to max power levels in the system. It then calculates
+ * whether the calculated composite ID is valid or invalid and validates EL3
+ * firmware's return value.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
+static test_result_t validate_el3_pstate_parsing(void)
+{
+ unsigned int j;
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ /* Exercise every composite state ID at every target power level */
+ for (j = 0; j <= PLAT_MAX_PWR_LEVEL; j++) {
+ do {
+ tftf_set_next_state_id_idx(j, pstate_id_idx);
+
+ /* Index wrap-around means all combinations are done */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ /*
+ * tftf_get_pstate_vars() tells us whether this
+ * combination is valid; EL3 must agree.
+ */
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+ power_state = tftf_make_psci_pstate(power_level,
+ test_suspend_type,
+ suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (expected_return_val != psci_ret) {
+ tftf_testcase_printf("Failed with values: "
+ " psci_ret:%d"
+ " expected_return_val:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ expected_return_val,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite state ID of a single valid local level above level
+ * zero and tests the EL3 firmware's return value matches
+ * PSCI_E_INVALID_PARAMS.
+ *
+ * For level 0, both local and composite power state are same. Hence, it's
+ * skipped.
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If PLAT_MAX_PWR_LEVEL is < 1
+ */
+static test_result_t valid_only_local_stateid(void)
+{
+ unsigned int power_state;
+ int psci_ret;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+ const plat_state_prop_t *local_level_state;
+ unsigned int i;
+
+ /* If only single power level is possible, SKIP the test */
+ if (!PLAT_MAX_PWR_LEVEL) {
+ tftf_testcase_printf("Platform has only a single valid local level\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ /*
+ * Start from power level 1, as local state for power level zero will
+ * be a valid composite id
+ */
+ for (i = 1; i <= PLAT_MAX_PWR_LEVEL; i++) {
+ do {
+
+ INFO("Getting next local state:\n");
+ tftf_set_next_local_state_id_idx(i, pstate_id_idx);
+
+ if (pstate_id_idx[i] == PWR_STATE_INIT_INDEX)
+ break;
+ /*
+ * Build a state ID containing ONLY level i's local
+ * state, shifted into its field; all lower levels are
+ * zero, which makes the composite ID invalid.
+ */
+ local_level_state = plat_get_state_prop(i) + pstate_id_idx[i];
+ power_state = tftf_make_psci_pstate(i,
+ local_level_state->is_pwrdown,
+ local_level_state->state_ID << PLAT_LOCAL_PSTATE_WIDTH);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (psci_ret != PSCI_E_INVALID_PARAMS) {
+ tftf_testcase_printf("Expected invalid params but got :"
+ " psci_ret: %d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+
+ return TEST_RESULT_FAIL;
+ }
+ } while (pstate_id_idx[i] != PWR_STATE_INIT_INDEX);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Create a composite state ID of invalid state ID's at all levels and
+ * tests the EL3 firmware's return value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
<br>
+static test_result_t completely_invalid_stateid(void)
+{
+ unsigned int state_id;
+ int i;
+ unsigned int power_state;
+ int psci_ret;
+
+ state_id = 0;
+
+ /* Make stateID with all invalid ID's for all power levels */
+ for (i = 0; i < PLAT_MAX_PWR_LEVEL; i++)
+ state_id = state_id |
+ ((PLAT_PSCI_DUMMY_STATE_ID & ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+ << (PLAT_LOCAL_PSTATE_WIDTH * i));
+
+ /* A fully-invalid composite state ID must be rejected by EL3 */
+ power_state = tftf_make_psci_pstate(PLAT_MAX_PWR_LEVEL, PSTATE_TYPE_POWERDOWN, state_id);
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (psci_ret != PSCI_E_INVALID_PARAMS) {
+ tftf_testcase_printf("Expected invalid params but got : %d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite power state with invalid state type and tests
+ * the EL3 firmware's return value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ */
+static test_result_t invalid_state_type(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+
+ /* Only corrupt states that are otherwise fully valid */
+ if (expected_return_val != PSCI_E_SUCCESS)
+ continue;
+
+ /* Reverse the suspend type */
+ power_state = tftf_make_psci_pstate(power_level, !test_suspend_type, suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (PSCI_E_INVALID_PARAMS != psci_ret) {
+ tftf_testcase_printf("Failed with values:"
+ " psci_ret:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite power state with valid local state but invalid
+ * power level and tests the EL3 firmware's return value matches
+ * PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If EL3 firmware supports extended state ID
+ */
+static test_result_t invalid_power_level(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ int expected_return_val;
+ unsigned int power_state;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+
+ /*
+ * Skip the test if EL3 firmware code supports extended state ID:
+ * the extended format has no separate power-level field to corrupt.
+ */
+ if (!tftf_is_psci_pstate_format_original())
+ return TEST_RESULT_SKIPPED;
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ expected_return_val = tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx);
+
+ /* Only corrupt states that are otherwise fully valid */
+ if (expected_return_val != PSCI_E_SUCCESS)
+ continue;
+
+ /* Make a power state with invalid power level */
+ power_state = tftf_make_psci_pstate(power_level + 1,
+ test_suspend_type,
+ suspend_state_id);
+
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (PSCI_E_INVALID_PARAMS != psci_ret) {
+ tftf_testcase_printf("Failed with values:"
+ " psci_ret:%d"
+ " power_state:0x%x\n",
+ psci_ret,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Creates a composite state ID of valid local state at some levels
+ * and invalid state ID at others and tests the EL3 firmware's return
+ * value matches PSCI_E_INVALID_PARAMS
+ *
+ * Returns:
+ * TEST_RESULT_SUCCESS : If PSCI return value is as expected
+ * TEST_RESULT_FAIL : If PSCI return value is not as expected
+ * TEST_RESULT_SKIPPED : If PLAT_MAX_PWR_LEVEL is < 1
+ */
+static test_result_t mixed_state_id(void)
+{
+ unsigned int test_suspend_type;
+ unsigned int suspend_state_id;
+ unsigned int power_level;
+ int psci_ret;
+ unsigned int power_state;
+ unsigned int j;
+ unsigned int pstate_id_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int invalid_id_set;
+
+ /*
+ * Platform contains only one power level and hence we can't have
+ * both valid and invalid local state
+ */
+ if (!PLAT_MAX_PWR_LEVEL)
+ return TEST_RESULT_SKIPPED;
+
+ INIT_PWR_LEVEL_INDEX(pstate_id_idx);
+
+ do {
+ tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, pstate_id_idx);
+
+ /* All index combinations have been exhausted */
+ if (pstate_id_idx[0] == PWR_STATE_INIT_INDEX)
+ break;
+
+ if (tftf_get_pstate_vars(&power_level,
+ &test_suspend_type,
+ &suspend_state_id,
+ pstate_id_idx) != PSCI_E_SUCCESS)
+ continue;
+
+ invalid_id_set = 0;
+
+ /*
+ * Generate a state ID with valid and invalid local state IDs at
+ * different levels
+ */
+ for (j = 0; j <= power_level; j++) {
+ /* Randomly decide whether to corrupt this level's local state ID */
+ if (rand() % 2) {
+ suspend_state_id = suspend_state_id |
+ ((PLAT_PSCI_DUMMY_STATE_ID &
+ ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+ << (PLAT_LOCAL_PSTATE_WIDTH * j));
+ invalid_id_set = 1;
+ }
+ }
+
+ /*
+ * Overwrite state ID for a random level if none of the
+ * levels are invalid
+ */
+ if (!invalid_id_set) {
+ j = rand() % (power_level + 1);
+ suspend_state_id = suspend_state_id |
+ ((PLAT_PSCI_DUMMY_STATE_ID &
+ ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1))
+ << (PLAT_LOCAL_PSTATE_WIDTH * j));
+ }
+
+ /* At least one level now carries an invalid local state ID */
+ power_state = tftf_make_psci_pstate(power_level, test_suspend_type, suspend_state_id);
+ psci_ret = tftf_cpu_suspend(power_state);
+
+ if (psci_ret != PSCI_E_INVALID_PARAMS) {
+ tftf_testcase_printf("Failed with values: power_level: %d"
+ " test_suspend_type: %d"
+ " suspend_state_id:%d"
+ " psci_ret:%d"
+ " power_state:0x%x\n",
+ power_level,
+ test_suspend_type,
+ suspend_state_id,
+ psci_ret,
+ power_state);
+ return TEST_RESULT_FAIL;
+ }
+ } while (1);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+
+/*
+ * This function contains common code for all test cases and runs the testcase
+ * specific code.
+ *
+ * Returns the return value of test case specific code
+ */
+static test_result_t test_execute_test_function(void)
+{
+ test_result_t ret;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ tftf_irq_register_handler(IRQ_NS_SGI_0, validate_pstate_sgi_handler);
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ /*
+ * Mask IRQ to prevent the interrupt handler being invoked
+ * and clearing the interrupt. A pending interrupt will cause this
+ * CPU to wake-up from suspend.
+ */
+ disable_irq();
+
+ /* Configure an SGI to wake-up from suspend */
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+ /* Run the testcase body installed in psci_validate_test_function */
+ ret = (*psci_validate_test_function)();
+
+ /* Unmask IRQs so the pending wake-up SGI is taken and handled */
+ enable_irq();
+
+ /* Wait until the SGI handler has run on this CPU before tearing down */
+ while (!sgi_received[core_pos])
+ ;
+
+ tftf_irq_disable(IRQ_NS_SGI_0);
+ tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+
+ return ret;
+}
+
+/*
+ * Non-lead CPU entry point function for all PSCI PSTATE validation functions.
+ *
+ * Returns the return value of test case specific code
+ */
+static test_result_t test_non_lead_cpu_validate_ep(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /*
+ * Tell the lead CPU that the calling CPU is ready to validate
+ * extended power state parsing
+ */
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ /* Run the same test body as the lead CPU */
+ return test_execute_test_function();
+}
+
+/*
+ * Lead CPU entry point function for all PSCI PSTATE validation functions. It
+ * powers on all secondaries and executes the test cases specific code.
+ *
+ * Returns the return value of test case specific code or SKIPPED in case
+ * if it is unable to power on a core or EL3 firmware only supports NULL
+ * stateID.
+ */
+static test_result_t test_lead_cpu_validate_ep(void)
+{
+ test_result_t ret;
+ unsigned int core_pos;
+ unsigned long lead_mpid;
+ unsigned long target_mpid;
+ unsigned long cpu_node;
+ int i;
+
+ if (tftf_is_psci_state_id_null()) {
+ tftf_testcase_printf("EL3 firmware supports only NULL stateID\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Initialise cpu_ready event variable */
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+ tftf_init_event(&cpu_ready[i]);
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /*
+ * Preparation step: Power on all cores.
+ */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ /*
+ * NOTE(review): 'ret' is declared test_result_t but here holds
+ * the PSCI return code of tftf_cpu_on(); a separate int would
+ * be cleaner.
+ */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) test_non_lead_cpu_validate_ep,
+ 0);
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ (unsigned int)target_mpid, ret);
+
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for all non-lead CPUs to be ready */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_wait_for_event(&cpu_ready[core_pos]);
+ }
+
+ /* Call this to execute the test case specific code */
+ return test_execute_test_function();
+}
+
+/*
+ * Creates all possible valid local state IDs at all levels and tests
+ * the EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_validate_pstate(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &validate_el3_pstate_parsing;
+ return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of a single valid local level and
+ * tests the EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_valid_local_pstate(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &valid_only_local_stateid;
+ return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Create a composite state ID of invalid state IDs at all levels
+ * and tests the EL3 firmware's return value matches the expected
+ * one.
+ */
+test_result_t test_psci_invalid_stateID(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &completely_invalid_stateid;
+ return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of invalid state type and tests the
+ * EL3 firmware's return value matches the expected one.
+ */
+test_result_t test_psci_invalid_state_type(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &invalid_state_type;
+ return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of invalid power level in original
+ * state format and tests the EL3 firmware's return value matches the
+ * expected value.
+ */
+test_result_t test_psci_invalid_power_level(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &invalid_power_level;
+ return test_lead_cpu_validate_ep();
+}
+
+/*
+ * Creates a composite state ID of valid local state at some levels
+ * and invalid state ID at others and tests the EL3 firmware's return
+ * value matches the expected value
+ */
+test_result_t test_psci_mixed_state_id(void)
+{
+ /* Install the testcase-specific body, then run the common driver */
+ psci_validate_test_function = &mixed_state_id;
+ return test_lead_cpu_validate_ep();
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c
new file mode 100644
index 000000000..3ac4bc533
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define STRESS_TEST_COUNT 1000
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static event_t cluster_booted;
+
+/* Return success depicting CPU booted successfully */
+static test_result_t test_cpu_booted(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Returning hands the CPU back to the framework, which powers it off */
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Return success depicting all cores in a cluster booted successfully */
+static test_result_t test_cluster_booted(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Stay ON until the lead CPU signals that the whole cluster booted */
+ tftf_wait_for_event(&cluster_booted);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Random hotplug cores in a large iteration to stress boot path code
+ * Test Do :
+ * 1) Power up a random core
+ * 2) Ensure this core has booted successfully to TFTF
+ * 3) Wait for core to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-core platforms.
+ */
+test_result_t psci_hotplug_single_core_stress_test(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu;
+ unsigned int core_pos;
+ int psci_ret;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ NOTICE("Power on and off any random core %d times\n",
+ STRESS_TEST_COUNT);
+
+ for (unsigned int i = 0; i < STRESS_TEST_COUNT; ++i) {
+ /* Reset/Initialise the event variable */
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+ tftf_init_event(&cpu_booted[j]);
+
+ /*
+ * Find a random CPU to power up and power down.
+ * 'cpu' holds the target CPU's MPID.
+ */
+ cpu = tftf_find_random_cpu_other_than(lead_mpid);
+ assert(cpu != lead_mpid);
+
+ psci_ret = tftf_cpu_on(cpu,
+ (uintptr_t) test_cpu_booted,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ /* Block until the target CPU signals it reached the test */
+ core_pos = platform_get_core_pos(cpu);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+
+ /*
+ * Wait for the CPU to be powered off by framework before issuing a
+ * CPU_ON to it
+ */
+ while (tftf_is_cpu_online(cpu))
+ ;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Repeated cores hotplug as stress test
+ * Test Do :
+ * 1) Power up all the cores
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Wait for all the cores to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-core platforms.
+ */
+test_result_t psci_hotplug_stress_test(void)
+{
+ unsigned int lead_cpu = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ int psci_ret;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ NOTICE("This multi-core test will repeat %d times\n",
+ STRESS_TEST_COUNT);
+
+ for (unsigned int i = 0; i < STRESS_TEST_COUNT; i++) {
+ /* Reinitialize the event variable */
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+ tftf_init_event(&cpu_booted[j]);
+
+ /* Step 1: power up every non-lead CPU */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_cpu)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_cpu_booted,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Confirm the non-lead cpus booted and participated in the test
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_cpu)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* Step 3: busy-wait until the framework powers them back off */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /*
+ * Except lead CPU, Wait for all cores to be powered off
+ * by framework
+ */
+ if (cpu_mpid == lead_cpu)
+ continue;
+
+ while (tftf_is_cpu_online(cpu_mpid))
+ ;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test cluster hotplug
+ * Test Do :
+ * 1) Power up all the cores of non-lead cluster
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Wait for all the cores to be powered off by the framework.
+ * 4) Repeat 1-2-3 STRESS_TEST_COUNT times
+ * 5) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: It will be skipped on single-cluster platforms.
+ */
+test_result_t psci_cluster_hotplug_stress_test(void)
+{
+ unsigned int lead_cluster = MPIDR_CLUSTER_ID(read_mpidr_el1());
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ int psci_ret;
+
+ SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(2);
+
+ NOTICE("This Cluster hotplug test will repeat %d times\n",
+ STRESS_TEST_COUNT);
+
+ for (unsigned int i = 0; i < STRESS_TEST_COUNT; i++) {
+ /* Reset/Initialise the event variable */
+ tftf_init_event(&cluster_booted);
+
+ /* Reset/Initialise the event variable */
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+ tftf_init_event(&cpu_booted[j]);
+
+ /* Power on every CPU outside the lead cluster */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (MPIDR_CLUSTER_ID(cpu_mpid) != lead_cluster) {
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) test_cluster_booted,
+ 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /*
+ * Confirm all the CPUs in non-lead cluster booted
+ * and participated in the test
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU cluster */
+ if (MPIDR_CLUSTER_ID(cpu_mpid) == lead_cluster)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /*
+ * All cores have booted, Now send the signal to them so that
+ * they enter the framework
+ */
+ tftf_send_event_to_all(&cluster_booted);
+
+ /* The while loop below is the sole statement of this if */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (MPIDR_CLUSTER_ID(cpu_mpid) != lead_cluster)
+ /*
+ * Wait for CPU to power off before issuing a
+ * CPU_ON for it
+ */
+ while (tftf_is_cpu_online(cpu_mpid))
+ ;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
new file mode 100644
index 000000000..99f685406
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define STRESS_TEST_COUNT 100
+
+/* Per-CPU counters used for the coherency test */
+typedef struct cpu_pm_ops_desc {
+ spinlock_t lock;
+ unsigned int pcpu_count[PLATFORM_CORE_COUNT];
+} cpu_pm_ops_desc_t;
+
+static cpu_pm_ops_desc_t device_pm_ops_desc
+ __attribute__ ((section("tftf_coherent_mem")));
+static cpu_pm_ops_desc_t normal_pm_ops_desc;
+
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static volatile unsigned int start_test;
+static unsigned int exit_test;
+static unsigned int power_state;
+/* The target for CPU ON requests */
+static volatile unsigned long long target_mpid;
+
+static spinlock_t counter_lock;
+static volatile unsigned int cpu_on_count;
+/* Whether CPU suspend calls should be thrown into the test */
+static unsigned int include_cpu_suspend;
+
+static test_result_t secondary_cpu_on_race_test(void);
+
+/*
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
+ */
+static void wait_for_non_lead_cpus(void)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ /*
+ * NOTE(review): this local 'target_mpid' shadows the file-scope
+ * volatile 'target_mpid' used by the CPU ON race test.
+ */
+ unsigned int target_mpid, target_node;
+
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU, as it is powered on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ /* Busy-poll AFFINITY_INFO until the CPU reports OFF */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0)
+ != PSCI_STATE_OFF)
+ ;
+ }
+}
+
+/*
+ * Update per-cpu counter corresponding to the current CPU.
+ * This function updates 2 counters, one in normal memory and the other
+ * in coherent device memory. The counts are then compared to check if they
+ * match. This verifies that the caches and the interconnect are coherent
+ * during the test.
+ * Returns -1 on error, 0 on success.
+ */
+static int update_counters(void)
+{
+ unsigned int normal_count, device_count;
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+
+ /*
+ * Ensure that the copies of the counters in device and normal memory
+ * match. The locks and the data should become incoherent if any cluster
+ * is not taking part in coherency.
+ */
+ spin_lock(&normal_pm_ops_desc.lock);
+ normal_count = normal_pm_ops_desc.pcpu_count[core_pos];
+ spin_unlock(&normal_pm_ops_desc.lock);
+
+ spin_lock(&device_pm_ops_desc.lock);
+ device_count = device_pm_ops_desc.pcpu_count[core_pos];
+ spin_unlock(&device_pm_ops_desc.lock);
+
+ if (device_count != normal_count) {
+ tftf_testcase_printf("Count mismatch. Device memory count ="
+ " %u: normal memory count = %u\n",
+ device_count, normal_count);
+ return -1;
+ }
+
+ /* Increment the count in both copies of the counter */
+ spin_lock(&normal_pm_ops_desc.lock);
+ normal_pm_ops_desc.pcpu_count[core_pos]++;
+ spin_unlock(&normal_pm_ops_desc.lock);
+
+ spin_lock(&device_pm_ops_desc.lock);
+ device_pm_ops_desc.pcpu_count[core_pos]++;
+ spin_unlock(&device_pm_ops_desc.lock);
+
+ return 0;
+}
+
+/*
+ * The test loop for non lead CPUs in psci_on_off_suspend_coherency_test. It
+ * updates the counters and depending on the value of the random variable `op`,
+ * the secondaries either offlines (by returning back to Test framework) or
+ * suspends itself.
+ */
+static test_result_t random_suspend_off_loop(void)
+{
+/* Possible outcomes of the random draw below */
+#define OFFLINE_CORE 1
+#define SUSPEND_CORE 0
+
+ int rc, op;
+
+ while (!exit_test) {
+ rc = update_counters();
+ if (rc)
+ return TEST_RESULT_FAIL;
+
+ /* Find what we will be doing next */
+ op = rand() % 2;
+
+ /*
+ * If the chosen action is to power off, then return from the
+ * test function so that the test framework powers this CPU off.
+ */
+ if (op == OFFLINE_CORE)
+ return TEST_RESULT_SUCCESS;
+
+ /* op == SUSPEND_CORE: fall through to timed suspend */
+ /* Program timer for wake-up event. */
+ rc = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+
+ tftf_cancel_timer();
+
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("CPU timer/suspend returned error"
+ " 0x%x\n", rc);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Lead CPU loop for psci_on_off_suspend_coherency_test: it repeatedly
+ * updates the coherency counters, suspends with a timer wake-up, and on
+ * each wake-up tries to turn ON a randomly-chosen OFF CPU. After
+ * STRESS_TEST_COUNT iterations it sets exit_test and waits for the
+ * secondaries to power down.
+ */
+static test_result_t lead_cpu_main(unsigned long long mpid)
+{
+ unsigned int rc;
+ unsigned long long rand_mpid;
+ int i;
+
+ /* The lead cpu will not be turned off. */
+ for (i = STRESS_TEST_COUNT; i >= 0; i--) {
+ rc = update_counters();
+ if (rc)
+ return TEST_RESULT_FAIL;
+
+ /* Program timer for wake-up event. */
+ rc = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state, NULL, NULL);
+
+ tftf_cancel_timer();
+
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("CPU timer/suspend returned error"
+ " 0x%x\n", rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * The lead cpu's woken up since the system timer has fired.
+ * For any cpus which have turned themselves off, generate a
+ * random MPIDR and try turning on the corresponding cpu.
+ */
+ do {
+ rand_mpid = tftf_find_random_cpu_other_than(mpid);
+ } while (tftf_psci_affinity_info(rand_mpid, MPIDR_AFFLVL0)
+ != PSCI_STATE_OFF);
+
+ /*
+ * The target may race us back to ON/pending state, so several
+ * non-SUCCESS codes are tolerated here. NOTE(review): confirm
+ * why PSCI_E_INVALID_PARAMS is an acceptable outcome.
+ */
+ rc = tftf_try_cpu_on(rand_mpid,
+ (uintptr_t) random_suspend_off_loop,
+ 0);
+ if ((rc != PSCI_E_ALREADY_ON) &&
+ (rc != PSCI_E_ON_PENDING) &&
+ (rc != PSCI_E_SUCCESS) &&
+ (rc != PSCI_E_INVALID_PARAMS)) {
+ tftf_testcase_printf("CPU ON failed with error ="
+ " 0x%x\n", rc);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ exit_test = 1;
+ /* Ensure update to `exit_test` is seen by all cores prior to
+ invoking wait_for_non_lead_cpus() */
+ dmbsy();
+
+ wait_for_non_lead_cpus();
+
+ INFO("Exiting test\n");
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Send event depicting CPU booted successfully and then invoke
+ * random_suspend_off_loop.
+ */
+static test_result_t non_lead_random_suspend_off_loop(void)
+{
+ unsigned long long mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Then run the same random suspend/off loop as all secondaries */
+ return random_suspend_off_loop();
+}
+
+/*
+ * @Test_Aim@ Repeated cores hotplug as stress test
+ * Test Do :
+ * 1) Power up all the cores
+ * 2) Ensure all the cores have booted successfully to TFTF
+ * 3) Randomly suspend or turn OFF secondary CPU
+ * 4) The lead CPU will suspend and turn ON a random CPU which has powered OFF.
+ * 5) Repeat 1-4 STRESS_TEST_COUNT times
+ * 6) The test is aborted straight away if any failure occurs. In this case,
+ * the test is declared as failed.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_on_off_suspend_coherency_test(void)
+{
+ unsigned int cpu_node, core_pos;
+ unsigned long long cpu_mpid, lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int counter_lo, stateid;
+ int psci_ret;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* Reinitialize the event variable */
+ for (unsigned int j = 0; j < PLATFORM_CORE_COUNT; ++j)
+ tftf_init_event(&cpu_booted[j]);
+
+ init_spinlock(&normal_pm_ops_desc.lock);
+ init_spinlock(&device_pm_ops_desc.lock);
+
+ exit_test = 0;
+
+ /* Seed the random number generator */
+ counter_lo = (unsigned int) read_cntpct_el0();
+ srand(counter_lo);
+
+ psci_ret = tftf_psci_make_composite_state_id(PLAT_MAX_PWR_LEVEL,
+ PSTATE_TYPE_POWERDOWN, &stateid);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to construct composite state\n");
+ return TEST_RESULT_SKIPPED;
+ }
+ /*
+ * NOTE(review): the composite stateid is built for PLAT_MAX_PWR_LEVEL
+ * but the power state targets MPIDR_AFFLVL0 here -- confirm this is
+ * intentional (compare psci_cpu_on_off_suspend_stress(), which uses
+ * PLAT_MAX_PWR_LEVEL for both).
+ */
+ power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ PSTATE_TYPE_POWERDOWN, stateid);
+
+ /* Turn on all the non-lead CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) non_lead_random_suspend_off_loop, 0);
+ if (psci_ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Confirm the non-lead cpus booted and participated in the test
+ */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* NOTE(review): this core_pos value is never used afterwards */
+ core_pos = platform_get_core_pos(lead_mpid);
+ return lead_cpu_main(lead_mpid);
+}
+
+/*
+ * Frantically send CPU ON requests to the target mpidr till it returns
+ * ALREADY_ON.
+ * Return 0 on success, 1 on error.
+ */
+static int test_cpu_on_race(void)
+{
+ int ret;
+
+ /*
+ * Spin issuing CPU_ON. While the ON is racing with other callers,
+ * SUCCESS (we won) and ON_PENDING (someone else won, boot in
+ * progress) are both expected; ALREADY_ON terminates the loop.
+ */
+ do {
+ ret = tftf_try_cpu_on(target_mpid,
+ (uintptr_t) secondary_cpu_on_race_test, 0);
+ if (ret != PSCI_E_SUCCESS && ret != PSCI_E_ON_PENDING &&
+ ret != PSCI_E_ALREADY_ON) {
+ tftf_testcase_printf("Unexpected return value 0x%x"
+ " from PSCI CPU ON\n", ret);
+ return 1;
+ }
+ } while (ret != PSCI_E_ALREADY_ON);
+
+ return 0;
+}
+
+/*
+ * This function runs the test_cpu_on_race() till either `exit_test`
+ * is set or the `target_mpid` is the current mpid.
+ */
+static test_result_t secondary_cpu_on_race_test(void)
+{
+ unsigned long long mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ int ret;
+
+ /* Tell the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait for start flag */
+ while (start_test == 0)
+ ;
+
+ do {
+ /*
+ * If the current CPU is the target mpid, then power OFF.
+ * The target mpid will be target for CPU ON requests by other
+ * cores.
+ */
+ if (mpid == target_mpid)
+ return TEST_RESULT_SUCCESS;
+
+ /* Otherwise join the race to turn the target CPU back ON */
+ ret = test_cpu_on_race();
+ if (ret)
+ return TEST_RESULT_FAIL;
+ } while (exit_test == 0);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify that the CPU ON race conditions are handled in Firmware.
+ * Test Do :
+ * 1. Designate a target CPU to power down.
+ * 2. Boot up all the CPUs on the system
+ * 3. If the CPU is the designated target CPU, power OFF
+ * 4. All the other cores issue CPU ON to the target CPU continuously.
+ * As per PSCI specification, only one CPU ON call will succeed in sending
+ * the ON command to the target CPU.
+ * 5. The Target CPU should turn ON and execute successfully.
+ * 6. The test should iterate again with another CPU as the target CPU this
+ * time.
+ * 7. Repeat Steps 1-6 for STRESS_TEST_COUNT times.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_verify_cpu_on_race(void)
+{
+ unsigned int cpu_node, core_pos, target_node, j;
+ unsigned long long lead_mpid = read_mpidr_el1() & MPID_MASK, cpu_mpid;
+ int ret, rc = 0;
+
+ exit_test = 0;
+ start_test = 0;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* Reinitialize the event variable */
+ for (j = 0; j < PLATFORM_CORE_COUNT; j++)
+ tftf_init_event(&cpu_booted[j]);
+
+ /* Turn ON all other CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ /* Skip the lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t) secondary_cpu_on_race_test, 0);
+ if (ret != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ for (j = 0; j < STRESS_TEST_COUNT; j++) {
+ /* Choose a target CPU */
+ for_each_cpu(target_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(target_node);
+
+ /* Skip the lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ target_mpid = cpu_mpid;
+ /*
+ * Ensure target_mpid update is visible prior to
+ * starting test.
+ */
+ dmbsy();
+
+ VERBOSE("Target MPID = %llx\n", target_mpid);
+ /*
+ * start_test stays set after the first target; the
+ * secondaries only wait on it once, right after boot.
+ */
+ start_test = 1;
+
+ /* Wait for the target CPU to turn OFF */
+ while (tftf_psci_affinity_info(target_mpid,
+ MPIDR_AFFLVL0) != PSCI_STATE_OFF)
+ ;
+ /* The lead CPU joins the CPU_ON race as well */
+ rc = test_cpu_on_race();
+ if (rc)
+ break;
+ }
+ if (rc)
+ break;
+ }
+ exit_test = 1;
+ wait_for_non_lead_cpus();
+ return rc ? TEST_RESULT_FAIL : TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The Test function to stress test CPU ON/OFF PSCI APIs executed by all CPUs.
+ * This function maintains a global counter which is incremented atomically
+ * when a CPU enters this function via warm boot (in case of secondary) or
+ * direct invocation (in case of lead CPU). It sends CPU ON to all OFF CPUs
+ * till another CPU has entered this function or `exit_test` is set.
+ * If `include_cpu_suspend` is set, then it suspends the current CPU
+ * before iterating in the loop to send CPU ON requests.
+ */
+static test_result_t launch_cpu_on_off_stress(void)
+{
+ /* 'mpid' is reused below as the CPU_ON target MPID */
+ unsigned long long mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_node, temp_count;
+ int ret;
+
+ spin_lock(&counter_lock);
+ /* Store the count in a temporary variable */
+ temp_count = ++cpu_on_count;
+ spin_unlock(&counter_lock);
+
+ if (exit_test)
+ return TEST_RESULT_SUCCESS;
+
+ while (!exit_test) {
+ /* Try to turn ON every CPU that is currently OFF */
+ for_each_cpu(cpu_node) {
+ mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ if (tftf_is_cpu_online(mpid))
+ continue;
+
+ ret = tftf_try_cpu_on(mpid,
+ (uintptr_t) launch_cpu_on_off_stress, 0);
+ if (ret != PSCI_E_SUCCESS && ret !=
+ PSCI_E_ON_PENDING && ret != PSCI_E_ALREADY_ON) {
+ tftf_testcase_printf("Unexpected return value"
+ " 0x%x from PSCI CPU ON\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Break if another CPU has entered this test function */
+ if (temp_count != cpu_on_count)
+ break;
+
+ /* Check whether to suspend before iterating */
+ if (include_cpu_suspend) {
+ ret = tftf_program_timer_and_suspend(
+ PLAT_SUSPEND_ENTRY_TIME, power_state, NULL, NULL);
+
+ tftf_cancel_timer();
+
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("CPU timer/suspend"
+ " returned error 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ /* The last CPU to reach the entry threshold terminates the test */
+ spin_lock(&counter_lock);
+ if (cpu_on_count >= (STRESS_TEST_COUNT * PLATFORM_CORE_COUNT)) {
+ cpu_on_count = 0;
+ spin_unlock(&counter_lock);
+ exit_test = 1;
+ /* Wait for all cores to power OFF */
+ wait_for_non_lead_cpus();
+
+ /*
+ * In case any other CPUs were turned ON in the meantime, wait
+ * for them as well.
+ */
+ wait_for_non_lead_cpus();
+ } else
+ spin_unlock(&counter_lock);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test CPU ON / OFF APIs.
+ * Test Do :
+ * 1. Maintain a global counter which will be atomically updated by each CPU
+ * when it turns ON.
+ * 2. Iterate over all the CPU in the topology and invoke CPU ON on CPU
+ * which are OFF.
+ * 3. Check if the global counter has updated. If not, goto step 2
+ * 4. All the cores which are turned ON executes Step 1 - 3.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_cpu_on_off_stress(void)
+{
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+ /* Reset shared state, then run the common stress loop (no suspend) */
+ init_spinlock(&counter_lock);
+ cpu_on_count = 0;
+ exit_test = 0;
+ include_cpu_suspend = 0;
+ return launch_cpu_on_off_stress();
+}
+
+/*
+ * @Test_Aim@ Stress test CPU ON / OFF APIs with SUSPEND in between.
+ * Test Do :
+ * 1. Maintain a global counter which will be atomically updated by each CPU
+ * when it turns ON.
+ * 2. Iterate over all the CPUs in the topology and invoke CPU ON on CPUs
+ * which are OFF.
+ * 3. Check if the global counter has updated. If not, program wakeup
+ * timer and suspend. On wake-up goto step 2.
+ * 4. All the cores which are turned ON executes Step 1 - 3.
+ * Note: The test will be skipped on single-core platforms.
+ */
+test_result_t psci_cpu_on_off_suspend_stress(void)
+{
+ int rc;
+ unsigned int stateid;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ init_spinlock(&counter_lock);
+ cpu_on_count = 0;
+ exit_test = 0;
+
+ /* Build a power-down composite state for the deepest power level */
+ rc = tftf_psci_make_composite_state_id(PLAT_MAX_PWR_LEVEL,
+ PSTATE_TYPE_POWERDOWN, &stateid);
+ if (rc != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to construct composite state\n");
+ return TEST_RESULT_SKIPPED;
+ }
+ power_state = tftf_make_psci_pstate(PLAT_MAX_PWR_LEVEL,
+ PSTATE_TYPE_POWERDOWN, stateid);
+
+ /* Same stress loop as psci_cpu_on_off_stress(), with suspends */
+ include_cpu_suspend = 1;
+ return launch_cpu_on_off_stress();
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c
new file mode 100644
index 000000000..5b500beb7
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <stdlib.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define MAX_TEST_ITERATIONS (100 * PLATFORM_CORE_COUNT)
+
+/* Number of iterations of the test */
+static int iteration_count;
+
+/* The CPU assigned the baton to drive the test */
+static u_register_t baton_cpu;
+
+/* Synchronization event which will be waited on by all the non-baton CPUs */
+static event_t sync_event;
+
+/* Global variables to synchronize participating CPUs on wake-up */
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+
+/* Variable to store the system suspend power state and its statistics */
+static int system_susp_pwr_state;
+static u_register_t susp_count;
+
+static test_result_t do_sys_susp_on_off_stress(void);
+
+/*
+ * Helper function to wait for the participating CPUs to enter the
+ * test function.
+ */
+static void wait_for_cpus_to_enter_test(void)
+{
+ assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+ while (cpu_count != participating_cpu_count)
+ ;
+}
+
+/* Helper function to increment the cpu_count */
+static void inc_cpu_count(void)
+{
+ spin_lock(&cpu_count_lock);
+ cpu_count++;
+ spin_unlock(&cpu_count_lock);
+ assert(cpu_count <= PLATFORM_CORE_COUNT);
+}
+
+/* Helper function to decrement the cpu_count */
+static void dec_cpu_count(void)
+{
+ spin_lock(&cpu_count_lock);
+ cpu_count--;
+ spin_unlock(&cpu_count_lock);
+ assert(cpu_count >= 0);
+}
+
+/* Helper function to turn ON all the CPUs in the platform */
+static int try_cpu_on_all(void)
+{
+ int ret, cpu_node;
+ u_register_t cpu_mpid, current_cpu = read_mpidr_el1() & MPID_MASK;
+
+ /* Try to turn on all the non-lead CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == current_cpu)
+ continue;
+
+ do {
+ ret = tftf_try_cpu_on(cpu_mpid,
+ (uintptr_t) do_sys_susp_on_off_stress, 0);
+ if (ret != PSCI_E_SUCCESS && ret != PSCI_E_ON_PENDING &&
+ ret != PSCI_E_ALREADY_ON) {
+ ERROR("Unexpected return value 0x%x"
+ " from PSCI CPU ON\n", ret);
+ return -1;
+ }
+ } while (ret != PSCI_E_SUCCESS);
+ }
+ return 0;
+}
+
+/* Helper function to get the number of CPUs which are OFF in the system */
+static int get_off_cpu_count(void)
+{
+ int aff_off_cpus = 0;
+ u_register_t cpu_mpid, current_cpu = read_mpidr_el1() & MPID_MASK;
+ int cpu_node;
+
+ /* Query the number of OFF CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU, it is already powered on */
+ if (cpu_mpid == current_cpu)
+ continue;
+
+ if (tftf_psci_affinity_info(cpu_mpid, MPIDR_AFFLVL0) ==
+ PSCI_STATE_OFF)
+ aff_off_cpus++;
+ }
+
+ return aff_off_cpus;
+}
+
+/*
+ * The main test function which will be executed by all CPUs.
+ * 1. The CPU holding the baton will first enter this function and then turns
+ * ON all other CPUs.
+ * 2. All the `non-baton` CPUs then wait for the `sync_event` to be signaled
+ * to turn themselves OFF.
+ * 3. Number of CPUs which are signaled via `sync_event` by baton CPU is
+ * random.
+ * 4. After signaled CPUs have turned themselves OFF, SYSTEM SUSPEND is
+ * issued by the baton CPU.
+ * 5. The return value of SYSTEM_SUSPEND is checked by the baton CPU.
+ * 6. The next baton CPU is chosen randomly and the test is handed over to this
+ * CPU.
+ */
+static test_result_t do_sys_susp_on_off_stress(void)
+{
+	int psci_ret, off_cpu_count;
+	u_register_t current_cpu;
+
+	inc_cpu_count();
+
+	/* Non-baton CPUs block here until signalled, then turn themselves OFF */
+	current_cpu = read_mpidr_el1() & MPID_MASK;
+	if (current_cpu != baton_cpu) {
+		tftf_wait_for_event(&sync_event);
+		dec_cpu_count();
+		return TEST_RESULT_SUCCESS;
+	}
+
+	INFO("System suspend test: Baton holder CPU = 0x%llx\n",
+			(unsigned long long) current_cpu);
+	if (try_cpu_on_all() == -1) {
+		tftf_testcase_printf("CPU_ON of secondary CPUs failed.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	wait_for_cpus_to_enter_test();
+
+	/* Turn off random number of cores 1 out of 3 times */
+	if (rand() % 3)
+		off_cpu_count = rand() % participating_cpu_count;
+	else
+		off_cpu_count = participating_cpu_count - 1;
+
+	/* Signal random number of CPUs to turn OFF */
+	tftf_send_event_to(&sync_event, off_cpu_count);
+
+	/* Wait for `off_cpu_count` CPUs to turn OFF */
+	while (get_off_cpu_count() != off_cpu_count)
+		;
+
+	/* Program timer to fire after delay */
+	tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+
+	/* Issue SYSTEM SUSPEND */
+	psci_ret = tftf_system_suspend();
+	tftf_cancel_timer();
+
+	/* Check return value of SYSTEM SUSPEND API */
+	if (off_cpu_count == (participating_cpu_count - 1)) {
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("SYSTEM SUSPEND did not succeed "
+					"where expected\n");
+			return TEST_RESULT_FAIL;
+		}
+	} else {
+		if (psci_ret != PSCI_E_DENIED) {
+			tftf_testcase_printf("SYSTEM SUSPEND did not fail "
+					"where expected\n");
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Pass the baton to another CPU */
+	baton_cpu = tftf_find_random_cpu_other_than(current_cpu);
+
+	/* Unblock the waiting CPUs */
+	tftf_send_event_to(&sync_event,
+			(participating_cpu_count - 1) - off_cpu_count);
+
+	/* Wait for all CPUs other than current to turn OFF */
+	while (get_off_cpu_count() != (participating_cpu_count - 1))
+		;
+
+	dec_cpu_count();
+
+	if (iteration_count++ < MAX_TEST_ITERATIONS) {
+		/* Hand over the test execution to the new baton CPU */
+		psci_ret = tftf_cpu_on(baton_cpu,
+				(uintptr_t) do_sys_susp_on_off_stress, 0);
+		if (psci_ret != PSCI_E_SUCCESS)
+			return TEST_RESULT_FAIL;
+
+		/* Wait for new baton CPU to enter test */
+		while (cpu_count == 0)
+			;
+	} else {
+		/*
+		 * The test has completed. Print statistics if PSCI STAT COUNT
+		 * is supported.
+		 */
+		if (is_psci_stat_count_supported()) {
+			u_register_t count = tftf_psci_stat_count(baton_cpu,
+					system_susp_pwr_state);
+			tftf_testcase_printf("Iterated %d with %llu system"
+					" suspends\n", MAX_TEST_ITERATIONS,
+					(unsigned long long)(count - susp_count));
+		}
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress test PSCI SYSTEM SUSPEND API.
+ * This test iteratively issues PSCI SYSTEM SUSPEND on random cores after
+ * turning OFF a random number of CPUs. The PSCI SYSTEM SUSPEND
+ * will only succeed if all the CPUs except the calling CPU are OFF.
+ */
+test_result_t psci_sys_susp_on_off_stress_test(void)
+{
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id;
+ int ret;
+
+ if (!is_psci_sys_susp_supported()) {
+ tftf_testcase_printf("System suspend is not supported "
+ "by the EL3 firmware\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+ tftf_init_event(&sync_event);
+ init_spinlock(&cpu_count_lock);
+
+ /* Initialize participating CPU count */
+ participating_cpu_count = tftf_get_total_cpus_count();
+ cpu_count = 0;
+
+ iteration_count = 0;
+
+ /*
+ * Assign a baton to the current CPU and it is in charge of driving
+ * the test.
+ */
+ baton_cpu = read_mpidr_el1() & MPID_MASK;
+
+ /* Print SYSTEM SUSPEND statistics if PSCI STAT is supported */
+ if (is_psci_stat_count_supported()) {
+ NOTICE("PSCI STAT COUNT supported\n");
+ tftf_set_deepest_pstate_idx(PLAT_MAX_PWR_LEVEL, pstateid_idx);
+
+ /* Check if the power state is valid */
+ ret = tftf_get_pstate_vars(&pwrlvl,
+ &susp_type,
+ &state_id,
+ pstateid_idx);
+ if (ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("tftf_get_pstate_vars() failed"
+ " with ret = %x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ assert(pwrlvl == PLAT_MAX_PWR_LEVEL);
+
+ system_susp_pwr_state = tftf_make_psci_pstate(pwrlvl,
+ susp_type, state_id);
+
+ susp_count = tftf_psci_stat_count(baton_cpu, system_susp_pwr_state);
+ }
+
+ return do_sys_susp_on_off_stress();
+}
diff --git a/tftf/tests/runtime_services/standard_service/query_std_svc.c b/tftf/tests/runtime_services/standard_service/query_std_svc.c
new file mode 100644
index 000000000..e96d4ae13
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/query_std_svc.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <psci.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/*
+ * Standard service UUID as returned by the implementation in the Trusted
+ * Firmware.
+ */
+static const uuid_t armtf_std_svc_uuid = {
+ 0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
+ { 0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2 }
+};
+
+/**
+ * @Test_Aim@ Query the Standard Service
+ *
+ * This test targets the implementation of the Standard Service in the Trusted
+ * Firmware. If it is interfaced with a different implementation then this test
+ * will most likely fail because the values returned by the service won't be the
+ * ones expected.
+ *
+ * The following queries are performed:
+ * 1) Call UID
+ * 2) Call count
+ * 3) Call revision details
+ */
+test_result_t test_query_std_svc(void)
+{
+ smc_args std_svc_args;
+ smc_ret_values ret;
+ uuid_t std_svc_uuid;
+ char uuid_str[UUID_STR_SIZE];
+ test_result_t test_result = TEST_RESULT_SUCCESS;
+
+ /* Standard Service Call UID */
+ std_svc_args.arg0 = SMC_STD_SVC_UID;
+ ret = tftf_smc(&std_svc_args);
+
+ make_uuid_from_4words(&std_svc_uuid,
+ ret.ret0, ret.ret1, ret.ret2, ret.ret3);
+ if (!uuid_equal(&std_svc_uuid, &armtf_std_svc_uuid)) {
+ tftf_testcase_printf("Wrong UUID: expected %s,\n",
+ uuid_to_str(&armtf_std_svc_uuid, uuid_str));
+ tftf_testcase_printf(" got %s\n",
+ uuid_to_str(&std_svc_uuid, uuid_str));
+ test_result = TEST_RESULT_FAIL;
+ }
+
+ /* Standard Service Call Count */
+ std_svc_args.arg0 = SMC_STD_SVC_CALL_COUNT;
+ ret = tftf_smc(&std_svc_args);
+
+ if (ret.ret0 == SMC_UNKNOWN) {
+ tftf_testcase_printf("Querying STD service call count"
+ " failed\n");
+ test_result = TEST_RESULT_FAIL;
+ } else {
+ tftf_testcase_printf("STD Service Call Count reported by firmware:"
+ " %llu\n", (unsigned long long)ret.ret0);
+ }
+
+ /* Standard Service Call Revision details */
+ std_svc_args.arg0 = SMC_STD_SVC_REVISION;
+ ret = tftf_smc(&std_svc_args);
+
+ if ((ret.ret0 != STD_SVC_REVISION_MAJOR) ||
+ (ret.ret1 != STD_SVC_REVISION_MINOR)) {
+ tftf_testcase_printf(
+ "Wrong Revision: expected {%u.%u}, got {%llu.%llu}\n",
+ STD_SVC_REVISION_MAJOR, STD_SVC_REVISION_MINOR,
+ (unsigned long long)ret.ret0,
+ (unsigned long long)ret.ret1);
+ test_result = TEST_RESULT_FAIL;
+ }
+
+ return test_result;
+}
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
new file mode 100644
index 000000000..aae85fcc2
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <sdei.h>
+
+ .globl sdei_state_entrypoint
+ .globl sdei_entrypoint
+ .globl sdei_entrypoint_resume
+ .globl sdei_handler_done
+
+ .local event_handled
+ .comm event_handled, PLATFORM_CORE_COUNT * 4, 8
+
+#ifdef AARCH64
+func sdei_entrypoint
+ stp xzr, x30, [sp, #-16]!
+ bl sdei_event_handler
+ ldp xzr, x30, [sp],#16
+ mov_imm x0, SDEI_EVENT_COMPLETE
+ mov x1, xzr
+ smc #0
+ b .
+endfunc sdei_entrypoint
+
+func sdei_entrypoint_resume
+ stp x2, x30, [sp, #-16]!
+
+ /* Dispatch to C handler */
+ bl sdei_event_handler
+
+ /* Calculate address of event completion variable */
+ mrs x0, mpidr_el1
+ and x0, x0, #MPID_MASK
+ bl platform_get_core_pos
+ lsl x0, x0, #2
+ adrp x1, event_handled
+ add x1, x1, :lo12:event_handled
+ add x1, x0, x1
+
+ /* Mark event handling as complete so `sdei_handler_done` can return */
+ mov w2, #1
+ str w2, [x1]
+ sev
+
+ /* Populate `x0` and `x1` to prepare for SMC call */
+ ldp x1, x30, [sp], #16
+ mov_imm x0, SDEI_EVENT_COMPLETE_AND_RESUME
+ smc #0
+endfunc sdei_entrypoint_resume
+
+func sdei_handler_done
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ /* Calculate address of event completion variable */
+ mrs x0, mpidr_el1
+ and x0, x0, #MPID_MASK
+ mov x29, x30
+ bl platform_get_core_pos
+ mov x30, x29
+ lsl x0, x0, #2
+ adrp x1, event_handled
+ add x1, x1, :lo12:event_handled
+ add x0, x0, x1
+
+again:
+ /*
+ * Wait until the timer interrupt fires, which will be handled
+ * as an SDEI event and take us to sdei_entrypoint_resume().
+ */
+ wfe
+ ldr w1, [x0]
+ cmp w1, #1
+ bne again
+
+ /* Reset event completion variable for next run */
+ mov w1, #0
+ str w1, [x0]
+
+ ldp x29, x30, [sp], #16
+ ret
+endfunc sdei_handler_done
+
+func sdei_state_entrypoint
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ blr x1
+
+ /* Calculate address of event completion variable */
+ mrs x0, mpidr_el1
+ and x0, x0, #MPID_MASK
+ bl platform_get_core_pos
+ lsl x0, x0, #2
+ adrp x1, event_handled
+ add x1, x1, :lo12:event_handled
+ add x1, x0, x1
+
+ /* Mark event handling as complete so `sdei_handler_done` can return */
+ mov w2, #1
+ str w2, [x1]
+ sev
+
+ ldp x29, x30, [sp],#16
+ mov_imm x0, SDEI_EVENT_COMPLETE
+ mov_imm x1, SDEI_EV_HANDLED
+ smc #0
+ b .
+endfunc sdei_state_entrypoint
+
+#else /* AARCH32 */
+func sdei_entrypoint
+ /* SDEI is not supported on AArch32. */
+ b .
+endfunc sdei_entrypoint
+
+func sdei_entrypoint_resume
+ /* SDEI is not supported on AArch32. */
+ b .
+endfunc sdei_entrypoint_resume
+
+func sdei_handler_done
+ /* SDEI is not supported on AArch32. */
+ b .
+endfunc sdei_handler_done
+
+func sdei_state_entrypoint
+ /* SDEI is not supported on AArch32. */
+ b .
+endfunc sdei_state_entrypoint
+#endif
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c
new file mode 100644
index 000000000..dc357c16a
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <private_timer.h>
+#include <sdei.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define EV_COOKIE 0xDEADBEEF
+#define TIMER_TIMEO_MS 10
+
+extern sdei_handler_t sdei_entrypoint;
+extern sdei_handler_t sdei_entrypoint_resume;
+
+/*
+ * the bound event number as returned from sdei_interrupt_bind(), passed
+ * to the per-cpu SDEI test function
+ */
+static int bound_ev;
+/* true if the test is using a private interrupt source, false otherwise. */
+static int private_interrupt;
+
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+
+/* Helper function to wait for CPUs participating in the test. */
+static void wait_for_participating_cpus(void)
+{
+ assert(participating_cpu_count <= PLATFORM_CORE_COUNT);
+
+ spin_lock(&cpu_count_lock);
+ cpu_count++;
+ spin_unlock(&cpu_count_lock);
+
+ assert(cpu_count <= PLATFORM_CORE_COUNT);
+
+ while (cpu_count != participating_cpu_count)
+ continue;
+}
+
+void sdei_trigger_event(void)
+{
+ printf("%s: triggering SDEI event\n", __func__);
+ if (private_interrupt)
+ private_timer_start(TIMER_TIMEO_MS);
+ else
+ tftf_program_timer(TIMER_TIMEO_MS);
+}
+
+static test_result_t sdei_event(void)
+{
+ long long ret;
+
+ wait_for_participating_cpus();
+
+ printf("%s: mpidr = 0x%llx\n", __func__,
+ (unsigned long long)read_mpidr_el1());
+
+ ret = sdei_event_register(bound_ev, sdei_entrypoint_resume, EV_COOKIE,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(bound_ev);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ goto err0;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ goto err1;
+ }
+
+ sdei_trigger_event();
+
+ sdei_handler_done();
+
+ sdei_pe_mask();
+
+err1:
+ sdei_event_disable(bound_ev);
+err0:
+ sdei_event_unregister(bound_ev);
+
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+int sdei_event_handler(int ev, unsigned long long arg)
+{
+ printf("%s: handler fired\n", __func__);
+ assert(arg == EV_COOKIE);
+ if (private_interrupt)
+ private_timer_stop();
+ else
+ tftf_cancel_timer();
+ return 0;
+}
+
+/* Handle an SDEI event on all cores in sequence. */
+test_result_t test_sdei_event_serial(void)
+{
+	struct sdei_intr_ctx intr_ctx;
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	participating_cpu_count = 1;
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	bound_ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+	if (bound_ev < 0) {
+		tftf_testcase_printf("SDEI interrupt bind failed: %x\n",
+			bound_ev);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* use a shared interrupt source for this test-case */
+	private_interrupt = 0;
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			goto err0;
+		}
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	assert(cpu_count == 0);
+
+	if (sdei_event() != TEST_RESULT_SUCCESS)
+		goto err0;
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+
+	return TEST_RESULT_SUCCESS;
+
+err0:
+	sdei_private_reset();
+	sdei_shared_reset();
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+	return TEST_RESULT_FAIL;
+}
+
+/* Handle an SDEI event on all cores in parallel. */
+test_result_t test_sdei_event_parallel(void)
+{
+	struct sdei_intr_ctx intr_ctx;
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+	participating_cpu_count = tftf_get_total_cpus_count();
+	init_spinlock(&cpu_count_lock);
+	cpu_count = 0;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	bound_ev = sdei_interrupt_bind(IRQ_PCPU_HP_TIMER, &intr_ctx);
+	if (bound_ev < 0) {
+		tftf_testcase_printf("SDEI interrupt bind failed: %x\n",
+			bound_ev);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* use a private interrupt source for this test-case */
+	private_interrupt = 1;
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			goto err0;
+		}
+	}
+
+	if (sdei_event() != TEST_RESULT_SUCCESS)
+		goto err0;
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+		cpu_count--;
+	}
+
+	cpu_count--;
+	assert(cpu_count == 0);
+
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+
+	return TEST_RESULT_SUCCESS;
+err0:
+	sdei_private_reset();
+	sdei_shared_reset();
+	sdei_interrupt_release(bound_ev, &intr_ctx);
+	enable_irq();
+	return TEST_RESULT_FAIL;
+}
+
+static test_result_t sdei_event_signal_self(void)
+{
+ long long ret;
+
+ ret = sdei_event_register(0, sdei_entrypoint_resume, EV_COOKIE,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(0);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ goto err0;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ goto err1;
+ }
+
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: 0x%llx\n", ret);
+ goto err2;
+ }
+
+ sdei_handler_done();
+
+err2:
+ sdei_pe_mask();
+err1:
+ sdei_event_disable(0);
+err0:
+ sdei_event_unregister(0);
+
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Each core signals itself using SDEI event signalling. */
+test_result_t test_sdei_event_signal_serial(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node;
+	long long ret;
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_event_signal_self, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			ret = -1;
+			goto err0;
+		}
+		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+				PSCI_STATE_OFF)
+			continue;
+	}
+
+	if (sdei_event_signal_self() != TEST_RESULT_SUCCESS) {
+		ret = -1;
+		goto err0;
+	}
+
+err0:
+	enable_irq();
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+	return TEST_RESULT_SUCCESS;
+}
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+
+static test_result_t sdei_wait_for_event_signal(void)
+{
+ int core_pos;
+ long long ret;
+
+ ret = sdei_event_register(0, sdei_entrypoint_resume, EV_COOKIE,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(0);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ goto err0;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ goto err1;
+ }
+
+ core_pos = platform_get_core_pos(read_mpidr_el1());
+ tftf_send_event(&cpu_ready[core_pos]);
+
+ sdei_handler_done();
+
+ sdei_pe_mask();
+err1:
+ sdei_event_disable(0);
+err0:
+ sdei_event_unregister(0);
+
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * The primary core will signal all other cores
+ * use SDEI event signalling.
+ */
+test_result_t test_sdei_event_signal_all(void)
+{
+	u_register_t lead_mpid, target_mpid;
+	int cpu_node, core_pos;
+	int i;
+	long long ret;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	ret = sdei_version();
+	if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+		tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	disable_irq();
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = tftf_cpu_on(target_mpid,
+			(uintptr_t)sdei_wait_for_event_signal, 0);
+		if (ret != PSCI_E_SUCCESS) {
+			ERROR("CPU ON failed for 0x%llx\n",
+				(unsigned long long)target_mpid);
+			ret = -1;
+			goto err0;
+		}
+		core_pos = platform_get_core_pos(target_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	for_each_cpu(cpu_node) {
+		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+		if (lead_mpid == target_mpid)
+			continue;
+		ret = sdei_event_signal(target_mpid);
+		if (ret < 0) {
+			tftf_testcase_printf("SDEI event signal failed: 0x%llx\n",
+				ret);
+			ret = -1;
+			goto err0;
+		}
+	}
+
+err0:
+	enable_irq();
+	if (ret < 0)
+		return TEST_RESULT_FAIL;
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c
new file mode 100644
index 000000000..d4ca12445
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_state.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sdei.h>
+#include <stdint.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+/*
+ * These functions test the SDEI handler state transitions as listed in
+ * section 6.1.2 of the specification.
+ */
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_ 0
+#define R_ (1u << 2)
+
+#define e_ 0
+#define E_ (1u << 1)
+
+#define g_ 0
+#define G_ (1u << 0)
+
+/* All possible composite handler states */
+#define reg_ (r_ | e_ | g_)
+#define reG_ (r_ | e_ | G_)
+#define rEg_ (r_ | E_ | g_)
+#define rEG_ (r_ | E_ | G_)
+#define Reg_ (R_ | e_ | g_)
+#define ReG_ (R_ | e_ | G_)
+#define REg_ (R_ | E_ | g_)
+#define REG_ (R_ | E_ | G_)
+
+#define is_running(st) ((st & R_) != 0)
+#define is_enabled(st) ((st & E_) != 0)
+#define is_registered(st) ((st & G_) != 0)
+
+extern sdei_handler_t sdei_state_entrypoint;
+
+static int handler_success;
+static int32_t ev;
+
+/* Dummy handler that won't be actually called. */
+static int sdei_unreachable_handler(int ev, uint64_t arg)
+{
+ panic();
+}
+
+/* Test all failure transitions when handler state is handler unregistered */
+static test_result_t hdlr_unregistered(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be unregistered */
+ status = sdei_event_status(ev);
+ if (status != reg_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reg_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ENABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_disable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: DISABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: UNREGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure we're operating on a shared interrupt */
+ assert(tftf_get_timer_irq() >= 32);
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_context(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete(SDEI_EV_HANDLED);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete_and_resume(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t hdlr_registered(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Register with dummy values. We aren't going to trigger the event */
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be registered */
+ status = sdei_event_status(ev);
+ if (status != reG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_context(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete(SDEI_EV_HANDLED);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete_and_resume(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_unregister(ev);
+ if (ret < 0) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* This is called from an SDEI handler */
+static void running_handler(int ev, unsigned long long arg)
+{
+ int64_t ret, status;
+ struct sdei_intr_ctx intr_ctx;
+
+ /* Cancel timer to prevent further triggers */
+ tftf_cancel_timer();
+
+ handler_success = 0;
+
+ /* Query the state of handler. Expect it to be registered */
+ status = sdei_event_status(ev);
+ if (status != REG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, REG_);
+ return;
+ }
+
+ /* Call disable and check status again */
+ ret = sdei_event_disable(ev);
+ if (ret < 0) {
+ printf("%d: DISABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ status = sdei_event_status(ev);
+ if (status != ReG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, ReG_);
+ return;
+ }
+
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ handler_success = 1;
+}
+
+test_result_t hdlr_registered_running(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be registered */
+ status = sdei_event_status(ev);
+ if (status != reG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ sdei_pe_unmask();
+ sdei_trigger_event();
+ sdei_handler_done();
+
+ ret = sdei_event_unregister(ev);
+ if (ret < 0) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (handler_success == 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* This is called from an SDEI handler */
+static void ureg_running_handler(int ev, unsigned long long arg)
+{
+ int64_t ret, status;
+ struct sdei_intr_ctx intr_ctx;
+
+ /* Cancel timer to prevent further triggers */
+ tftf_cancel_timer();
+
+ handler_success = 0;
+
+ /* Query the state of handler. Expect it to be registered */
+ status = sdei_event_status(ev);
+ if (!is_running(status)) {
+ printf("%d: Handler reported as not running\n", __LINE__);
+ return;
+ }
+
+ /*
+ * Unregister the event right away. Unregister while running must return
+ * pending error code.
+ */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EPEND) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /*
+ * Query the state of handler. Expect it to be running-only now that
+ * we've unregistered.
+ */
+ status = sdei_event_status(ev);
+ if (status != Reg_) {
+ printf("%d: Handler not reported as running-only\n", __LINE__);
+ return;
+ }
+
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ENABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_disable(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: DISABLE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ /* Unregister while running will return PEND */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EPEND) {
+ printf("%d: UNREGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return;
+ }
+
+ handler_success = 1;
+}
+
+test_result_t hdlr_unregistered_running(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &ureg_running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Query the state of handler. Expect it to be registered */
+ status = sdei_event_status(ev);
+ if (status != reG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, reG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ sdei_pe_unmask();
+ sdei_trigger_event();
+ sdei_handler_done();
+
+ /*
+ * We've already unregistered the event within the handler, so this call
+ * must fail.
+ */
+ ret = sdei_event_unregister(ev);
+ if (ret != -SMC_EDENY) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (handler_success == 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t hdlr_enabled(void)
+{
+ int64_t status, ret;
+ struct sdei_intr_ctx intr_ctx;
+
+ ev = sdei_interrupt_bind(tftf_get_timer_irq(), &intr_ctx);
+ if (ev < 0) {
+ printf("%d: SDEI interrupt bind failed; ret=%d\n",
+ __LINE__, ev);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_state_entrypoint,
+ (uintptr_t) &ureg_running_handler, SDEI_REGF_RM_PE,
+ read_mpidr_el1());
+ if (ret < 0) {
+ printf("%d: SDEI interrupt register failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(ev);
+ if (ret < 0) {
+ printf("%d: SDEI event enable failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Query the state of handler. Expect it to be both registered and
+ * enabled.
+ */
+ status = sdei_event_status(ev);
+ if (status != rEG_) {
+ printf("%d: Unexpected event status: 0x%llx != 0x%x\n",
+ __LINE__, (unsigned long long) status, rEG_);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_register(ev, sdei_unreachable_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret != -SMC_EDENY) {
+ printf("%d: REGISTER returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret != -SMC_EDENY) {
+ printf("%d: RELEASE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_routing_set(ev, SDEI_REGF_RM_PE);
+ if (ret != -SMC_EDENY) {
+ printf("%d: ROUTING_SET returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_context(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: EVENT_CONTEXT returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete(SDEI_EV_HANDLED);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_complete_and_resume(0);
+ if (ret != -SMC_EDENY) {
+ printf("%d: COMPLETE_AND_RESUME returned unexpected error code %lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_unregister(ev);
+ if (ret < 0) {
+ printf("%d: SDEI unregister failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_interrupt_release(ev, &intr_ctx);
+ if (ret < 0) {
+ printf("%d: SDEI interrupt release failed; ret=%lld\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t iterate_state_machine(void)
+{
+ test_result_t ret;
+
+ printf("%d: Cranking SDEI state machine on %llx\n",
+ __LINE__, (unsigned long long) read_mpidr_el1());
+
+ ret = hdlr_unregistered();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_unregistered failed\n", __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_registered();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_registered failed\n", __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_registered_running();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_registered_running failed\n",
+ __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_unregistered_running();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_unregistered_running failed\n",
+ __LINE__);
+ return ret;
+ }
+
+ ret = hdlr_enabled();
+ if (ret != TEST_RESULT_SUCCESS) {
+ printf("%d: State test hdlr_enabled failed\n", __LINE__);
+ return ret;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Have all CPUs run through the SDEI state machine.
+ */
+test_result_t test_sdei_state(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ int32_t aff_info __unused;
+ int64_t ret;
+
+ ret = sdei_version();
+ if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+ printf("%d: Unexpected SDEI version: 0x%llx\n",
+ __LINE__, (unsigned long long) ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid) {
+ /* Run on this CPU */
+ if (iterate_state_machine() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+ } else {
+ /* Power on other CPU to run through SDEI state machine */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) iterate_state_machine, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long) target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait for other CPU to power down */
+ do {
+ aff_info = tftf_psci_affinity_info(target_mpid,
+ MPIDR_AFFLVL0);
+ } while (aff_info != PSCI_STATE_OFF);
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/unknown_smc.c b/tftf/tests/runtime_services/standard_service/unknown_smc.c
new file mode 100644
index 000000000..70175e3d1
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/unknown_smc.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <std_svc.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/* Invalid SMC FID: chose an identifier that falls in the reserved space */
+#define INVALID_FID (0x00ff0000 | (1u << 31))
+
+/**
+ * @Test_Aim@ Force an SMC_UNKNOWN return
+ */
+test_result_t test_unknown_smc(void)
+{
+ smc_args unk_smc;
+ smc_ret_values ret;
+
+ unk_smc.arg0 = INVALID_FID;
+ ret = tftf_smc(&unk_smc);
+
+ if (ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf("Expected SMC_UNKNOWN, got %ld\n",
+ (long int) ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c
new file mode 100644
index 000000000..22e35dbc1
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_common.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_ITERATIONS_COUNT 1000
+
+#define SUSPEND_TIME_1_SEC 1000
+
+#define TEST_VALUE_1 4
+#define TEST_VALUE_2 6
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+static event_t cpu_has_finished_test[PLATFORM_CORE_COUNT];
+
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+static volatile int wakeup_irq_received[PLATFORM_CORE_COUNT];
+static volatile int individual_test_failed[PLATFORM_CORE_COUNT];
+static volatile int pwr_level_being_tested;
+static volatile int test_finished_flag;
+
+/* Dummy timer handler that sets a flag to check it has been called. */
+static int suspend_wakeup_handler(void *data)
+{
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ assert(wakeup_irq_received[core_pos] == 0);
+
+ wakeup_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/* Dummy handler that sets a flag so as to check it has been called. */
+static int test_handler(void *data)
+{
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ assert(requested_irq_received[core_pos] == 0);
+
+ requested_irq_received[core_pos] = 1;
+
+ return 0;
+}
+
+/* Register a dummy handler for SGI #0 and enable it. Returns 0 if success. */
+static int register_and_enable_test_sgi_handler(unsigned int core_pos)
+{
+ /* SGIs #0 - #6 are freely available. */
+
+ int ret = tftf_irq_register_handler(IRQ_NS_SGI_0, test_handler);
+
+ if (ret != 0) {
+ tftf_testcase_printf(
+ "Failed to register SGI handler @ CPU %d (rc = %d)\n",
+ core_pos, ret);
+ return -1;
+ }
+
+ tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+ return 0;
+}
+
+/* Disable and unregister the dummy handler for SGI #0. */
+static void unregister_and_disable_test_sgi_handler(void)
+{
+ tftf_irq_disable(IRQ_NS_SGI_0);
+
+ tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+}
+
+/*
+ * Generate a pre-empted STD SMC on the CPU who called this function. Steps:
+ * 1. IRQs are disabled.
+ * 2. An SGI is sent to itself. It cannot be handled because IRQs are disabled.
+ * 3. Invoke an STD SMC on the TSP, which is preempted by the pending SGI.
+ * 4. IRQs are enabled, the SGI is handled.
+ * 5. This function is exited with a preempted STD SMC waiting to be resumed.
+ */
+static int preempt_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ int result = TEST_RESULT_SUCCESS;
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ if (register_and_enable_test_sgi_handler(core_pos) != 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 0. */
+ disable_irq();
+
+ /*
+ * Send SGI to itself. It can't be handled because the
+ * interrupts are disabled.
+ */
+ requested_irq_received[core_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI
+ * that is waiting.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf("SMC @ CPU %d returned 0x%llX.\n", core_pos,
+ (unsigned long long)smc_ret.ret0);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[core_pos] == 0) {
+ VERBOSE("SGI not handled @ CPU %d\n", core_pos);
+ }
+
+ return result;
+}
+
+/* Resume a pre-empted STD SMC on the CPU who called this function. */
+static int resume_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Resume the STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2)
+ || (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1 * 2, TEST_VALUE_2 * 2);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Try to resume a pre-empted STD SMC on the CPU who called this function,
+ * but check for SMC_UNKNOWN as a result.
+ */
+static int resume_fail_std_smc_on_this_cpu(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t core_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(core_mpid);
+
+ /* Resume the STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*******************************************************************************
+ * Test pre-emption during STD SMCs.
+ ******************************************************************************/
+
+/* Test routine for test_irq_preempted_std_smc. */
+static test_result_t test_irq_preempted_std_smc_fn(void)
+{
+ u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(cpu_mpid);
+
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ for (unsigned int i = 0; i < TEST_ITERATIONS_COUNT; i++) {
+
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ if (resume_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. Tests IRQ preemption during STD SMC
+ * from multiple cores. Uses an SGI to trigger the preemption. TSP should be
+ * present.
+ *
+ * Steps: 1. Invoke Standard SMC on the TSP and try to preempt it via IRQ.
+ * 2. Resume the preempted SMC and verify the result.
+ *
+ * Returns SUCCESS if above 2 steps are performed correctly in every CPU else
+ * failure.
+ */
+test_result_t test_irq_preempted_std_smc(void)
+{
+ u_register_t cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ int psci_ret;
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_has_entered_test[i]);
+ }
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_irq_preempted_std_smc_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Wait until all CPUs have started the test. */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+
+ /* Enter the test on lead CPU and return the result. */
+ return test_irq_preempted_std_smc_fn();
+}
+
+/*
+ * Test routine for non-lead CPUs for test_resume_preempted_std_smc_other_cpus.
+ */
+static test_result_t test_resume_preempted_std_smc_other_cpus_non_lead_fn(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /*
+ * Try to resume the STD SMC invoked from the lead CPU. It shouldn't be
+ * able to do it.
+ */
+
+ smc_args std_smc_args;
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret_values smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Signal to the lead CPU that the calling CPU has finished the test */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+ return result;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
+ * pre-emption on one CPU should not affect the other CPU. Trying to resume
+ * one STD SMC that was preempted on one CPU shouldn't be possible from any
+ * other CPU.
+ *
+ * Steps: 1. Issue Standard SMC and try preempting it via IRQ on lead CPU.
+ * 2. Try to resume it from the rest of the CPUs sequentially.
+ * 3. Resume the preempted SMC from the lead CPU and verify the result.
+ *
+ * Returns SUCCESS if step 2 fails and steps 1 and 3 succeed, else failure.
+ */
+test_result_t test_resume_preempted_std_smc_other_cpus(void)
+{
+ int i;
+ u_register_t cpu_mpid;
+ unsigned int cpu_node, core_pos;
+ int psci_ret;
+
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ /*
+ * Invoke a STD SMC that will be pre-empted.
+ */
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Try to resume the STD SMC from the rest of CPUs. It shouldn't be
+ * possible.
+ */
+
+ for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_has_finished_test[i]);
+ }
+
+ /* Power on all CPUs and perform test sequentially. */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it's the one with the pre-empted STD SMC. */
+ if (cpu_mpid == lead_mpid) {
+ continue;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+
+ psci_ret = tftf_cpu_on(cpu_mpid,
+ (uintptr_t)test_resume_preempted_std_smc_other_cpus_non_lead_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait until the test is finished to begin with the next CPU. */
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+ }
+
+ /*
+ * Try to resume the STD SMC from the lead CPU. It should be able to do
+ * it and to return the correct result.
+ */
+ return resume_std_smc_on_this_cpu();
+}
+
+/* Test routine for secondary CPU for test_resume_different_cpu_preempted_std_smc */
+static test_result_t test_resume_different_cpu_preempted_std_smc_non_lead_fn(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ /* Register and enable SGI. SGIs #0 - #6 are freely available. */
+ if (register_and_enable_test_sgi_handler(core_pos) != 0) {
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 0. */
+ disable_irq();
+
+ /*
+ * Send SGI to itself. It can't be handled because the interrupts are
+ * disabled.
+ */
+ requested_irq_received[core_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+ * waiting. It has to be different than the one invoked from the lead
+ * CPU.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_MUL);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
+ core_pos, (unsigned long long)smc_ret.ret0);
+ enable_irq();
+ unregister_and_disable_test_sgi_handler();
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[core_pos] == 0) {
+ VERBOSE("SGI not handled @ CPU %d\n", core_pos);
+ }
+
+ /* Resume the STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1*TEST_VALUE_1)
+ || (smc_ret.ret2 != TEST_VALUE_2*TEST_VALUE_2)) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1*TEST_VALUE_1, TEST_VALUE_2*TEST_VALUE_2);
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Try to resume the lead CPU STD SMC. Verify result. */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != SMC_UNKNOWN) {
+ tftf_testcase_printf(
+ "SMC @ CPU %d returned 0x%llX 0x%llX 0x%llX instead of SMC_UNKNOWN\n",
+ core_pos, (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2);
+ /* Signal to the lead CPU that the calling CPU has finished */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Signal to the lead CPU that the calling CPU has finished the test */
+ tftf_send_event(&cpu_has_finished_test[core_pos]);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Multicore preemption test. For a MP Secure Payload, the
+ * pre-emption on one CPU should not affect the other CPU. Trying to resume
+ * one STD SMC pre-empted on one CPU shouldn't be possible from any other CPU
+ * involved in the test, and the STD SMC that is resumed from each CPU should
+ * be the same one that was invoked from it.
+ *
+ * Steps: 1. Lead and secondary CPUs set different preempted STD SMCs.
+ * 2. Resume the preempted SMC from secondary CPU. Verify the result.
+ * 3. Try to resume again to check if it can resume the lead SMC.
+ * 4. Resume the preempted SMC from lead CPU. Verify the result.
+ *
+ * Returns SUCCESS if steps 1, 2 and 4 succeed and step 3 fails, else failure.
+ */
+test_result_t test_resume_different_cpu_preempted_std_smc(void)
+{
+ smc_args std_smc_args;
+ smc_ret_values smc_ret;
+ u_register_t cpu_mpid;
+ unsigned int core_pos;
+ int psci_ret;
+
+ u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_pos = platform_get_core_pos(lead_mpid);
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+ /*
+ * Generate a SGI on the lead CPU that can't be handled because the
+ * interrupts are disabled.
+ */
+ register_and_enable_test_sgi_handler(lead_pos);
+ disable_irq();
+
+ requested_irq_received[lead_pos] = 0;
+
+ tftf_send_sgi(IRQ_NS_SGI_0, lead_pos);
+
+ /*
+ * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+ * waiting.
+ */
+ std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+ std_smc_args.arg1 = TEST_VALUE_1;
+ std_smc_args.arg2 = TEST_VALUE_2;
+ smc_ret = tftf_smc(&std_smc_args);
+ if (smc_ret.ret0 != TSP_SMC_PREEMPTED) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX instead of TSP_SMC_PREEMPTED.\n",
+ (unsigned long long)smc_ret.ret0);
+ enable_irq();
+ unregister_and_disable_test_sgi_handler();
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Set PSTATE.I to 1. Let the SGI be handled. */
+ enable_irq();
+
+ /* Cleanup. Disable and unregister SGI handler. */
+ unregister_and_disable_test_sgi_handler();
+
+ /*
+ * Check that the SGI has been handled, but don't fail if it hasn't
+ * because there is no guarantee that it will have actually happened at
+ * this point.
+ */
+ if (requested_irq_received[lead_pos] == 0) {
+ VERBOSE("SGI not handled @ lead CPU.\n");
+ }
+
+ /* Generate a preempted SMC in a secondary CPU. */
+ cpu_mpid = tftf_find_any_cpu_other_than(lead_mpid);
+ if (cpu_mpid == INVALID_MPID) {
+ tftf_testcase_printf("Couldn't find another CPU.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_init_event(&cpu_has_finished_test[core_pos]);
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t)
+ test_resume_different_cpu_preempted_std_smc_non_lead_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
+ core_pos, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Wait until the test is finished to continue. */
+ tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+
+ /*
+ * Try to resume the STD SMC from the lead CPU. It should be able to resume
+ * the one it generated before and to return the correct result.
+ */
+ std_smc_args.arg0 = TSP_FID_RESUME;
+ smc_ret = tftf_smc(&std_smc_args);
+ if ((smc_ret.ret0 != 0) || (smc_ret.ret1 != TEST_VALUE_1 * 2) ||
+ (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+ tftf_testcase_printf(
+ "SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+ (unsigned long long)smc_ret.ret0,
+ (unsigned long long)smc_ret.ret1,
+ (unsigned long long)smc_ret.ret2,
+ TEST_VALUE_1*2, TEST_VALUE_2*2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*******************************************************************************
+ * Test PSCI APIs while preempted.
+ ******************************************************************************/
+
+/*
+ * First part of the test routine for test_psci_cpu_on_off_preempted.
+ * Prepare a pre-empted STD SMC.
+ */
+static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_1(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Signal to the lead CPU that the calling CPU has entered the test
+ * conditions for the second part.
+ */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ /*
+ * Now this CPU has to be turned off. Since this is not a lead CPU, it
+ * will be done in run_tests(). If it was done here, cpus_cnt wouldn't
+ * decrement and the tftf would think there is still a CPU running, so
+ * it wouldn't finish.
+ *
+ * The result will be overwritten when the second part of the test is
+ * executed.
+ */
+ return result;
+}
+
+/*
+ * Second part of the test routine for test_psci_cpu_on_off_preempted.
+ * Try to resume the previously pre-empted STD SMC.
+ */
+static test_result_t test_psci_cpu_on_off_preempted_non_lead_fn_2(void)
+{
+	unsigned int core_pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	/*
+	 * The STD SMC preempted before the CPU was powered off must not
+	 * survive the off/on cycle, so resuming it is expected to fail.
+	 */
+	test_result_t result = resume_fail_std_smc_on_this_cpu();
+
+	/* Signal to the lead CPU that the calling CPU has finished the test */
+	tftf_send_event(&cpu_has_finished_test[core_pos]);
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Resume preempted STD SMC after PSCI CPU OFF/ON cycle.
+ *
+ * Steps: 1. Each CPU sets a preempted STD SMC.
+ * 2. They send an event to the lead CPU and call PSCI CPU OFF.
+ * 3. The lead CPU invokes PSCI CPU ON for the secondaries (warm boot).
+ * 4. Try to resume the preempted STD SMC on secondary CPUs.
+ *
+ * Returns SUCCESS if steps 1, 2 or 3 succeed and step 4 fails, else failure.
+ */
+test_result_t test_psci_cpu_on_off_preempted_std_smc(void)
+{
+	int i;
+	int all_powered_down;
+	u_register_t cpu_mpid;
+	unsigned int cpu_node, core_pos;
+	int psci_ret;
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Re-arm the handshake events used by both parts of the test. */
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		tftf_init_event(&cpu_has_entered_test[i]);
+		tftf_init_event(&cpu_has_finished_test[i]);
+	}
+
+	/* Power on all CPUs */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		/* Part 1: each secondary leaves a preempted STD SMC behind. */
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_1, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU %d (rc = %d)\n",
+				core_pos, psci_ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to exit the first part of the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	/*
+	 * Check that all secondary CPUs are powered off. They turn
+	 * themselves off from run_tests() after part 1; spin until the
+	 * framework reports them all offline so the CPU_ON below is a
+	 * genuine off/on (warm boot) cycle.
+	 */
+	all_powered_down = 0;
+	while (all_powered_down == 0) {
+		all_powered_down = 1;
+		for_each_cpu(cpu_node) {
+			cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+			if (cpu_mpid == lead_mpid) {
+				continue;
+			}
+			if (tftf_is_cpu_online(cpu_mpid) != 0) {
+				all_powered_down = 0;
+			}
+		}
+	}
+
+	/* Start the second part of the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already powered on */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+
+		/* Part 2: resuming the pre-off SMC must fail on each CPU. */
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_psci_cpu_on_off_preempted_non_lead_fn_2, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%x (rc = %d)\n",
+				core_pos, psci_ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to finish the second part of the test. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid) {
+			continue;
+		}
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_finished_test[core_pos]);
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************/
+
+/*
+ * @Test_Aim@ Resume preempted STD SMC after PSCI SYSTEM SUSPEND (in case it is
+ * supported).
+ *
+ * Steps: 1. The lead CPU sets a preempted STD SMC.
+ * 2. It calls PSCI SYSTEM SUSPEND with a wakeup timer for 1 sec.
+ * 3. Try to resume the preempted STD SMC.
+ *
+ * Returns SUCCESS if steps 1 and 2 succeed and step 3 fails.
+ */
+test_result_t test_psci_system_suspend_preempted_std_smc(void)
+{
+	int psci_ret;
+	int result = TEST_RESULT_SUCCESS;
+
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int lead_pos = platform_get_core_pos(lead_mpid);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* SYSTEM_SUSPEND is optional in PSCI; skip if not implemented. */
+	if (!is_psci_sys_susp_supported()) {
+		tftf_testcase_printf(
+			"SYSTEM_SUSPEND is not supported.\n");
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Step 1: leave a preempted STD SMC pending on the lead CPU. */
+	if (preempt_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+		return TEST_RESULT_FAIL;
+	}
+
+	/*
+	 * Check that the system is in a state fit for SYSTEM_SUSPEND
+	 * (presumably all other cores off — verify against the helper's
+	 * definition); flag a failure otherwise but continue, so the
+	 * preempted SMC state is still exercised below.
+	 */
+	if (!is_sys_suspend_state_ready()) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Prepare wakeup timer. IRQs need to be enabled. */
+	wakeup_irq_received[lead_pos] = 0;
+
+	tftf_timer_register_handler(suspend_wakeup_handler);
+
+	/* Program timer to fire interrupt after timer expires */
+	tftf_program_timer(SUSPEND_TIME_1_SEC);
+
+	/* Issue PSCI_SYSTEM_SUSPEND. */
+	psci_ret = tftf_system_suspend();
+
+	/* Spin until the wakeup timer interrupt has actually been handled. */
+	while (!wakeup_irq_received[lead_pos])
+		;
+
+	if (psci_ret != PSCI_E_SUCCESS) {
+		mp_printf("SYSTEM_SUSPEND from lead CPU failed. ret: 0x%x\n",
+			psci_ret);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Remove timer after waking up.*/
+	tftf_cancel_timer();
+	tftf_timer_unregister_handler();
+
+	/* Step 3: the preempted STD SMC must not survive the suspend. */
+	if (resume_fail_std_smc_on_this_cpu() != TEST_RESULT_SUCCESS) {
+		result = TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c
new file mode 100644
index 000000000..fd6a14af5
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_irq_spurious_gicv2.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <debug.h>
+#include <events.h>
+#include <gic_v2.h>
+#include <irq.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_VALUE_1 4
+#define TEST_VALUE_2 6
+
+#define TEST_SPURIOUS_ITERATIONS_COUNT 1000000
+
+#define TEST_SPI_ID (MIN_SPI_ID + 2)
+
+static event_t cpu_ready[PLATFORM_CORE_COUNT];
+static volatile int requested_irq_received[PLATFORM_CORE_COUNT];
+static volatile int test_finished_flag;
+
+static volatile int spurious_count[PLATFORM_CORE_COUNT];
+static volatile int preempted_count[PLATFORM_CORE_COUNT];
+
+/* Dummy handler that sets a flag so as to check it has been called. */
+static int test_handler(void *data)
+{
+	unsigned int core_pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	/* The flag must not have been set already for this CPU. */
+	assert(requested_irq_received[core_pos] == 0);
+
+	/* Record that this CPU handled the requested interrupt. */
+	requested_irq_received[core_pos] = 1;
+
+	return 0;
+}
+
+/* Dummy handler that increases a variable to check if it has been called. */
+/* Handler that counts spurious interrupts taken on the calling CPU. */
+static int test_spurious_handler(void *data)
+{
+	/*
+	 * Mask the MPIDR before computing the core position, consistently
+	 * with test_handler() and every other call site in this file: the
+	 * raw MPIDR value may carry bits outside MPID_MASK that
+	 * platform_get_core_pos() is not meant to see.
+	 */
+	unsigned int core_pos =
+		platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+	spurious_count[core_pos]++;
+
+	return 0;
+}
+
+/* Helper function for test_juno_multicore_spurious_interrupt. */
+static test_result_t test_juno_multicore_spurious_interrupt_non_lead_fn(void)
+{
+	test_result_t result = TEST_RESULT_SUCCESS;
+	u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	/* Keep issuing STD SMCs until the lead CPU ends the test. */
+	while (test_finished_flag == 0) {
+
+		smc_args std_smc_args;
+		smc_ret_values smc_ret;
+
+		/* Invoke an STD SMC. */
+		std_smc_args.arg0 = TSP_STD_FID(TSP_ADD);
+		std_smc_args.arg1 = TEST_VALUE_1;
+		std_smc_args.arg2 = TEST_VALUE_2;
+		smc_ret = tftf_smc(&std_smc_args);
+
+		/*
+		 * Resume the SMC for as long as it keeps being preempted by
+		 * the SPIs the lead CPU triggers, until it either completes
+		 * with the expected result or returns an error.
+		 */
+		while (result != TEST_RESULT_FAIL) {
+			if (smc_ret.ret0 == 0) {
+				/* Verify result */
+				if ((smc_ret.ret1 != TEST_VALUE_1 * 2) ||
+				    (smc_ret.ret2 != TEST_VALUE_2 * 2)) {
+					tftf_testcase_printf(
+						"SMC @ CPU %d returned 0x0 0x%llX 0x%llX instead of 0x0 0x%X 0x%X\n",
+						core_pos,
+						(unsigned long long)smc_ret.ret1,
+						(unsigned long long)smc_ret.ret2,
+						TEST_VALUE_1 * 2, TEST_VALUE_2 * 2);
+					result = TEST_RESULT_FAIL;
+				} else {
+					/* Correct, exit inner loop */
+					break;
+				}
+			} else if (smc_ret.ret0 == TSP_SMC_PREEMPTED) {
+				/* Resume the STD SMC. */
+				std_smc_args.arg0 = TSP_FID_RESUME;
+				smc_ret = tftf_smc(&std_smc_args);
+				preempted_count[core_pos]++;
+			} else {
+				/* Error */
+				tftf_testcase_printf(
+					"SMC @ lead CPU returned 0x%llX 0x%llX 0x%llX\n",
+					(unsigned long long)smc_ret.ret0,
+					(unsigned long long)smc_ret.ret1,
+					(unsigned long long)smc_ret.ret2);
+				mp_printf("Panic <others> %d\n", core_pos);
+				result = TEST_RESULT_FAIL;
+			}
+		}
+
+		if (result == TEST_RESULT_FAIL)
+			break;
+	}
+
+	/* Signal to the lead CPU that the calling CPU has finished the test */
+	tftf_send_event(&cpu_ready[core_pos]);
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test Spurious interrupt handling. GICv2 only. Only works if TF
+ * is compiled with TSP_NS_INTR_ASYNC_PREEMPT = 0.
+ *
+ * Steps: 1. Setup SPI handler and spurious interrupt handler on the lead CPU.
+ * 2. Redirect SPI interrupts to all CPUs.
+ * 3. Turn on secondary CPUs and make them invoke STD SMC all time.
+ * 4. The lead CPU starts a loop that triggers a SPI so that all CPUs
+ * will try to handle it.
+ * 5. The CPUs that can't handle the SPI will receive a spurious
+ * interrupt and increase a counter.
+ * 6. Check that there have been spurious interrupts. Not necessarily
+ * the number of (CPU - 1) * iterations as the SMC may need time to
+ * handle.
+ *
+ * Returns SUCCESS if all steps succeed, else failure.
+ */
+test_result_t test_juno_multicore_spurious_interrupt(void)
+{
+	int i, j;
+	u_register_t cpu_mpid;
+	unsigned int cpu_node, core_pos;
+	int psci_ret;
+	int ret;
+
+	u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+	SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/*
+	 * Catch the GIC "spurious interrupt" ID so that CPUs which lose the
+	 * race to acknowledge the SPI can count their spurious wakeups.
+	 */
+	ret = tftf_irq_register_handler(GIC_SPURIOUS_INTERRUPT,
+					test_spurious_handler);
+	if (ret != 0) {
+		tftf_testcase_printf(
+			"Failed to register spurious handler. Error = %d\n",
+			ret);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Reset test variables and boot secondary cores. */
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		spurious_count[i] = 0;
+		preempted_count[i] = 0;
+	}
+
+	test_finished_flag = 0;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+		tftf_init_event(&cpu_ready[i]);
+
+	/* Make the resets above visible before the secondaries boot. */
+	dsbsy();
+
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		psci_ret = tftf_cpu_on(cpu_mpid,
+			(uintptr_t)test_juno_multicore_spurious_interrupt_non_lead_fn, 0);
+		if (psci_ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf(
+				"Failed to power on CPU 0x%x (%d)\n",
+				(unsigned int)cpu_mpid, psci_ret);
+			test_finished_flag = 1;
+			return TEST_RESULT_SKIPPED;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+		/*
+		 * Re-arm this CPU's event for the end-of-test handshake.
+		 * BUG FIX: the original code indexed the array with the
+		 * stale loop counter 'i' (left at PLATFORM_CORE_COUNT by
+		 * the reset loop above), which is an out-of-bounds access
+		 * and left the consumed events un-rearmed. Use core_pos.
+		 */
+		tftf_init_event(&cpu_ready[core_pos]);
+	}
+
+	/* Wait until tftf_init_event is seen by all cores */
+	dsbsy();
+
+	/* Register SPI handler (for all CPUs) and enable it. */
+	ret = tftf_irq_register_handler(TEST_SPI_ID, test_handler);
+	if (ret != 0) {
+		tftf_testcase_printf(
+			"Failed to register SPI handler @ lead CPU. Error code = %d\n",
+			ret);
+		tftf_irq_unregister_handler(GIC_SPURIOUS_INTERRUPT);
+		test_finished_flag = 1;
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Enable IRQ and route it to this CPU. */
+	tftf_irq_enable(TEST_SPI_ID, GIC_HIGHEST_NS_PRIORITY);
+
+	/* Route interrupts to all CPUs */
+	gicv2_set_itargetsr_value(TEST_SPI_ID, 0xFF);
+
+	for (j = 0; j < TEST_SPURIOUS_ITERATIONS_COUNT; j++) {
+
+		/* Clear handled flags. */
+		for (i = 0; i < PLATFORM_CORE_COUNT; i++)
+			requested_irq_received[i] = 0;
+		dsbsy();
+
+		/* Request SPI */
+		gicv2_gicd_set_ispendr(TEST_SPI_ID);
+
+		/* Wait until exactly one CPU reports having handled it. */
+		int wait = 1;
+
+		while (wait) {
+			for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+				if (requested_irq_received[i])
+					wait = 0;
+			}
+		}
+	}
+
+	/* Tell the secondaries to stop issuing STD SMCs. */
+	test_finished_flag = 1;
+
+	/* Wait for non-lead CPUs to finish the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_mpid)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_ready[core_pos]);
+	}
+
+	/* Cleanup. */
+	tftf_irq_disable(TEST_SPI_ID);
+
+	tftf_irq_unregister_handler(TEST_SPI_ID);
+	tftf_irq_unregister_handler(GIC_SPURIOUS_INTERRUPT);
+
+	/* Check results. */
+	unsigned int total_spurious_count = 0;
+	unsigned int total_preempted_count = 0;
+
+	for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+		total_spurious_count += spurious_count[i];
+		total_preempted_count += preempted_count[i];
+	}
+
+	/* Check that the test has tested the behaviour. */
+	if (total_spurious_count == 0) {
+		tftf_testcase_printf("No spurious interrupts were handled.\n"
+			"The TF-A must be compiled with TSP_NS_INTR_ASYNC_PREEMPT = 0\n");
+		/*
+		 * Don't flag the test as failed in case the TF-A was compiled
+		 * with TSP_NS_INTR_ASYNC_PREEMPT=1.
+		 */
+		return TEST_RESULT_SKIPPED;
+	}
+
+	if (total_preempted_count == 0) {
+		tftf_testcase_printf("No preempted STD SMCs.\n");
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c b/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c
new file mode 100644
index 000000000..c2223894c
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_normal_int_switch.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <debug.h>
+#include <irq.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <std_svc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <test_helpers.h>
+#include <tftf.h>
+
+#define STRESS_COUNT 100
+
+/*
+ * The shared data between the handler and the
+ * preempt_tsp_via_SGI routine.
+ */
+typedef struct {
+ smc_ret_values tsp_result;
+ int wait_for_fiq;
+} irq_handler_shared_data;
+
+static irq_handler_shared_data shared_data;
+
+/*
+ * Handler for the SGI #0.
+ */
+static int sgi_handler(void *data)
+{
+	/* Ensure this is the SGI we expect */
+	assert(*(unsigned int *)data == IRQ_NS_SGI_0);
+
+	/*
+	 * Optionally park in WFI so that the secure timer FIQ fires while
+	 * this IRQ handler is still active; the FIQ wakes the core again.
+	 */
+	if (shared_data.wait_for_fiq != 0)
+		wfi();
+
+	return 0;
+}
+
+/*
+ * This routine issues a SGI with interrupts disabled to make sure that the
+ * pending SGI will preempt a STD SMC.
+ */
+static test_result_t preempt_tsp_via_SGI(const smc_args *tsp_svc_params,
+		int hold_irq_handler_for_fiq)
+{
+	int rc;
+	unsigned int core_mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(core_mpid);
+	test_result_t result = TEST_RESULT_SUCCESS;
+
+	memset(&shared_data, 0, sizeof(shared_data));
+
+	/* Optionally make the SGI handler park in WFI until the secure FIQ. */
+	if (hold_irq_handler_for_fiq)
+		shared_data.wait_for_fiq = 1;
+
+	/* Register Handler for the interrupt. SGIs #0 - #6 are available. */
+	rc = tftf_irq_register_handler(IRQ_NS_SGI_0, sgi_handler);
+	if (rc != 0) {
+		tftf_testcase_printf("Failed to register SGI handler. "
+				"Error code = %d\n", rc);
+		return TEST_RESULT_SKIPPED;
+	}
+
+	/* Enable SGI #0 */
+	tftf_irq_enable(IRQ_NS_SGI_0, GIC_HIGHEST_NS_PRIORITY);
+
+	/* Set PSTATE.I to 0. */
+	disable_irq();
+
+	/*
+	 * Send SGI to the current CPU. It can't be handled because the
+	 * interrupts are disabled, so it stays pending.
+	 */
+	tftf_send_sgi(IRQ_NS_SGI_0, core_pos);
+
+	/*
+	 * Invoke an STD SMC. Should be pre-empted because of the SGI that is
+	 * waiting: the secure world re-enables the NS interrupt path, the
+	 * pending SGI fires and the SMC returns TSP_SMC_PREEMPTED.
+	 */
+	shared_data.tsp_result = tftf_smc(tsp_svc_params);
+	if (shared_data.tsp_result.ret0 != TSP_SMC_PREEMPTED) {
+		tftf_testcase_printf("SMC returned 0x%llX instead of "
+				"TSP_SMC_PREEMPTED.\n",
+				(unsigned long long)shared_data.tsp_result.ret0);
+		result = TEST_RESULT_FAIL;
+	}
+
+	/* Set PSTATE.I to 1. The SGI will be handled after this. */
+	enable_irq();
+
+	/* Disable SGI #0 */
+	tftf_irq_disable(IRQ_NS_SGI_0);
+
+	/* Unregister handler */
+	rc = tftf_irq_unregister_handler(IRQ_NS_SGI_0);
+	if (rc != 0) {
+		tftf_testcase_printf("Failed to unregister IRQ handler. "
+				"Error code = %d\n", rc);
+		result = TEST_RESULT_FAIL;
+	}
+
+	return result;
+}
+
+/*
+ * @Test_Aim@ Test the secure world preemption by non secure interrupt.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Resume the preempted SMC
+ * Returns SUCCESS if above 2 steps are performed correctly else failure.
+ */
+/*
+ * Issue the STD SMC 'fid' with arguments 4 and 6, preempt it via an SGI,
+ * resume it, and check that it completes with (0, expected1, expected2).
+ * Returns TEST_RESULT_FAIL on preemption failure or wrong result, else
+ * TEST_RESULT_SUCCESS.
+ */
+static test_result_t preempt_resume_and_check(u_register_t fid,
+					      u_register_t expected1,
+					      u_register_t expected2)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result;
+	test_result_t res;
+
+	/* Standard SMC */
+	tsp_svc_params.arg0 = fid;
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the resumed TSP operation. */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != expected1 ||
+	    tsp_result.ret2 != expected2) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 %d %d\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2,
+				(unsigned int)expected1,
+				(unsigned int)expected2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Preempt-and-resume each of the four TSP STD operations in turn. The
+ * original open-coded version repeated the same sequence four times and
+ * printed one error message twice; the repetition is factored into
+ * preempt_resume_and_check() and the duplicate print removed.
+ */
+test_result_t tsp_int_and_resume(void)
+{
+	test_result_t res;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Addition: 4+4 = 8, 6+6 = 12. */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_ADD), 8, 12);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Subtraction: 4-4 = 0, 6-6 = 0. */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_SUB), 0, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Multiplication: 4*4 = 16, 6*6 = 36. */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_MUL), 16, 36);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Division: 4/4 = 1, 6/6 = 1. */
+	res = preempt_resume_and_check(TSP_STD_FID(TSP_DIV), 1, 1);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Verify Fast SMC request on an interrupted tsp returns error.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Issue Fast SMC, this is not expected and TSP should return error.
+ * 3. Resume the preempted SMC and verify the result.
+ * Returns SUCCESS if above 3 steps are performed correctly else failure.
+ */
+test_result_t test_fast_smc_when_tsp_preempted(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Standard SMC */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue Fast SMC */
+	tsp_svc_params.arg0 = TSP_FAST_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* A Fast SMC while a STD SMC is preempted must be rejected. */
+	if (tsp_result.ret0 != SMC_UNKNOWN) {
+		tftf_testcase_printf("Fast SMC should not execute"
+				"while SMC is preempted\n");
+		res = TEST_RESULT_FAIL;
+	}
+
+	/* Issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the addition (4+4 = 8, 6+6 = 12). */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+	    tsp_result.ret2 != 12) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 8 12\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2);
+
+		res = TEST_RESULT_FAIL;
+	}
+
+	return res;
+}
+
+/*
+ * @Test_Aim@ Test the Standard SMC when tsp is pre-empted by interrupt.
+ *
+ * Steps:
+ * 1. Issue Standard SMC and preempt it via SGI
+ * 2. Issue another Standard SMC. this is not expected and TSP should return
+ * error.
+ * 3. Resume the preempted SMC or abort if the parameter `abort_smc` is set to
+ * 1.
+ * 4. Check the result if the SMC was resumed, or just carry on if it was
+ * aborted.
+ * Returns SUCCESS if above 4 steps are performed correctly else failure.
+ */
+static test_result_t test_std_smc_when_tsp_preempted(int abort_smc)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Standard SMC */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue Standard SMC */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* A second STD SMC while one is preempted must be rejected. */
+	if (tsp_result.ret0 != SMC_UNKNOWN) {
+		tftf_testcase_printf("Standard SMC should not execute while SMC is preempted\n");
+		res = TEST_RESULT_FAIL;
+	}
+
+	/* Issue ABORT or RESUME */
+	tsp_svc_params.arg0 = abort_smc ? TSP_FID_ABORT : TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/*
+	 * There is no way to check that the ABORT succeeded or failed because
+	 * it will return SMC_UNKNOWN in both cases.
+	 */
+	if (!abort_smc) {
+		/*
+		 * Check the result of the addition if we issued RESUME
+		 * (4+4 = 8, 6+6 = 12).
+		 */
+		if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+		    tsp_result.ret2 != 12) {
+			tftf_testcase_printf("SMC resume returned wrong result: got %d %d %d expected: 0 8 12\n",
+					(unsigned int)tsp_result.ret0,
+					(unsigned int)tsp_result.ret1,
+					(unsigned int)tsp_result.ret2);
+			res = TEST_RESULT_FAIL;
+		}
+	}
+
+	return res;
+}
+
+/* RESUME variant: preempt a STD SMC, issue a second one, then resume it. */
+test_result_t test_std_smc_when_tsp_preempted_resume(void)
+{
+	return test_std_smc_when_tsp_preempted(0);
+}
+
+/* ABORT variant: preempt a STD SMC, issue a second one, then abort it. */
+test_result_t test_std_smc_when_tsp_preempted_abort(void)
+{
+	return test_std_smc_when_tsp_preempted(1);
+}
+
+/*
+ * @Test_Aim@ Test RESUME SMC call when TSP is not preempted. RESUME should fail.
+ *
+ * Issues resume SMC. This is not expected by TSP and returns error.
+ * This is a negative test, Return SUCCESS is RESUME returns SMC_UNKNOWN
+ */
+/*
+ * Negative test: issue TSP_FID_RESUME with no preempted STD SMC pending.
+ * The TSP dispatcher must reject it with SMC_UNKNOWN; returns SUCCESS when
+ * it does, FAIL otherwise. (Also removes a stray line-continuation
+ * backslash that the original left inside the printf argument list.)
+ */
+test_result_t test_resume_smc_without_preemption(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	if (tsp_result.ret0 != SMC_UNKNOWN) {
+		tftf_testcase_printf("SMC Resume should return UNKNOWN, got:%d\n",
+				(unsigned int)tsp_result.ret0);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Stress Test the secure world preemption by non secure interrupt
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Resume the preempted SMC and repeat from Step 1 for STRESS_COUNT times.
+ * Returns SUCCESS if above 2 steps are performed correctly else failure.
+ */
+test_result_t tsp_int_and_resume_stress(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res = TEST_RESULT_SUCCESS;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	int count = 0;
+
+	NOTICE("This stress test will repeat %d times\n", STRESS_COUNT);
+	/* Repeat the preempt/resume cycle until a failure or STRESS_COUNT. */
+	while ((count < STRESS_COUNT) &&
+	       (res == TEST_RESULT_SUCCESS)) {
+		/* Standard SMC */
+		tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+		tsp_svc_params.arg1 = 4;
+		tsp_svc_params.arg2 = 6;
+		/* Try to preempt TSP via IRQ */
+		res = preempt_tsp_via_SGI(&tsp_svc_params, 0);
+		if (res == TEST_RESULT_FAIL)
+			return res;
+
+		/* Issue RESUME */
+		tsp_svc_params.arg0 = TSP_FID_RESUME;
+		tsp_result = tftf_smc(&tsp_svc_params);
+
+		/* Check the result of the addition (4+4 = 8, 6+6 = 12). */
+		if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+		    tsp_result.ret2 != 12) {
+			tftf_testcase_printf("SMC resume returned wrong result:"
+					"got %d %d %d expected: 0 8 12\n",
+					(unsigned int)tsp_result.ret0,
+					(unsigned int)tsp_result.ret1,
+					(unsigned int)tsp_result.ret2);
+			res = TEST_RESULT_FAIL;
+		}
+
+		count++;
+	}
+	return res;
+}
+
+/*
+ * @Test_Aim@ Test Secure FIQ when pre-empted by non secure interrupt.
+ *
+ * We really cannot verify whether FIQ fired and preempted the SGI handler
+ * or not. The TSP prints the address at which the execution was interrupted
+ * for the FIQ. By looking at the address printed from the TSP logs, we can
+ * verify that the SGI handler was interrupted by FIQ. For now, We are assuming
+ * CPU is woken by Secure Timer Interrupt.
+ *
+ * Steps: 1. Issue Standard SMC and preempt it via SGI
+ * 2. Wait in the SGI handler for FIQ which is firing every 500 ms.
+ * 3. Resume the preempted SMC
+ * Returns SUCCESS if above 3 steps are performed correctly else failure.
+ */
+test_result_t tsp_fiq_while_int(void)
+{
+	smc_args tsp_svc_params;
+	smc_ret_values tsp_result = {0};
+	test_result_t res;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* Standard SMC */
+	tsp_svc_params.arg0 = TSP_STD_FID(TSP_ADD);
+	tsp_svc_params.arg1 = 4;
+	tsp_svc_params.arg2 = 6;
+	/*
+	 * Second argument = 1: the SGI handler parks in WFI so the secure
+	 * timer FIQ fires while the IRQ handler is still active.
+	 */
+	res = preempt_tsp_via_SGI(&tsp_svc_params, 1);
+	if (res == TEST_RESULT_FAIL)
+		return res;
+
+	/* Now that we have ensured preemption, issue RESUME */
+	tsp_svc_params.arg0 = TSP_FID_RESUME;
+	tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* Check the result of the addition (4+4 = 8, 6+6 = 12). */
+	if (tsp_result.ret0 != 0 || tsp_result.ret1 != 8 ||
+	    tsp_result.ret2 != 12) {
+		tftf_testcase_printf("SMC resume returned wrong result:"
+				"got %d %d %d expected: 0 8 12\n",
+				(unsigned int)tsp_result.ret0,
+				(unsigned int)tsp_result.ret1,
+				(unsigned int)tsp_result.ret2);
+		return TEST_RESULT_FAIL;
+	}
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c b/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c
new file mode 100644
index 000000000..836367667
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <smccc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+/**
+ * @Test_Aim@ test_smc_tsp_std_fns_call - Query standard function information
+ * against TrustedOS service calls.
+ *
+ * This test targets the TSP, i.e. the Trusted Firmware-A Test Secure-EL1
+ * Payload. If there is no Trusted OS in the software stack, or if it is not
+ * the TSP, this test will be skipped.
+ *
+ * The following queries are performed:
+ * 1) Call count
+ * 2) Call revision info
+ */
+test_result_t test_smc_tsp_std_fns_call(void)
+{
+	smc_args std_svc_args;
+	smc_ret_values ret;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	/* TrustedOS Service Call Count */
+	std_svc_args.arg0 = SMC_TOS_CALL_COUNT;
+	ret = tftf_smc(&std_svc_args);
+	if (ret.ret0 != TSP_NUM_FID) {
+		tftf_testcase_printf("Wrong Call Count: expected %u,\n"
+				"	got %llu\n", TSP_NUM_FID,
+				(unsigned long long)ret.ret0);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* TrustedOS Service Call Revision details */
+	std_svc_args.arg0 = SMC_TOS_REVISION;
+	ret = tftf_smc(&std_svc_args);
+	if ((ret.ret0 != TSP_REVISION_MAJOR) ||
+	     ret.ret1 != TSP_REVISION_MINOR) {
+		tftf_testcase_printf("Wrong Revision: expected {%u.%u}\n"
+				"	got {%llu.%llu}\n",
+				TSP_REVISION_MAJOR, TSP_REVISION_MINOR,
+				(unsigned long long)ret.ret0,
+				(unsigned long long)ret.ret1);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c b/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c
new file mode 100644
index 000000000..b98725932
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_tsp_fast_smc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#define TEST_ITERATIONS_COUNT 1000
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * This function calls and validates the results of TSP operation.
+ * It expects:
+ * - fn_identifier: SMC function identifier
+ * - arg1, arg2: data on which TSP performs operation
+ * - ret1, ret2: results expected after performing operation
+ * on arg1 and arg2
+ * Returns Success if return values of SMC operation are same as
+ * expected else failure.
+ */
+static test_result_t validate_tsp_operations(uint64_t fn_identifier,
+					     uint64_t arg1,
+					     uint64_t arg2,
+					     uint64_t ret1,
+					     uint64_t ret2)
+{
+	smc_args tsp_svc_params = {fn_identifier, arg1, arg2};
+	smc_ret_values tsp_result = tftf_smc(&tsp_svc_params);
+
+	/* A non-zero first return value denotes a TSP-level failure. */
+	if (tsp_result.ret0 != 0) {
+		tftf_testcase_printf("TSP operation 0x%x failed, error:0x%x\n",
+				     (unsigned int) fn_identifier,
+				     (unsigned int) tsp_result.ret0);
+		return TEST_RESULT_FAIL;
+	}
+
+	/* Both payload results must match the caller's expectations. */
+	if (tsp_result.ret1 != ret1 || tsp_result.ret2 != ret2) {
+		tftf_testcase_printf("TSP function:0x%x returned wrong result:"
+				     "got 0x%x 0x%x expected: 0x%x 0x%x\n",
+				     (unsigned int)fn_identifier,
+				     (unsigned int)tsp_result.ret1,
+				     (unsigned int)tsp_result.ret2,
+				     (unsigned int)ret1,
+				     (unsigned int)ret2);
+		return TEST_RESULT_FAIL;
+	}
+
+	return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This function issues SMC calls to trusted OS(TSP) to perform basic mathematical
+ * operations supported by it and validates the result.
+ */
+/*
+ * This function issues SMC calls to the trusted OS (TSP) to perform the
+ * basic mathematical operations supported by it and validates the results.
+ * Returns TEST_RESULT_SUCCESS when all TEST_ITERATIONS_COUNT rounds of the
+ * four operations (add, sub, mul, div) return the expected values, else the
+ * first failing result.
+ */
+static test_result_t issue_trustedos_service_calls(void)
+{
+	u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+	unsigned int core_pos = platform_get_core_pos(mpid);
+	/*
+	 * Initialise the result so the return value is well-defined even if
+	 * the loop below runs zero iterations (the original returned an
+	 * uninitialised value in that case).
+	 */
+	test_result_t ret = TEST_RESULT_SUCCESS;
+	int i;
+
+	/* Signal to the lead CPU that the calling CPU has entered the test */
+	tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+	for (i = 0; i < TEST_ITERATIONS_COUNT; i++) {
+		/*
+		 * TSP add function performs addition of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_ADD), 4, 6, 8, 12);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP sub function performs substraction of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_SUB), 4, 6, 0, 0);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP mul function performs multiplication of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_MUL), 4, 6, 16, 36);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+
+		/*
+		 * TSP div function performs division of argx to itself and
+		 * returns the result in argx where x is 1, 2
+		 */
+		ret = validate_tsp_operations(TSP_FAST_FID(TSP_DIV), 4, 6, 1, 1);
+		if (ret != TEST_RESULT_SUCCESS)
+			return ret;
+	}
+	return ret;
+}
+
+/*
+ * @Test_Aim@ Stress test the tsp functionality by issuing fast smc calls
+ * to perform trusted OS operations on multiple CPUs
+ * Returns Success/Failure/Skipped (if Trusted OS is absent or is not TSP)
+ */
+test_result_t test_tsp_fast_smc_operations(void)
+{
+	u_register_t lead_cpu;
+	u_register_t cpu_mpid;
+	int cpu_node;
+	unsigned int core_pos;
+	int ret;
+
+	SKIP_TEST_IF_TSP_NOT_PRESENT();
+
+	lead_cpu = read_mpidr_el1() & MPID_MASK;
+
+	/* Boot every secondary CPU into the fast-SMC stress routine. */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU as it is already on */
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		ret = tftf_cpu_on(cpu_mpid,
+				(uintptr_t) issue_trustedos_service_calls,
+				0);
+		if (ret != PSCI_E_SUCCESS) {
+			tftf_testcase_printf("Failed to power on CPU 0x%llx (%i)\n",
+					(unsigned long long) cpu_mpid, ret);
+			return TEST_RESULT_FAIL;
+		}
+	}
+
+	/* Wait for non-lead CPUs to enter the test */
+	for_each_cpu(cpu_node) {
+		cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+		/* Skip lead CPU */
+		if (cpu_mpid == lead_cpu)
+			continue;
+
+		core_pos = platform_get_core_pos(cpu_mpid);
+		tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+	}
+
+	/* The lead CPU runs the same calls, so all cores stress the TSP. */
+	return issue_trustedos_service_calls();
+}
diff --git a/tftf/tests/template_tests/test_template_multi_core.c b/tftf/tests/template_tests/test_template_multi_core.c
new file mode 100644
index 000000000..098653240
--- /dev/null
+++ b/tftf/tests/template_tests/test_template_multi_core.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/*
+ * Test entry point function for non-lead CPUs.
+ * Specified by the lead CPU when bringing up other CPUs.
+ */
+static test_result_t non_lead_cpu_fn(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Template code for a test running on multiple CPUs.
+ *
+ * This "test" powers on all CPUs on the platform and reports test success.
+ * The function test_template_multi_core() runs on the lead CPU only.
+ * The test entrypoint for other CPUs is non_lead_cpu_fn(), as specified when
+ * bringing them up.
+ *
+ * This "test" is skipped on single-core platforms. If an error occurs during
+ * the bring-up of non-lead CPUs, it is skipped as well. Otherwise, this test
+ * always returns success.
+ */
+test_result_t test_template_multi_core(void)
+{
+ unsigned int lead_mpid;
+ unsigned int cpu_mpid, cpu_node;
+ unsigned int core_pos;
+ int psci_ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t) non_lead_cpu_fn, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf(
+ "Failed to power on CPU 0x%x (%d)\n",
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_SKIPPED;
+ }
+ }
+
+ /* Wait for non-lead CPUs to enter the test */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+ /* Skip lead CPU */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/template_tests/test_template_single_core.c b/tftf/tests/template_tests/test_template_single_core.c
new file mode 100644
index 000000000..3155baf5a
--- /dev/null
+++ b/tftf/tests/template_tests/test_template_single_core.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+
+/*
+ * @Test_Aim@ Template code for a test running on a single CPU.
+ *
+ * This "test" does nothing but report test success. It runs on the lead CPU.
+ */
+test_result_t test_template_single_core(void)
+{
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/tests-arm-state-switch.xml b/tftf/tests/tests-arm-state-switch.xml
new file mode 100644
index 000000000..244f1bc4a
--- /dev/null
+++ b/tftf/tests/tests-arm-state-switch.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <!--
+ Test suite exercising execution state switch SiP service.
+
+ 'test_exec_state_switch_reset_before' must execute first in the suite, and
+ 'test_exec_state_switch_after_cpu_on' the last. See comments in
+ test_exec_state_switch.c for details.
+ -->
+ <testsuite name="State switch" description="Test ARM SiP State Switch service">
+ <testcase name="System reset before state switch" function="test_exec_state_switch_reset_before" />
+ <testcase name="Request state switch with invalid PC" function="test_exec_state_switch_invalid_pc" />
+ <testcase name="Request state switch with invalid context" function="test_exec_state_switch_invalid_ctx" />
+ <testcase name="Request a valid state switch" function="test_exec_state_switch_valid" />
+ <testcase name="Request a valid state switch after CPU_ON" function="test_exec_state_switch_after_cpu_on" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-boot-req.xml b/tftf/tests/tests-boot-req.xml
new file mode 100644
index 000000000..27d484406
--- /dev/null
+++ b/tftf/tests/tests-boot-req.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Boot requirement tests" description="Tests for boot requirement according to ARM ARM and PSCI">
+ <testcase name="CNTFRQ compare test" function="test_cntfrq_check" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-common.xml b/tftf/tests/tests-common.xml
new file mode 100644
index 000000000..e2a1a2bd8
--- /dev/null
+++ b/tftf/tests/tests-common.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+
+<!-- External references to all individual tests files. -->
+<!DOCTYPE testsuites [
+ <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
+ <!ENTITY tests-boot-req SYSTEM "tests-boot-req.xml">
+ <!ENTITY tests-psci SYSTEM "tests-psci.xml">
+ <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
+ <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
+ <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
+ <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
+ <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
+ <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
+ <!ENTITY tests-performance SYSTEM "tests-performance.xml">
+]>
+
+<testsuites>
+
+ &tests-tftf-validation;
+ &tests-boot-req;
+ &tests-psci;
+ &tests-sdei;
+ &tests-rt-instr;
+ &tests-tsp;
+ &tests-el3-pstate;
+ &tests-state-switch;
+ &tests-cpu-extensions;
+ &tests-performance;
+
+</testsuites>
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
new file mode 100644
index 000000000..17f37fd40
--- /dev/null
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="CPU extensions" description="Various CPU extensions tests">
+ <testcase name="AMUv1 non-zero counters" function="test_amu_nonzero_ctr" />
+ <testcase name="AMUv1 suspend/resume" function="test_amu_suspend_resume" />
+ </testsuite>
+
+ <testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">
+ <testcase name="SMCCC_ARCH_WORKAROUND_1 test" function="test_smccc_arch_workaround_1" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_2 test" function="test_smccc_arch_workaround_2" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-el3-power-state.xml b/tftf/tests/tests-el3-power-state.xml
new file mode 100644
index 000000000..852401dc9
--- /dev/null
+++ b/tftf/tests/tests-el3-power-state.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="EL3 power state parser validation" description="Validation of EL3 power state parsing algorithm">
+ <testcase name="Create all power states and validate EL3 power state parsing" function="test_psci_validate_pstate" />
+ <testcase name="Create only local power state and validate EL3 power state parsing" function="test_psci_valid_local_pstate" />
+ <testcase name="Create invalid local power state at all levels and validate EL3 power state parsing" function="test_psci_invalid_stateID" />
+ <testcase name="Create invalid power state type and validate EL3 power state parsing" function="test_psci_invalid_state_type" />
+ <testcase name="Create invalid power level and validate EL3 power state parsing for original state format" function="test_psci_invalid_power_level" />
+ <testcase name="Create a power state with valid and invalid local state ID at different levels and validate power state parsing" function="test_psci_mixed_state_id" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-extensive.xml b/tftf/tests/tests-extensive.xml
new file mode 100644
index 000000000..7ba17d302
--- /dev/null
+++ b/tftf/tests/tests-extensive.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+
+<!-- External references to all individual tests files. -->
+<!DOCTYPE testsuites [
+ <!ENTITY tests-psci-extensive SYSTEM "tests-psci-extensive.xml">
+
+ <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
+ <!ENTITY tests-psci SYSTEM "tests-psci.xml">
+ <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
+ <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
+ <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
+ <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
+ <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
+ <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
+ <!ENTITY tests-performance SYSTEM "tests-performance.xml">
+]>
+
+<testsuites>
+
+ &tests-psci-extensive;
+
+ &tests-tftf-validation;
+ &tests-psci;
+ &tests-sdei;
+ &tests-rt-instr;
+ &tests-tsp;
+ &tests-el3-pstate;
+ &tests-state-switch;
+ &tests-cpu-extensions;
+ &tests-performance;
+
+</testsuites>
diff --git a/tftf/tests/tests-fwu.xml b/tftf/tests/tests-fwu.xml
new file mode 100644
index 000000000..0d86309f3
--- /dev/null
+++ b/tftf/tests/tests-fwu.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="fwu toc test" description="Validate FWU TOC invalid scenario">
+ <testcase name="FWU TOC Invalid" function="test_fwu_toc" />
+ </testsuite>
+
+ <testsuite name="fwu auth test" description="Validate FWU AUTH Failure scenario">
+ <testcase name="FWU AUTH Failure" function="test_fwu_auth" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-manual.xml b/tftf/tests/tests-manual.xml
new file mode 100644
index 000000000..15a9609fc
--- /dev/null
+++ b/tftf/tests/tests-manual.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+
+ <testsuite name="PSCI STAT" description="Test PSCI STAT support System level">
+ <testcase name="for stats after system reset" function="test_psci_stats_after_reset" />
+ <testcase name="for stats after system shutdown" function="test_psci_stats_after_shutdown" />
+ </testsuite>
+
+ <testsuite name="System off test" description="Validate SYSTEM_OFF PSCI call">
+ <testcase name="System Off" function="test_system_off" />
+ </testsuite>
+
+ <testsuite name="PSCI mem_protect" description="Check the mem_protect feature">
+ <testcase name="PSCI mem_protect" function="test_mem_protect" />
+ </testsuite>
+
+ <testsuite name="PSCI reset2" description="Check the reset2 feature">
+ <testcase name="PSCI reset2 - warm reset" function="reset2_warm" />
+ <testcase name="PSCI reset2 - invalid reset options" function="reset2_test_invalid" />
+ <testcase name="PSCI reset2 - warm reset and mem_protect" function="reset2_mem_protect" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-performance.xml b/tftf/tests/tests-performance.xml
new file mode 100644
index 000000000..5ad53478d
--- /dev/null
+++ b/tftf/tests/tests-performance.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Performance tests" description="Measure some performance">
+ <testcase name="PSCI_VERSION latency" function="smc_psci_version_latency" />
+ <testcase name="Standard Service Call UID latency" function="smc_std_svc_call_uid_latency" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_1 latency" function="smc_arch_workaround_1" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci-extensive.xml b/tftf/tests/tests-psci-extensive.xml
new file mode 100644
index 000000000..a2c05b248
--- /dev/null
+++ b/tftf/tests/tests-psci-extensive.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="PSCI CPU ON OFF Stress Tests" description="Stress-test hotplug">
+ <testcase name="Repeated shutdown of all cores to stress test CPU_ON, CPU_SUSPEND and CPU_OFF"
+ function="psci_on_off_suspend_coherency_test" />
+ <!-- testcase name="Verify PSCI CPU ON race" function="psci_verify_cpu_on_race" / -->
+ <testcase name="PSCI CPU ON OFF stress test" function="psci_cpu_on_off_stress" />
+ <testcase name="PSCI CPU ON OFF SUSPEND stress test" function="psci_cpu_on_off_suspend_stress" />
+ <testcase name="Repeated hotplug of all cores to stress test CPU_ON and CPU_OFF"
+ function="psci_hotplug_stress_test" />
+ <testcase name="Random hotplug cores in a large iteration to stress boot path code"
+ function="psci_hotplug_single_core_stress_test" />
+ <testcase name="Hotplug a cluster in a large iteration to stress cluster on and off functionality"
+ function="psci_cluster_hotplug_stress_test" />
+ </testsuite>
+
+ <testsuite name="PSCI SYSTEM SUSPEND stress tests" description="Stress-test SYSTEM SUSPEND">
+ <testcase name="Stress test PSCI_SYSTEM_SUSPEND" function="psci_sys_susp_on_off_stress_test" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci.xml b/tftf/tests/tests-psci.xml
new file mode 100644
index 000000000..e2be55724
--- /dev/null
+++ b/tftf/tests/tests-psci.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <!--
+ Strictly speaking, this testsuite is not testing PSCI but we put it here
+ nonetheless to avoid having it alone in a separate XML file.
+ -->
+ <testsuite name="Query runtime services" description="Generic queries as defined by the SMCCC">
+ <testcase name="Unknown SMC" function="test_unknown_smc" />
+ <testcase name="Query Standard Service" function="test_query_std_svc" />
+ </testsuite>
+
+ <testsuite name="PSCI Version" description="Check the version of PSCI implemented">
+ <testcase name="PSCI Version" function="test_psci_version" />
+ </testsuite>
+
+ <testsuite name="PSCI Affinity Info" description="Test PSCI AFFINITY_INFO support">
+ <testcase name="Affinity info level0 on" function="test_affinity_info_level0_on" />
+ <testcase name="Affinity info level0 off" function="test_affinity_info_level0_off" />
+ <testcase name="Affinity info level1 on" function="test_affinity_info_level1_on" />
+ <testcase name="Affinity info level1 off" function="test_affinity_info_level1_off" />
+ <testcase name="Affinity info level2" function="test_affinity_info_level2" />
+ <testcase name="Affinity info level3" function="test_affinity_info_level3" />
+ <testcase name="Affinity info level0 powerdown" function="test_affinity_info_level0_powerdown" />
+ </testsuite>
+
+ <testsuite name="CPU Hotplug" description="Test PSCI CPU Hotplug support">
+ <testcase name="CPU hotplug" function="test_psci_cpu_hotplug" />
+ <testcase name="CPU already on" function="test_psci_cpu_hotplug_plugged" />
+ <testcase name="Context ID passing" function="test_context_ids" />
+ <testcase name="Invalid CPU" function="test_psci_cpu_hotplug_invalid_cpu" />
+ <testcase name="Invalid entry point" function="test_psci_cpu_hotplug_invalid_ep" />
+ </testsuite>
+
+ <testsuite name="PSCI CPU Suspend" description="Test PSCI CPU Suspend support">
+ <testcase name="CPU suspend to powerdown at level 0" function="test_psci_suspend_powerdown_level0" />
+ <testcase name="CPU suspend to powerdown at level 1" function="test_psci_suspend_powerdown_level1" />
+ <testcase name="CPU suspend to powerdown at level 2" function="test_psci_suspend_powerdown_level2" />
+ <testcase name="CPU suspend to powerdown at level 3" function="test_psci_suspend_powerdown_level3" />
+
+ <testcase name="CPU suspend to standby at level 0" function="test_psci_suspend_standby_level0" />
+ <testcase name="CPU suspend to standby at level 1" function="test_psci_suspend_standby_level1" />
+ <testcase name="CPU suspend to standby at level 2" function="test_psci_suspend_standby_level2" />
+ <testcase name="CPU suspend to standby at level 3" function="test_psci_suspend_standby_level3" />
+ </testsuite>
+
+ <testsuite name="PSCI STAT" description="Test PSCI STAT support Core level">
+ <testcase name="for valid composite state CPU suspend" function="test_psci_stat_all_power_states" />
+ <testcase name="Stats test cases for CPU OFF" function="test_psci_stats_cpu_off" />
+ <testcase name="Stats test cases after system suspend" function="test_psci_stats_system_suspend" />
+ </testsuite>
+
+ <testsuite name="PSCI NODE_HW_STATE" description="Test PSCI NODE_HW_STATE API">
+ <testcase name="Tests for NODE_HW_STATE" function="test_psci_node_hw_state" />
+ <testcase name="Tests for NODE_HW_STATE on multicluster" function="test_psci_node_hw_state_multi" />
+ </testsuite>
+
+ <testsuite name="PSCI Features" description="Check the PSCI features implemented">
+ <testcase name="PSCI Features" function="test_psci_features" />
+ <testcase name="PSCI Invalid Features" function="test_psci_features_invalid_id" />
+ </testsuite>
+
+ <testsuite name="PSCI MIGRATE_INFO_TYPE" description="Test MIGRATE_INFO_TYPE support">
+ <testcase name="PSCI MIGRATE_INFO_TYPE" function="test_migrate_info_type" />
+ </testsuite>
+
+ <testsuite name="PSCI mem_protect_check" description="Check the mem_protect_check_range feature">
+ <testcase name="PSCI mem_protect_check" function="test_mem_protect_check" />
+ </testsuite>
+
+ <testsuite name="PSCI System Suspend Validation" description="Validate PSCI System Suspend API">
+ <testcase name="System suspend multiple times" function="test_psci_sys_susp_multiple_iteration" />
+ <testcase name="system suspend from all cores" function="test_system_suspend_from_all_cores" />
+ <testcase name="System suspend with cores on" function="test_psci_sys_susp_with_cores_on" />
+ <testcase name="Suspend system with cores in suspend" function="test_psci_sys_susp_with_cores_in_suspend" />
+ <testcase name="Validate suspend to RAM functionality" function="test_psci_sys_susp_validate_ram" />
+ <testcase name="System suspend with invalid entrypoint address" function="test_system_suspend_invalid_entrypoint" />
+ <testcase name="System suspend with pending IRQ" function="test_psci_sys_susp_pending_irq" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-runtime-instrumentation.xml b/tftf/tests/tests-runtime-instrumentation.xml
new file mode 100644
index 000000000..4d4590024
--- /dev/null
+++ b/tftf/tests/tests-runtime-instrumentation.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Runtime Instrumentation Validation" description="Validate PMF Runtime Instrumentation">
+ <testcase name="Suspend to deepest power level on all cores in parallel" function="test_rt_instr_susp_deep_parallel" />
+ <testcase name="Suspend to deepest power level on all cores in sequence" function="test_rt_instr_susp_deep_serial" />
+ <testcase name="CPU suspend on all cores in parallel" function="test_rt_instr_cpu_susp_parallel" />
+ <testcase name="CPU suspend on all cores in sequence" function="test_rt_instr_cpu_susp_serial" />
+ <testcase name="CPU off on all non-lead cores in sequence and suspend lead to deepest power level" function="test_rt_instr_cpu_off_serial" />
+ <testcase name="PSCI version call on all cores in parallel" function="test_rt_instr_psci_version_parallel" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-sdei.xml b/tftf/tests/tests-sdei.xml
new file mode 100644
index 000000000..db6b0c9b8
--- /dev/null
+++ b/tftf/tests/tests-sdei.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="SDEI" description="SDEI test framework">
+ <testcase name="SDEI event handler state machine testing" function="test_sdei_state" />
+ <testcase name="SDEI event handling on all cores in sequence" function="test_sdei_event_serial" />
+ <testcase name="SDEI event handling on all cores in parallel" function="test_sdei_event_parallel" />
+ <testcase name="SDEI event signaling: each core signals itself" function="test_sdei_event_signal_serial" />
+ <testcase name="SDEI event signaling: one core signals all others" function="test_sdei_event_signal_all" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-single-fault.xml b/tftf/tests/tests-single-fault.xml
new file mode 100644
index 000000000..570be467a
--- /dev/null
+++ b/tftf/tests/tests-single-fault.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Single fault" description="Single fault injection">
+ <testcase name="Inject SError and wait" function="test_single_fault" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
new file mode 100644
index 000000000..ff3ff44d1
--- /dev/null
+++ b/tftf/tests/tests-spm.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="SPM tests"
+ description="Test SPM APIs">
+ <testcase name="SPM NS interrupts test"
+ function="test_secure_partition_interrupt_by_ns" />
+ <testcase name="SPM secondary CPUs sequential test"
+ function="test_secure_partition_secondary_cores_seq" />
+ <testcase name="SPM secondary CPUs simultaneous test"
+ function="test_secure_partition_secondary_cores_sim" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-template.xml b/tftf/tests/tests-template.xml
new file mode 100644
index 000000000..d55595e4a
--- /dev/null
+++ b/tftf/tests/tests-template.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <!--
+ The "template" testsuite aims at providing template test code as a
+ starting point for developing new tests. These tests don't do anything
+ useful in terms of testing.
+ -->
+ <testsuite name="Template" description="Template test code">
+ <testcase name="Single core test" function="test_template_single_core" />
+ <testcase name="Multi core test" function="test_template_multi_core" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-tftf-validation.xml b/tftf/tests/tests-tftf-validation.xml
new file mode 100644
index 000000000..932b10e8b
--- /dev/null
+++ b/tftf/tests/tests-tftf-validation.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Framework Validation" description="Validate the core features of the test framework">
+ <testcase name="NVM support" function="test_validation_nvm" />
+ <testcase name="NVM serialisation" function="test_validate_nvm_serialisation" />
+ <testcase name="Events API" function="test_validation_events" />
+ <testcase name="IRQ handling" function="test_validation_irq" />
+ <testcase name="SGI support" function="test_validation_sgi" />
+ </testsuite>
+
+ <testsuite name="Timer framework Validation" description="Validate the timer driver and timer framework">
+ <testcase name="Verify the timer interrupt generation" function="test_timer_framework_interrupt" />
+ <testcase name="Target timer to a power down cpu" function="test_timer_target_power_down_cpu" />
+ <testcase name="Test scenario where multiple CPUs call same timeout" function="test_timer_target_multiple_same_interval" />
+ <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-tsp.xml b/tftf/tests/tests-tsp.xml
new file mode 100644
index 000000000..e98df5f78
--- /dev/null
+++ b/tftf/tests/tests-tsp.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="IRQ support in TSP" description="Test the normal IRQ preemption support in TSP.">
+ <testcase name="TSP preempt by IRQ and resume" function="tsp_int_and_resume" />
+ <testcase name="Fast SMC while TSP preempted" function="test_fast_smc_when_tsp_preempted" />
+ <testcase name="STD SMC resumption while TSP preempted" function="test_std_smc_when_tsp_preempted_resume" />
+ <testcase name="STD SMC abortion while TSP preempted" function="test_std_smc_when_tsp_preempted_abort" />
+ <testcase name="Resume SMC without TSP preemption" function="test_resume_smc_without_preemption" />
+ <testcase name="Stress TSP preemption and resumption" function="tsp_int_and_resume_stress" />
+ <testcase name="Test Secure FIQ while TSP is preempted" function="tsp_fiq_while_int" />
+ <testcase name="Resume preempted STD SMC" function="test_irq_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC from other CPUs" function="test_resume_preempted_std_smc_other_cpus" />
+ <testcase name="Resume STD SMC from different CPUs" function="test_resume_different_cpu_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC after PSCI CPU OFF/ON cycle" function="test_psci_cpu_on_off_preempted_std_smc" />
+ <testcase name="Resume preempted STD SMC after PSCI SYSTEM SUSPEND" function="test_psci_system_suspend_preempted_std_smc" />
+ </testsuite>
+
+ <testsuite name="TSP handler standard functions result test" description="Validate TSP SMC standard function call">
+ <testcase name="TestSecurePayload standard functions service call" function="test_smc_tsp_std_fns_call" />
+ </testsuite>
+
+ <testsuite name="Stress test TSP functionality" description="Validate TSP functionality">
+ <testcase name="Stress test TSP functionality" function="test_tsp_fast_smc_operations" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-uncontainable.xml b/tftf/tests/tests-uncontainable.xml
new file mode 100644
index 000000000..ecff3ac78
--- /dev/null
+++ b/tftf/tests/tests-uncontainable.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2018, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Uncontainable error" description="Uncontainable error">
+ <testcase name="Inject Uncontainable error" function="test_uncontainable" />
+ </testsuite>
+</testsuites>
+
diff --git a/tftf/tests/tests.mk b/tftf/tests/tests.mk
new file mode 100644
index 000000000..8ee8cc248
--- /dev/null
+++ b/tftf/tests/tests.mk
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Path to the XML file listing the tests to run. If there is a platform-specific
+# test file, use it. If not, use the common one. If the user specified another
+# one, use that one instead.
+ifneq ($(wildcard ${PLAT_PATH}/tests.xml),)
+ TESTS_FILE := ${PLAT_PATH}/tests.xml
+else
+ TESTS_FILE := tftf/tests/tests-common.xml
+endif
+
+# Check that the selected tests file exists.
+ifeq (,$(wildcard ${TESTS_FILE}))
+ $(error "The file TESTS_FILE points to cannot be found")
+endif
+
+SPM_TESTS_SOURCES := \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ secure_service_helpers.c \
+ test_secure_service_handle.c \
+ test_secure_service_interrupts.c \
+ )
+
+FWU_TESTS_SOURCES := \
+ tftf/tests/fwu_tests/test_fwu_toc.c \
+ tftf/tests/fwu_tests/test_fwu_auth.c \
+ plat/common/fwu_nvm_accessors.c
+
+TESTS_SOURCES := $(addprefix tftf/tests/, \
+ extensions/amu/test_amu.c \
+ framework_validation_tests/test_timer_framework.c \
+ framework_validation_tests/test_validation_events.c \
+ framework_validation_tests/test_validation_irq.c \
+ framework_validation_tests/test_validation_nvm.c \
+ framework_validation_tests/test_validation_sgi.c \
+ misc_tests/inject_serror.S \
+ misc_tests/test_single_fault.c \
+ misc_tests/test_uncontainable.c \
+ performance_tests/smc_latencies.c \
+ misc_tests/boot_req_tests/test_cntfrq.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_1.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_2.c \
+ runtime_services/sip_service/test_exec_state_switch.c \
+ runtime_services/sip_service/test_exec_state_switch_asm.S \
+ runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c \
+ runtime_services/standard_service/psci/api_tests/affinity_info/test_psci_affinity_info.c \
+ runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug.c \
+ runtime_services/standard_service/psci/api_tests/cpu_hotplug/test_psci_hotplug_invalid.c \
+ runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c \
+ runtime_services/standard_service/psci/api_tests/migrate_info_type/test_migrate_info_type.c \
+ runtime_services/standard_service/psci/api_tests/psci_features/test_psci_features.c \
+ runtime_services/standard_service/psci/api_tests/psci_node_hw_state/test_node_hw_state.c \
+ runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c \
+ runtime_services/standard_service/psci/api_tests/psci_version/test_psci_version.c \
+ runtime_services/standard_service/psci/api_tests/system_off/test_system_off.c \
+ runtime_services/standard_service/psci/api_tests/system_suspend/test_psci_system_suspend.c \
+ runtime_services/standard_service/psci/api_tests/validate_power_state/test_validate_power_state.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_hotplug_stress.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c \
+ runtime_services/standard_service/psci/system_tests/test_psci_system_suspend_stress.c \
+ runtime_services/standard_service/psci/api_tests/mem_protect/test_mem_protect.c \
+ runtime_services/standard_service/psci/api_tests/mem_protect_check/mem_protect_check.c \
+ runtime_services/standard_service/psci/api_tests/reset2/reset2.c \
+ runtime_services/standard_service/query_std_svc.c \
+ runtime_services/standard_service/unknown_smc.c \
+ runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S \
+ runtime_services/standard_service/sdei/system_tests/test_sdei.c \
+ runtime_services/standard_service/sdei/system_tests/test_sdei_state.c \
+ runtime_services/trusted_os/tsp/test_irq_preempted_std_smc.c \
+ runtime_services/trusted_os/tsp/test_normal_int_switch.c \
+ runtime_services/trusted_os/tsp/test_smc_tsp_std_fn_call.c \
+ runtime_services/trusted_os/tsp/test_tsp_fast_smc.c \
+)
+
+TESTS_SOURCES += ${SPM_TESTS_SOURCES} \
+ ${FWU_TESTS_SOURCES}
+
+# The following source files are part of the "template" testsuite, which aims
+# at providing template test code as a starting point for developing new tests.
+# They don't do anything useful in terms of testing so they are disabled by
+# default. Uncomment those lines along with the corresponding test suite XML
+# node in the tests/tests.xml file to enable them.
+# TESTS_SOURCES += tftf/tests/template_tests/test_template_single_core.c \
+# tftf/tests/template_tests/test_template_multi_core.c
diff --git a/tools/generate_test_list/generate_test_list.pl b/tools/generate_test_list/generate_test_list.pl
new file mode 100755
index 000000000..bcfdaffba
--- /dev/null
+++ b/tools/generate_test_list/generate_test_list.pl
@@ -0,0 +1,192 @@
#!/usr/bin/env perl

#
# Copyright (c) 2018, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

#
# Generate a C source file and a header file describing the list of test
# suites / test cases to build into the TFTF binary, from an XML test list.
#
# Arg0: Name of the C file to generate.
# Arg1: Name of the header file to generate.
# Arg2: XML file that contains the list of test suites.
# Arg3: Text file listing the files to skip. Takes precedence over Arg2 file.
#

# Enable strictures and load modules before any run-time code, so that
# undeclared variables and similar mistakes are caught at compile time.
use strict;
use warnings;
use File::Temp;
use XML::LibXML;

my $TESTLIST_SRC_FILENAME = $ARGV[0];
my $TESTLIST_HDR_FILENAME = $ARGV[1];
my $XML_TEST_FILENAME     = $ARGV[2];
my $SKIPPED_LIST_FILENAME = $ARGV[3];
+
# Create the source & header files.
# NOTE: bareword (package-global) filehandles are used deliberately here:
# the generation code further down this script prints to FILE_SRC/FILE_HDR.
open FILE_SRC, ">", $TESTLIST_SRC_FILENAME or die $!;
open FILE_HDR, ">", $TESTLIST_HDR_FILENAME or die $!;

#
# Open and parse the test list. Abort with a diagnostic (rather than
# silently, as before) if the XML file does not exist.
#
my $doc;

if (-e $XML_TEST_FILENAME) {
    my $parser = XML::LibXML->new();
    $doc = $parser->parse_file($XML_TEST_FILENAME);
} else {
    print STDERR "ERROR: Can't find test list file '$XML_TEST_FILENAME'.\n";
    exit 1;
}
+
# We assume if there is a root then it is a 'testsuites' element
my $root = $doc->documentElement();
my @all_testcases = $root->findnodes("//testcase");
my @all_testsuites = $root->findnodes("//testsuite");


# Sanity-check the XML test list:
# - A testsuite name must be unique.
# - A testsuite name must not contain a '/' character.
# - All test cases belonging to a given testsuite must have unique names.
for my $suite (@all_testsuites) {
    my $suite_name = $suite->getAttribute('name');

    # '/' is reserved as the "testsuite/testcase" separator in skip lists.
    if ($suite_name =~ m{/}) {
        print "ERROR: $XML_TEST_FILENAME: Invalid test suite name '$suite_name'.\n";
        print "ERROR: $XML_TEST_FILENAME: Test suite names can't include a '/' character.\n";
        exit 1;
    }

    # More than one suite node matching this name means a duplicate.
    my @suites_with_name = $root->findnodes("//testsuite[\@name='$suite_name']");
    if (scalar(@suites_with_name) != 1) {
        print "ERROR: $XML_TEST_FILENAME: Can't have 2 test suites named '$suite_name'.\n";
        exit 1;
    }

    # Within a single suite, every test case name must also be unique.
    for my $case ($suite->findnodes("testcase")) {
        my $case_name = $case->getAttribute('name');
        my @cases_with_name = $suite->findnodes("testcase[\@name='$case_name']");
        if (scalar(@cases_with_name) != 1) {
            print "ERROR: $XML_TEST_FILENAME: Can't have 2 tests named '$suite_name/$case_name'.\n";
            exit 1;
        }
    }
}
+
#
# Get the list of tests to skip.
# For each test to skip, find it in the XML tree and remove its node.
#
# Each line of the skip file is either "testsuite" (skip the whole suite) or
# "testsuite/testcase" (skip one test). Blank lines and '#' comments are
# ignored. An unreadable/absent skip file is silently tolerated (best-effort),
# matching the original behaviour.
#
if ($SKIPPED_LIST_FILENAME) {
    if (open(my $skipped_fh, "<", $SKIPPED_LIST_FILENAME)) {
        my @lines = <$skipped_fh>;
        # Bug fix: close the filehandle itself; the previous code passed the
        # *filename string* to close(), leaking the handle.
        close $skipped_fh;

        # Remove the newlines from the end of each line.
        chomp @lines;

        my $line_no = 0;

        for my $line (@lines) {
            ++$line_no;

            # Skip empty lines.
            next if ($line =~ /^ *$/);
            # Skip comments.
            next if ($line =~ /^#/);

            my ($testsuite_name, $testcase_name) = split('/', $line);

            my @testsuites = $root->findnodes("//testsuite[\@name=\"$testsuite_name\"]");
            if (!@testsuites) {
                print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test suite '$testsuite_name' doesn't exist or has already been deleted.\n";
                next;
            }

            # No testcase component => drop the entire suite.
            if (!defined $testcase_name) {
                print "INFO: Testsuite '$testsuite_name' will be skipped.\n";
                $testsuites[0]->unbindNode();
                next;
            }

            my @testcases = $testsuites[0]->findnodes("testcase[\@name=\"$testcase_name\"]");
            if (!@testcases) {
                print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test case '$testsuite_name/$testcase_name' doesn't exist or has already been deleted.\n";
                next;
            }

            print "INFO: Testcase '$testsuite_name/$testcase_name' will be skipped.\n";
            $testcases[0]->unbindNode();
        }
    }
}
+
+
# Re-query the test cases now that skipped nodes have been unbound.
@all_testcases = $root->findnodes("//testcase");

#
# Generate the test function prototypes
#
my $testcase_count = 0;

print FILE_SRC "#include \"tftf.h\"\n\n";

for my $case (@all_testcases) {
    $testcase_count++;
    print FILE_SRC "test_result_t " . $case->getAttribute('function') . "(void);\n";
}
+
#
# Generate the header file: an include guard wrapping the macro giving the
# total number of test cases. One concatenated print, same bytes as before.
#
print FILE_HDR "#ifndef __TEST_LIST_H__\n"
             . "#define __TEST_LIST_H__\n\n"
             . "#define TESTCASE_RESULT_COUNT $testcase_count\n\n"
             . "#endif\n";
+
#
# Generate the lists of testcases
#
# One 'testcases_<i>' C array is emitted per test suite, each terminated by
# a sentinel entry. Test case indices are global across all suites.
#
my $testsuite_index = 0;
my $testcase_index = 0;
@all_testsuites = $root->findnodes("//testsuite");
for my $suite (@all_testsuites) {
    my $suite_name = $suite->getAttribute('name');
    my @cases = $suite->findnodes("//testsuite[\@name='$suite_name']//testcase");

    print FILE_SRC "\nconst test_case_t testcases_${testsuite_index}[] = {\n";

    for my $case (@cases) {
        my $case_name = $case->getAttribute('name');
        # A missing 'description' attribute falls back to an empty string.
        my $case_desc = $case->getAttribute('description') // "";
        my $case_func = $case->getAttribute('function');

        print FILE_SRC " { $testcase_index, \"$case_name\", \"$case_desc\", $case_func },\n";

        $testcase_index++;
    }
    print FILE_SRC " { 0, NULL, NULL, NULL }\n";
    print FILE_SRC "};\n\n";

    $testsuite_index++;
}
+
#
# Generate the list of testsuites, terminated by a NULL sentinel entry.
#
$testsuite_index = 0;
print FILE_SRC "const test_suite_t testsuites[] = {\n";
for my $testsuite (@all_testsuites) {
    my $testsuite_name = $testsuite->getAttribute('name');
    my $testsuite_description = $testsuite->getAttribute('description');
    # Default to an empty string, as is done for test cases, so a missing
    # 'description' attribute doesn't interpolate undef into the output.
    if (!defined($testsuite_description)) { $testsuite_description = ""; }
    print FILE_SRC " { \"$testsuite_name\", \"$testsuite_description\", testcases_${testsuite_index} },\n";
    $testsuite_index++;
}
print FILE_SRC " { NULL, NULL, NULL }\n";
print FILE_SRC "};\n";

# Close the generated files explicitly: buffered write errors only surface
# at close time, and we want generation failures to be loud.
close FILE_SRC or die $!;
close FILE_HDR or die $!;