Add framework as a flattened directory

Signed-off-by: Minos Galanakis <minos.galanakis@arm.com>
diff --git a/framework/scripts/all-core.sh b/framework/scripts/all-core.sh
new file mode 100644
index 0000000..1882c1c
--- /dev/null
+++ b/framework/scripts/all-core.sh
@@ -0,0 +1,1038 @@
+# all-core.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+################################################################
+#### Documentation
+################################################################
+
+# Purpose
+# -------
+#
+# To run all tests that are possible or available on the platform.
+#
+# File structure
+# --------------
+#
+# The executable entry point for users and the CI is tests/scripts/all.sh.
+#
+# The actual content is in the following files:
+# - all-core.sh contains the core logic for running test components,
+#   processing command line options, reporting results, etc.
+# - all-helpers.sh contains helper functions used by more than one component.
+# - components-*.sh contain the definitions of the various components.
+#
+# The first two parts are shared between repos and branches;
+# the component files are repo- and branch-specific.
+#
+# The files all-*.sh and components-*.sh should only define functions and not
+# run code when sourced; the only exception being that all-core.sh runs
+# 'shopt' because that is necessary for the rest of the file to parse.
+#
+# Notes for users
+# ---------------
+#
+# Warning: the test is destructive. It includes various build modes and
+# configurations, and can and will arbitrarily change the current CMake
+# configuration. The following files must be committed into git:
+#    * include/mbedtls/mbedtls_config.h
+#    * Makefile, library/Makefile, programs/Makefile, tests/Makefile,
+#      programs/fuzz/Makefile
+# After running this script, the CMake cache will be lost and CMake
+# will no longer be initialised.
+#
+# The script assumes the presence of a number of tools:
+#   * Basic Unix tools (Windows users note: a Unix-style find must be before
+#     the Windows find in the PATH)
+#   * Perl
+#   * GNU Make
+#   * CMake
+#   * GCC and Clang (recent enough for using ASan with gcc and MemSan with clang, or valgrind)
+#   * G++
+#   * arm-gcc and mingw-gcc
+#   * ArmCC 6 (aka armclang), unless invoked with --no-armcc
+#   * OpenSSL and GnuTLS command line tools, in suitable versions for the
+#     interoperability tests. The following are the official versions at the
+#     time of writing:
+#     * GNUTLS_{CLI,SERV} = 3.4.10
+#     * GNUTLS_NEXT_{CLI,SERV} = 3.7.2
+#     * OPENSSL = 1.0.2g (without Debian/Ubuntu patches)
+#     * OPENSSL_NEXT = 3.1.2
+# See the invocation of check_tools below for details.
+#
+# This script must be invoked from the toplevel directory of a git
+# working copy of Mbed TLS.
+#
+# The behavior on an error depends on whether --keep-going (alias -k)
+# is in effect.
+#  * Without --keep-going: the script stops on the first error without
+#    cleaning up. This lets you work in the configuration of the failing
+#    component.
+#  * With --keep-going: the script runs all requested components and
+#    reports failures at the end. In particular the script always cleans
+#    up on exit.
+#
+# Note that the output is not saved. You may want to run
+#   script -c tests/scripts/all.sh
+# or
+#   tests/scripts/all.sh >all.log 2>&1
+#
+# Notes for maintainers
+# ---------------------
+#
+# The bulk of the code is organized into functions that follow one of the
+# following naming conventions:
+# * in all-core.sh:
+#   * pre_XXX: things to do before running the tests, in order.
+#   * post_XXX: things to do after running the tests.
+# * in components-*.sh:
+#   * component_XXX: independent components. They can be run in any order.
+#     * component_check_XXX: quick tests that aren't worth parallelizing.
+#     * component_build_XXX: build things but don't run them.
+#     * component_test_XXX: build and test.
+#     * component_release_XXX: tests that the CI should skip during PR testing.
+#   * support_XXX: if support_XXX exists and returns false then
+#     component_XXX is not run by default.
+# * in various files:
+#   * other: miscellaneous support functions.
+#
+# Each component must start by invoking `msg` with a short informative message.
+#
+# Warning: due to the way bash detects errors, the failure of a command
+# inside 'if' or '!' is not detected. Use the 'not' function instead of '!'.
+#
+# Each component is executed in a separate shell process. The component
+# fails if any command in it returns a non-zero status.
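+#
+# As an illustration of these conventions, a minimal component could look
+# like the following sketch (the component name and the commands it runs
+# are hypothetical, not part of this file):
+#
+#   component_test_example () {
+#       msg "build: example configuration"   # short informative message first
+#       make
+#       msg "test: example configuration"
+#       make test
+#       not grep ERROR example.log           # use 'not' instead of '!'
+#   }
+#   # Optional: skip the component when a required tool is missing.
+#   support_test_example () {
+#       type gdb >/dev/null 2>&1
+#   }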
+#
+# The framework performs some cleanup tasks after each component. This
+# means that components can assume that the working directory is in a
+# cleaned-up state, and don't need to perform the cleanup themselves.
+# * Run `make clean`.
+# * Restore the various config files (potentially modified by config.py) from
+#   a backup made when starting the script.
+# * If in Mbed TLS, restore the various `Makefile`s (potentially modified by
+#   in-tree use of CMake) from a backup made when starting the script. (Note:
+#   if the files look generated when starting the script, they will be
+#   restored from the git index before making the backup.)
+
+
+################################################################
+#### Initialization and command line parsing
+################################################################
+
+# Enable ksh/bash extended file matching patterns.
+# Must come before function definitions or some of them wouldn't parse.
+shopt -s extglob
+
+pre_set_shell_options () {
+    # Abort on errors (even on the left-hand side of a pipe).
+    # Treat uninitialised variables as errors.
+    set -e -o pipefail -u
+}
+
+pre_check_environment () {
+
+    source "$FRAMEWORK/scripts/project_detection.sh"
+
+    if in_mbedtls_repo || in_tf_psa_crypto_repo; then :; else
+        echo "Must be run from Mbed TLS / TF-PSA-Crypto root" >&2
+        exit 1
+    fi
+}
+
+# Must be called before pre_initialize_variables which sets ALL_COMPONENTS.
+pre_load_components () {
+    # Include the components from components.sh
+    # Use a path relative to the current directory, i.e. the project's root.
+    for file in tests/scripts/components-*.sh; do
+        source "$file"
+    done
+}
+
+pre_initialize_variables () {
+    if in_mbedtls_repo; then
+        CONFIG_H='include/mbedtls/mbedtls_config.h'
+        if in_3_6_branch; then
+            CRYPTO_CONFIG_H='include/psa/crypto_config.h'
+            # helper_armc6_build_test() relies on these being defined,
+            # but empty if the paths don't exist (as in 3.6).
+            PSA_CORE_PATH=''
+            BUILTIN_SRC_PATH=''
+            CONFIG_TEST_DRIVER_H='tests/configs/config_test_driver.h'
+        else
+            CRYPTO_CONFIG_H='tf-psa-crypto/include/psa/crypto_config.h'
+            PSA_CORE_PATH='tf-psa-crypto/core'
+            BUILTIN_SRC_PATH='tf-psa-crypto/drivers/builtin/src'
+            CONFIG_TEST_DRIVER_H='tf-psa-crypto/tests/configs/crypto_config_test_driver.h'
+            MBEDTLS_ROOT_DIR="$PWD"
+            TF_PSA_CRYPTO_ROOT_DIR="$PWD/tf-psa-crypto"
+        fi
+        config_files="$CONFIG_H $CRYPTO_CONFIG_H $CONFIG_TEST_DRIVER_H"
+    else
+        CRYPTO_CONFIG_H='include/psa/crypto_config.h'
+        PSA_CORE_PATH='core'
+        BUILTIN_SRC_PATH='drivers/builtin/src'
+        CONFIG_TEST_DRIVER_H='tests/configs/config_test_driver.h'
+        TF_PSA_CRYPTO_ROOT_DIR="$PWD"
+        MBEDTLS_ROOT_DIR=""
+
+        config_files="$CRYPTO_CONFIG_H $CONFIG_TEST_DRIVER_H"
+    fi
+
+    # Files that are clobbered by some jobs will be backed up. Use a different
+    # suffix from auxiliary scripts so that all.sh and auxiliary scripts can
+    # independently decide when to remove the backup file.
+    backup_suffix='.all.bak'
+    # Files clobbered by config.py
+    files_to_back_up="$config_files"
+    if in_mbedtls_repo; then
+        # Files clobbered by in-tree cmake
+        files_to_back_up="$files_to_back_up Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile"
+    fi
+
+    append_outcome=0
+    MEMORY=0
+    FORCE=0
+    QUIET=0
+    KEEP_GOING=0
+
+    # Seed value used with the --release-test option.
+    #
+    # See also RELEASE_SEED in basic-build-test.sh. Debugging is easier if
+    # both values are kept in sync. If you change the value here because it
+    # breaks some tests, you'll definitely want to change it in
+    # basic-build-test.sh as well.
+    RELEASE_SEED=1
+
+    # Use the C locale for character collation in regular expressions and sorting
+    export LC_COLLATE=C
+
+    : ${MBEDTLS_TEST_OUTCOME_FILE=}
+    : ${MBEDTLS_TEST_PLATFORM="$(uname -s | tr -c \\n0-9A-Za-z _)-$(uname -m | tr -c \\n0-9A-Za-z _)"}
+    export MBEDTLS_TEST_OUTCOME_FILE
+    export MBEDTLS_TEST_PLATFORM
+
+    # Default commands, can be overridden by the environment
+    : ${OPENSSL:="openssl"}
+    : ${OPENSSL_NEXT:="$OPENSSL"}
+    : ${GNUTLS_CLI:="gnutls-cli"}
+    : ${GNUTLS_SERV:="gnutls-serv"}
+    : ${OUT_OF_SOURCE_DIR:=$PWD/out_of_source_build}
+    : ${ARMC6_BIN_DIR:=/usr/bin}
+    : ${ARM_NONE_EABI_GCC_PREFIX:=arm-none-eabi-}
+    : ${ARM_LINUX_GNUEABI_GCC_PREFIX:=arm-linux-gnueabi-}
+    : ${ARM_LINUX_GNUEABIHF_GCC_PREFIX:=arm-linux-gnueabihf-}
+    : ${AARCH64_LINUX_GNU_GCC_PREFIX:=aarch64-linux-gnu-}
+    : ${CLANG_LATEST:="clang-latest"}
+    : ${CLANG_EARLIEST:="clang-earliest"}
+    : ${GCC_LATEST:="gcc-latest"}
+    : ${GCC_EARLIEST:="gcc-earliest"}
+    # if MAKEFLAGS is not set, add the -j option to speed up invocations of make
+    if [ -z "${MAKEFLAGS+set}" ]; then
+        export MAKEFLAGS="-j$(all_sh_nproc)"
+    fi
+    # if CC is not set, use clang by default (if present) to improve build times
+    if [ -z "${CC+set}" ] && (type clang > /dev/null 2>&1); then
+        export CC="clang"
+    fi
+
+    if [ -n "${OPENSSL_3+set}" ]; then
+        export OPENSSL_NEXT="$OPENSSL_3"
+    fi
+
+    # Include more verbose output for failing tests run by CMake or make
+    export CTEST_OUTPUT_ON_FAILURE=1
+
+    # CFLAGS and LDFLAGS for Asan builds that don't use CMake.
+    # They default to -O2; pass -Ox _after_ this if you want another level.
+    ASAN_CFLAGS='-O2 -Werror -fsanitize=address,undefined -fno-sanitize-recover=all'
+    # Normally, tests should use this compiler for ASAN testing
+    ASAN_CC=clang
+
+    # Platform tests have an allocation that returns null
+    export ASAN_OPTIONS="allocator_may_return_null=1"
+    export MSAN_OPTIONS="allocator_may_return_null=1"
+
+    # Gather the list of available components. These are the functions
+    # defined in this script whose name starts with "component_".
+    ALL_COMPONENTS=$(compgen -A function component_ | sed 's/component_//')
+
+    PSASIM_PATH='tests/psa-client-server/psasim/'
+
+    # Delay determining SUPPORTED_COMPONENTS until the command line options have a chance to override
+    # the commands set by the environment
+}
+
+setup_quiet_wrappers()
+{
+    # Pick up "quiet" wrappers for make and cmake, which don't output very much
+    # unless there is an error. This reduces logging overhead in the CI.
+    #
+    # Note that the cmake wrapper breaks unless we use an absolute path here.
+    if [[ -e ${PWD}/framework/scripts/quiet ]]; then
+        export PATH=${PWD}/framework/scripts/quiet:$PATH
+    fi
+}
+
+# Test whether the component $1 is included in the command line patterns.
+is_component_included()
+{
+    # Temporarily disable wildcard expansion so that $COMMAND_LINE_COMPONENTS
+    # only does word splitting.
+    set -f
+    for pattern in $COMMAND_LINE_COMPONENTS; do
+        set +f
+        case ${1#component_} in $pattern) return 0;; esac
+    done
+    set +f
+    return 1
+}
+
+usage()
+{
+    cat <<EOF
+Usage: $0 [OPTION]... [COMPONENT]...
+Run Mbed TLS release validation tests.
+By default, run all tests. With one or more COMPONENT, run only those.
+COMPONENT can be the name of a component or a shell wildcard pattern.
+
+Examples:
+  $0 "check_*"
+    Run all sanity checks.
+  $0 --no-armcc --except test_memsan
+    Run everything except builds that require armcc and MemSan.
+
+Special options:
+  -h|--help             Print this help and exit.
+  --list-all-components List all available test components and exit.
+  --list-components     List components supported on this platform and exit.
+
+General options:
+  -q|--quiet            Only output component names, and errors if any.
+  -f|--force            Force the tests to overwrite any modified files.
+  -k|--keep-going       Run all tests and report errors at the end.
+  -m|--memory           Additional optional memory tests.
+     --append-outcome   Append to the outcome file (if used).
+     --arm-none-eabi-gcc-prefix=<string>
+                        Prefix for a cross-compiler for arm-none-eabi
+                        (default: "${ARM_NONE_EABI_GCC_PREFIX}")
+     --arm-linux-gnueabi-gcc-prefix=<string>
+                        Prefix for a cross-compiler for arm-linux-gnueabi
+                        (default: "${ARM_LINUX_GNUEABI_GCC_PREFIX}")
+     --arm-linux-gnueabihf-gcc-prefix=<string>
+                        Prefix for a cross-compiler for arm-linux-gnueabihf
+                        (default: "${ARM_LINUX_GNUEABIHF_GCC_PREFIX}")
+     --aarch64-linux-gnu-gcc-prefix=<string>
+                        Prefix for a cross-compiler for aarch64-linux-gnu
+                        (default: "${AARCH64_LINUX_GNU_GCC_PREFIX}")
+     --armcc            Run ARM Compiler builds (on by default).
+     --restore          First clean up the build tree, restoring backed up
+                        files. Do not run any components unless they are
+                        explicitly specified.
+     --error-test       Error test mode: run a failing function in addition
+                        to any specified component. May be repeated.
+     --except           Exclude the COMPONENTs listed on the command line,
+                        instead of running only those.
+     --no-append-outcome    Write a new outcome file and analyze it (default).
+     --no-armcc         Skip ARM Compiler builds.
+     --no-force         Refuse to overwrite modified files (default).
+     --no-keep-going    Stop at the first error (default).
+     --no-memory        No additional memory tests (default).
+     --no-quiet         Print full output from components.
+     --out-of-source-dir=<path>  Directory used for CMake out-of-source build tests.
+     --outcome-file=<path>  File where test outcomes are written (not done if
+                            empty; default: \$MBEDTLS_TEST_OUTCOME_FILE).
+     --random-seed      Use a random seed value for randomized tests (default).
+  -r|--release-test     Run this script in release mode. This fixes the seed value to ${RELEASE_SEED}.
+  -s|--seed             Integer seed value to use for this test run.
+
+Tool path options:
+     --armc6-bin-dir=<ARMC6_bin_dir_path>       ARM Compiler 6 bin directory.
+     --clang-earliest=<Clang_earliest_path>     Earliest version of clang available
+     --clang-latest=<Clang_latest_path>         Latest version of clang available
+     --gcc-earliest=<GCC_earliest_path>         Earliest version of GCC available
+     --gcc-latest=<GCC_latest_path>             Latest version of GCC available
+     --gnutls-cli=<GnuTLS_cli_path>             GnuTLS client executable to use for most tests.
+     --gnutls-serv=<GnuTLS_serv_path>           GnuTLS server executable to use for most tests.
+     --openssl=<OpenSSL_path>                   OpenSSL executable to use for most tests.
+     --openssl-next=<OpenSSL_path>              OpenSSL executable to use for recent things like ARIA
+EOF
+}
+
+# Cleanup before/after running a component.
+# Remove built files as well as the cmake cache/config.
+# Does not remove generated source files.
+cleanup()
+{
+    if in_mbedtls_repo; then
+        command make clean
+    fi
+
+    # Remove CMake artifacts
+    find . -name .git -prune -o \
+           -iname CMakeFiles -exec rm -rf {} \+ -o \
+           \( -iname cmake_install.cmake -o \
+              -iname CTestTestfile.cmake -o \
+              -iname CMakeCache.txt -o \
+              -path './cmake/*.cmake' \) -exec rm -f {} \+
+    # Remove Makefiles generated by in-tree CMake builds
+    # (Not all files will exist in all branches, but that's OK.)
+    rm -f 3rdparty/Makefile 3rdparty/*/Makefile
+    rm -f pkgconfig/Makefile framework/Makefile
+    rm -f include/Makefile programs/!(fuzz)/Makefile
+    rm -f tf-psa-crypto/Makefile tf-psa-crypto/include/Makefile
+    rm -f tf-psa-crypto/core/Makefile tf-psa-crypto/drivers/Makefile
+    rm -f tf-psa-crypto/tests/Makefile
+    rm -f tf-psa-crypto/drivers/everest/Makefile
+    rm -f tf-psa-crypto/drivers/p256-m/Makefile
+    rm -f tf-psa-crypto/drivers/builtin/Makefile
+    rm -f tf-psa-crypto/drivers/builtin/src/Makefile
+
+    # Remove any artifacts from the component_test_cmake_as_subdirectory test.
+    rm -rf programs/test/cmake_subproject/build
+    rm -f programs/test/cmake_subproject/Makefile
+    rm -f programs/test/cmake_subproject/cmake_subproject
+
+    # Remove any artifacts from the component_test_cmake_as_package test.
+    rm -rf programs/test/cmake_package/build
+    rm -f programs/test/cmake_package/Makefile
+    rm -f programs/test/cmake_package/cmake_package
+
+    # Remove any artifacts from the component_test_cmake_as_installed_package test.
+    rm -rf programs/test/cmake_package_install/build
+    rm -f programs/test/cmake_package_install/Makefile
+    rm -f programs/test/cmake_package_install/cmake_package_install
+
+    # Remove out of source directory
+    if in_tf_psa_crypto_repo; then
+        rm -rf "$OUT_OF_SOURCE_DIR"
+    fi
+
+    # Restore files that may have been clobbered by the job
+    restore_backed_up_files
+}
+
+# Restore files that may have been clobbered
+restore_backed_up_files () {
+    for x in $files_to_back_up; do
+        if [[ -e "$x$backup_suffix" ]]; then
+            cp -p "$x$backup_suffix" "$x"
+        fi
+    done
+}
+
+# Final cleanup when this script exits (except when exiting on a failure
+# in non-keep-going mode).
+final_cleanup () {
+    cleanup
+
+    for x in $files_to_back_up; do
+        rm -f "$x$backup_suffix"
+    done
+}
+
+# Executed on exit. May be redefined depending on command line options.
+final_report () {
+    :
+}
+
+fatal_signal () {
+    final_cleanup
+    final_report $1
+    trap - $1
+    kill -$1 $$
+}
+
+pre_set_signal_handlers () {
+    trap 'fatal_signal HUP' HUP
+    trap 'fatal_signal INT' INT
+    trap 'fatal_signal TERM' TERM
+}
+
+# Number of processors on this machine. Used as the default setting
+# for parallel make.
+all_sh_nproc ()
+{
+    {
+        nproc || # Linux
+        sysctl -n hw.ncpuonline || # NetBSD, OpenBSD
+        sysctl -n hw.ncpu || # FreeBSD
+        echo 1
+    } 2>/dev/null
+}
+
+msg()
+{
+    if [ -n "${current_component:-}" ]; then
+        current_section="${current_component#component_}: $1"
+    else
+        current_section="$1"
+    fi
+
+    if [ $QUIET -eq 1 ]; then
+        return
+    fi
+
+    echo ""
+    echo "******************************************************************"
+    echo "* $current_section "
+    printf "* "; date
+    echo "******************************************************************"
+}
+
+err_msg()
+{
+    echo "$1" >&2
+}
+
+check_tools()
+{
+    for tool in "$@"; do
+        if ! type "$tool" >/dev/null 2>&1; then
+            err_msg "$tool not found!"
+            exit 1
+        fi
+    done
+}
+
+pre_parse_command_line () {
+    COMMAND_LINE_COMPONENTS=
+    all_except=0
+    error_test=0
+    list_components=0
+    restore_first=0
+    no_armcc=
+
+    # Note that legacy options are ignored instead of being omitted from this
+    # list of options, so invocations that worked with previous versions of
+    # all.sh will still run and work properly.
+    while [ $# -gt 0 ]; do
+        case "$1" in
+            --append-outcome) append_outcome=1;;
+            --arm-none-eabi-gcc-prefix) shift; ARM_NONE_EABI_GCC_PREFIX="$1";;
+            --arm-linux-gnueabi-gcc-prefix) shift; ARM_LINUX_GNUEABI_GCC_PREFIX="$1";;
+            --arm-linux-gnueabihf-gcc-prefix) shift; ARM_LINUX_GNUEABIHF_GCC_PREFIX="$1";;
+            --aarch64-linux-gnu-gcc-prefix) shift; AARCH64_LINUX_GNU_GCC_PREFIX="$1";;
+            --armcc) no_armcc=;;
+            --armc6-bin-dir) shift; ARMC6_BIN_DIR="$1";;
+            --clang-earliest) shift; CLANG_EARLIEST="$1";;
+            --clang-latest) shift; CLANG_LATEST="$1";;
+            --error-test) error_test=$((error_test + 1));;
+            --except) all_except=1;;
+            --force|-f) FORCE=1;;
+            --gcc-earliest) shift; GCC_EARLIEST="$1";;
+            --gcc-latest) shift; GCC_LATEST="$1";;
+            --gnutls-cli) shift; GNUTLS_CLI="$1";;
+            --gnutls-legacy-cli) shift;; # ignored for backward compatibility
+            --gnutls-legacy-serv) shift;; # ignored for backward compatibility
+            --gnutls-serv) shift; GNUTLS_SERV="$1";;
+            --help|-h) usage; exit;;
+            --keep-going|-k) KEEP_GOING=1;;
+            --list-all-components) printf '%s\n' $ALL_COMPONENTS; exit;;
+            --list-components) list_components=1;;
+            --memory|-m) MEMORY=1;;
+            --no-append-outcome) append_outcome=0;;
+            --no-armcc) no_armcc=1;;
+            --no-force) FORCE=0;;
+            --no-keep-going) KEEP_GOING=0;;
+            --no-memory) MEMORY=0;;
+            --no-quiet) QUIET=0;;
+            --openssl) shift; OPENSSL="$1";;
+            --openssl-next) shift; OPENSSL_NEXT="$1";;
+            --outcome-file) shift; MBEDTLS_TEST_OUTCOME_FILE="$1";;
+            --out-of-source-dir) shift; OUT_OF_SOURCE_DIR="$1";;
+            --quiet|-q) QUIET=1;;
+            --random-seed) unset SEED;;
+            --release-test|-r) SEED=$RELEASE_SEED;;
+            --restore) restore_first=1;;
+            --seed|-s) shift; SEED="$1";;
+            -*)
+                echo >&2 "Unknown option: $1"
+                echo >&2 "Run $0 --help for usage."
+                exit 120
+                ;;
+            *) COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS $1";;
+        esac
+        shift
+    done
+
+    # Exclude components that are not supported on this platform.
+    SUPPORTED_COMPONENTS=
+    for component in $ALL_COMPONENTS; do
+        case $(type "support_$component" 2>&1) in
+            *' function'*)
+                if ! support_$component; then continue; fi;;
+        esac
+        SUPPORTED_COMPONENTS="$SUPPORTED_COMPONENTS $component"
+    done
+
+    if [ $list_components -eq 1 ]; then
+        printf '%s\n' $SUPPORTED_COMPONENTS
+        exit
+    fi
+
+    # With no list of components, run everything.
+    if [ -z "$COMMAND_LINE_COMPONENTS" ] && [ $restore_first -eq 0 ]; then
+        all_except=1
+    fi
+
+    # --no-armcc is a legacy option. The modern way is --except '*_armcc*'.
+    # Ignore it if components are listed explicitly on the command line.
+    if [ -n "$no_armcc" ] && [ $all_except -eq 1 ]; then
+        COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS *_armcc*"
+    fi
+
+    # Error out if an explicitly requested component doesn't exist.
+    if [ $all_except -eq 0 ]; then
+        unsupported=0
+        # Temporarily disable wildcard expansion so that $COMMAND_LINE_COMPONENTS
+        # only does word splitting.
+        set -f
+        for component in $COMMAND_LINE_COMPONENTS; do
+            set +f
+            # If the requested name includes a wildcard character, don't
+            # check it. Accept wildcard patterns that don't match anything.
+            case $component in
+                *[*?\[]*) continue;;
+            esac
+            case " $SUPPORTED_COMPONENTS " in
+                *" $component "*) :;;
+                *)
+                    echo >&2 "Component $component was explicitly requested, but is not known or not supported."
+                    unsupported=$((unsupported + 1));;
+            esac
+        done
+        set +f
+        if [ $unsupported -ne 0 ]; then
+            exit 2
+        fi
+    fi
+
+    # Build the list of components to run.
+    RUN_COMPONENTS=
+    for component in $SUPPORTED_COMPONENTS; do
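+        # A component is run if its inclusion status (0 = included by the
+        # command-line patterns, 1 = not included) matches $all_except:
+        # in normal mode run the included ones, with --except run the rest.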
+        if is_component_included "$component"; [ $? -eq $all_except ]; then
+            RUN_COMPONENTS="$RUN_COMPONENTS $component"
+        fi
+    done
+
+    unset all_except
+    unset no_armcc
+}
+
+pre_check_git () {
+    if [ $FORCE -eq 1 ]; then
+        rm -rf "$OUT_OF_SOURCE_DIR"
+        git checkout-index -f -q $config_files
+        cleanup
+    else
+
+        if [ -d "$OUT_OF_SOURCE_DIR" ]; then
+            echo "Warning - there is an existing directory at '$OUT_OF_SOURCE_DIR'" >&2
+            echo "You can either delete this directory manually, or force the test by rerunning"
+            echo "the script as: $0 --force --out-of-source-dir $OUT_OF_SOURCE_DIR"
+            exit 1
+        fi
+
+        for config in $config_files; do
+            if ! git diff --quiet "$config"; then
+                err_msg "Warning - the configuration file '$config' has been edited. "
+                echo "You can either delete or preserve your work, or force the test by rerunning the"
+                echo "script as: $0 --force"
+                exit 1
+            fi
+        done
+    fi
+}
+
+pre_restore_files () {
+    # If the makefiles have been generated by a framework such as cmake,
+    # restore them from git. If the makefiles look like modifications from
+    # the ones checked into git, take care not to modify them. Whatever
+    # this function leaves behind is what the script will restore before
+    # each component.
+    case "$(head -n1 Makefile)" in
+        *[Gg]enerated*)
+            git update-index --no-skip-worktree Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+            git checkout -- Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+            ;;
+    esac
+}
+
+pre_back_up () {
+    for x in $files_to_back_up; do
+        cp -p "$x" "$x$backup_suffix"
+    done
+}
+
+pre_setup_keep_going () {
+    failure_count=0 # Number of failed components
+    last_failure_status=0 # Last failure status in this component
+
+    # See err_trap
+    previous_failure_status=0
+    previous_failed_command=
+    previous_failure_funcall_depth=0
+    unset report_failed_command
+
+    start_red=
+    end_color=
+    if [ -t 1 ]; then
+        case "${TERM:-}" in
+            *color*|cygwin|linux|rxvt*|screen|[Eex]term*)
+                start_red=$(printf '\033[31m')
+                end_color=$(printf '\033[0m')
+                ;;
+        esac
+    fi
+
+    # Keep a summary of failures in a file. We'll print it out at the end.
+    failure_summary_file=$PWD/all-sh-failures-$$.log
+    : >"$failure_summary_file"
+
+    # Whether it makes sense to keep a component going after the specified
+    # command fails (test command) or not (configure or build).
+    # This function normally receives the failing simple command
+    # ($BASH_COMMAND) as an argument, but if $report_failed_command is set,
+    # this is passed instead.
+    # This doesn't have to be 100% accurate: all failures are recorded anyway.
+    # False positives result in running things that can't be expected to
+    # work. False negatives result in things not running after something else
+    # failed even though they might have given useful feedback.
+    can_keep_going_after_failure () {
+        case "$1" in
+            "msg "*) false;;
+            "cd "*) false;;
+            "diff "*) true;;
+            *make*[\ /]tests*) false;; # make tests, make CFLAGS=-I../tests, ...
+            *test*) true;; # make test, tests/stuff, env V=v tests/stuff, ...
+            *make*check*) true;;
+            "grep "*) true;;
+            "[ "*) true;;
+            "! "*) true;;
+            *) false;;
+        esac
+    }
+
+    # This function runs if there is any error in a component.
+    # It must either exit with a nonzero status, or set
+    # last_failure_status to a nonzero value.
+    err_trap () {
+        # Save $? (status of the failing command). This must be the very
+        # first thing, before $? is overridden.
+        last_failure_status=$?
+        failed_command=${report_failed_command-$BASH_COMMAND}
+
+        if [[ $last_failure_status -eq $previous_failure_status &&
+              "$failed_command" == "$previous_failed_command" &&
+              ${#FUNCNAME[@]} == $((previous_failure_funcall_depth - 1)) ]]
+        then
+            # The same command failed twice in a row, but this time one level
+            # less deep in the function call stack. This happens when the last
+            # command of a function returns a nonzero status, and the function
+            # returns that same status. Ignore the second failure.
+            previous_failure_funcall_depth=${#FUNCNAME[@]}
+            return
+        fi
+        previous_failure_status=$last_failure_status
+        previous_failed_command=$failed_command
+        previous_failure_funcall_depth=${#FUNCNAME[@]}
+
+        text="$current_section: $failed_command -> $last_failure_status"
+        echo "${start_red}^^^^$text^^^^${end_color}" >&2
+        echo "$text" >>"$failure_summary_file"
+
+        # If the command is fatal (configure or build command), stop this
+        # component. Otherwise (test command) keep the component running
+        # (run more tests from the same build).
+        if ! can_keep_going_after_failure "$failed_command"; then
+            exit $last_failure_status
+        fi
+    }
+
+    final_report () {
+        if [ $failure_count -gt 0 ]; then
+            echo
+            echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+            echo "${start_red}FAILED: $failure_count components${end_color}"
+            cat "$failure_summary_file"
+            echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        elif [ -z "${1-}" ]; then
+            echo "SUCCESS :)"
+        fi
+        if [ -n "${1-}" ]; then
+            echo "Killed by SIG$1."
+        fi
+        rm -f "$failure_summary_file"
+        if [ $failure_count -gt 0 ]; then
+            exit 1
+        fi
+    }
+}
+
+# '! true' does not trigger the ERR trap. Arrange to trigger it, with
+# a reasonably informative error message (not just "$@").
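+# Usage: write 'not cmd args...' wherever '! cmd args...' is meant, e.g.
+#   not grep -q SOMETHING output.log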
+not () {
+    if "$@"; then
+        report_failed_command="! $*"
+        false
+        unset report_failed_command
+    fi
+}
+
+pre_prepare_outcome_file () {
+    case "$MBEDTLS_TEST_OUTCOME_FILE" in
+      [!/]*) MBEDTLS_TEST_OUTCOME_FILE="$PWD/$MBEDTLS_TEST_OUTCOME_FILE";;
+    esac
+    if [ -n "$MBEDTLS_TEST_OUTCOME_FILE" ] && [ "$append_outcome" -eq 0 ]; then
+        rm -f "$MBEDTLS_TEST_OUTCOME_FILE"
+    fi
+}
+
+pre_print_configuration () {
+    if [ $QUIET -eq 1 ]; then
+        return
+    fi
+
+    msg "info: $0 configuration"
+    echo "MEMORY: $MEMORY"
+    echo "FORCE: $FORCE"
+    echo "MBEDTLS_TEST_OUTCOME_FILE: ${MBEDTLS_TEST_OUTCOME_FILE:-(none)}"
+    echo "SEED: ${SEED-"UNSET"}"
+    echo
+    echo "OPENSSL: $OPENSSL"
+    echo "OPENSSL_NEXT: $OPENSSL_NEXT"
+    echo "GNUTLS_CLI: $GNUTLS_CLI"
+    echo "GNUTLS_SERV: $GNUTLS_SERV"
+    echo "ARMC6_BIN_DIR: $ARMC6_BIN_DIR"
+}
+
+# Make sure the tools we need are available.
+pre_check_tools () {
+    # Build the list of variables to pass to output_env.sh.
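+    # The positional parameters ("$@") are used as a growing command line:
+    # they start as just "env", VAR=value assignments are appended below,
+    # and output_env.sh is appended last when the command is invoked.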
+    set env
+
+    case " $RUN_COMPONENTS " in
+        # Require OpenSSL and GnuTLS if running any tests (as opposed to
+        # only doing builds). Not all tests run OpenSSL and GnuTLS, but this
+        # is a good enough approximation in practice.
+        *" test_"* | *" release_test_"*)
+            # To avoid setting OpenSSL and GnuTLS for each call to compat.sh
+            # and ssl-opt.sh, we just export the variables they require.
+            export OPENSSL="$OPENSSL"
+            export GNUTLS_CLI="$GNUTLS_CLI"
+            export GNUTLS_SERV="$GNUTLS_SERV"
+            # Avoid passing --seed flag in every call to ssl-opt.sh
+            if [ -n "${SEED-}" ]; then
+                export SEED
+            fi
+            set "$@" OPENSSL="$OPENSSL"
+            set "$@" GNUTLS_CLI="$GNUTLS_CLI" GNUTLS_SERV="$GNUTLS_SERV"
+            check_tools "$OPENSSL" "$OPENSSL_NEXT" \
+                        "$GNUTLS_CLI" "$GNUTLS_SERV"
+            ;;
+    esac
+
+    case " $RUN_COMPONENTS " in
+        *_doxygen[_\ ]*) check_tools "doxygen" "dot";;
+    esac
+
+    case " $RUN_COMPONENTS " in
+        *_arm_none_eabi_gcc[_\ ]*) check_tools "${ARM_NONE_EABI_GCC_PREFIX}gcc";;
+    esac
+
+    case " $RUN_COMPONENTS " in
+        *_mingw[_\ ]*) check_tools "i686-w64-mingw32-gcc";;
+    esac
+
+    case " $RUN_COMPONENTS " in
+        *" test_zeroize "*) check_tools "gdb";;
+    esac
+
+    case " $RUN_COMPONENTS " in
+        *_armcc*)
+            ARMC6_CC="$ARMC6_BIN_DIR/armclang"
+            ARMC6_AR="$ARMC6_BIN_DIR/armar"
+            ARMC6_FROMELF="$ARMC6_BIN_DIR/fromelf"
+            check_tools "$ARMC6_CC" "$ARMC6_AR" "$ARMC6_FROMELF";;
+    esac
+
+    # past this point, no call to check_tool, only printing output
+    if [ $QUIET -eq 1 ]; then
+        return
+    fi
+
+    msg "info: output_env.sh"
+    case $RUN_COMPONENTS in
+        *_armcc*)
+            set "$@" ARMC6_CC="$ARMC6_CC" RUN_ARMCC=1;;
+        *) set "$@" RUN_ARMCC=0;;
+    esac
+    # Use a path relative to the currently-sourced file.
+    "$@" "${BASH_SOURCE%/*}"/output_env.sh
+}
+
+pre_generate_files() {
+    # since make doesn't have proper dependencies, remove any possibly outdated
+    # files that might be around before generating fresh ones
+    make neat
+    if [ $QUIET -eq 1 ]; then
+        make generated_files >/dev/null
+    else
+        make generated_files
+    fi
+}
+
+pre_load_helpers () {
+    # Use a path relative to the currently-sourced file.
+    test_script_dir="${BASH_SOURCE%/*}"
+    source "$test_script_dir"/all-helpers.sh
+}
+
+################################################################
+#### Termination
+################################################################
+
+post_report () {
+    msg "Done, cleaning up"
+    final_cleanup
+
+    final_report
+}
+
+################################################################
+#### Run all the things
+################################################################
+
+# Function invoked by --error-test to test error reporting.
+pseudo_component_error_test () {
+    msg "Testing error reporting $error_test_i"
+    if [ $KEEP_GOING -ne 0 ]; then
+        echo "Expect three failing commands."
+    fi
+    # If the component doesn't run in a subshell, changing error_test_i to an
+    # invalid integer will cause an error in the loop that runs this function.
+    error_test_i=this_should_not_be_used_since_the_component_runs_in_a_subshell
+    # Expected error: 'grep non_existent /dev/null -> 1'
+    grep non_existent /dev/null
+    # Expected error: '! grep -q . tests/scripts/all.sh -> 1'
+    not grep -q . "$0"
+    # Expected error: 'make unknown_target -> 2'
+    make unknown_target
+    false "this should not be executed"
+}
+
+# Run one component and clean up afterwards.
+run_component () {
+    current_component="$1"
+    export MBEDTLS_TEST_CONFIGURATION="$current_component"
+
+    # Unconditionally create a seedfile that's sufficiently long.
+    # Do this before each component, because a previous component may
+    # have messed it up or shortened it.
+    local dd_cmd
+    dd_cmd=(dd if=/dev/urandom of=./tests/seedfile bs=64 count=1)
+    case $OSTYPE in
+        linux*|freebsd*|openbsd*) dd_cmd+=(status=none)
+    esac
+    "${dd_cmd[@]}"
+
+    if in_mbedtls_repo && in_4_x_branch; then
+        dd_cmd=(dd if=/dev/urandom of=./tf-psa-crypto/tests/seedfile bs=64 count=1)
+        case $OSTYPE in
+            linux*|freebsd*|openbsd*) dd_cmd+=(status=none)
+        esac
+        "${dd_cmd[@]}"
+    fi
+
+    if in_tf_psa_crypto_repo; then
+        pre_create_tf_psa_crypto_out_of_source_directory
+    fi
+
+    # Run the component in a subshell, with error trapping and output
+    # redirection set up based on the relevant options.
+    if [ $KEEP_GOING -eq 1 ]; then
+        # We want to keep running if the subshell fails, so 'set -e' must
+        # be off when the subshell runs.
+        set +e
+    fi
+    (
+        if [ $QUIET -eq 1 ]; then
+            # msg() will be silenced, so just print the component name here.
+            echo "${current_component#component_}"
+            exec >/dev/null
+        fi
+        if [ $KEEP_GOING -eq 1 ]; then
+            # Keep "set -e" off, and run an ERR trap instead to record failures.
+            set -E
+            trap err_trap ERR
+        fi
+        # The next line is what runs the component
+        "$@"
+        if [ $KEEP_GOING -eq 1 ]; then
+            trap - ERR
+            exit $last_failure_status
+        fi
+    )
+    component_status=$?
+    if [ $KEEP_GOING -eq 1 ]; then
+        set -e
+        if [ $component_status -ne 0 ]; then
+            failure_count=$((failure_count + 1))
+        fi
+    fi
+
+    # Restore the build tree to a clean state.
+    cleanup
+    unset current_component
+}
+
+pre_create_tf_psa_crypto_out_of_source_directory () {
+    rm -rf "$OUT_OF_SOURCE_DIR"
+    mkdir "$OUT_OF_SOURCE_DIR"
+}
+
+################################################################
+#### Main
+################################################################
+
+main () {
+    # Preliminary setup
+    pre_set_shell_options
+    pre_set_signal_handlers
+    pre_check_environment
+    pre_load_helpers
+    pre_load_components
+    pre_initialize_variables
+    pre_parse_command_line "$@"
+
+    setup_quiet_wrappers
+    pre_check_git
+    pre_restore_files
+    pre_back_up
+
+    build_status=0
+    if [ $KEEP_GOING -eq 1 ]; then
+        pre_setup_keep_going
+    fi
+    pre_prepare_outcome_file
+    pre_print_configuration
+    pre_check_tools
+    cleanup
+    if in_mbedtls_repo; then
+        pre_generate_files
+    fi
+
+    # Run the requested tests.
+    for ((error_test_i=1; error_test_i <= error_test; error_test_i++)); do
+        run_component pseudo_component_error_test
+    done
+    unset error_test_i
+    for component in $RUN_COMPONENTS; do
+        run_component "component_$component"
+    done
+
+    # We're done.
+    post_report
+}
diff --git a/framework/scripts/all-helpers.sh b/framework/scripts/all-helpers.sh
new file mode 100644
index 0000000..205c83f
--- /dev/null
+++ b/framework/scripts/all-helpers.sh
@@ -0,0 +1,337 @@
+# all-helpers.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# This file contains helpers for test components that are executed by all.sh.
+# See "File structure" in all-core.sh for other files used by all.sh.
+#
+# This file is the right place for helpers:
+# - that are used by more than one component living in more than one file;
+# - or (inclusive) that we want to share across repos or branches.
+#
+# Helpers that are used in a single component file that is
+# repo- and branch-specific can be defined in the file where they are used.
+
+################################################################
+#### Helpers for components using libtestdriver1
+################################################################
+
+# How to use libtestdriver1
+# -------------------------
+#
+# 1. Define the list of algorithms and key types to accelerate,
+#    designated the same way as PSA_WANT_ macros but without PSA_WANT_.
+#    Examples:
+#      - loc_accel_list="ALG_JPAKE"
+#      - loc_accel_list="ALG_FFDH KEY_TYPE_DH_KEY_PAIR KEY_TYPE_DH_PUBLIC_KEY"
+# 2. Make configuration changes for the driver and/or main libraries.
+#    2a. Call helper_libtestdriver1_adjust_config <base>, where the argument
+#        can be either "default" to start with the default config, or a name
+#        supported by scripts/config.py (for example, "full"). This selects
+#        the base to use, and makes common adjustments.
+#    2b. If desired, adjust the PSA_WANT symbols in psa/crypto_config.h.
+#        These changes affect both the driver and the main libraries.
+#        (Note: they need to have the same set of PSA_WANT symbols, as that
+#        determines the ABI between them.)
+#    2c. Adjust MBEDTLS_ symbols in mbedtls_config.h. This only affects the
+#        main libraries. Typically, you want to disable the module(s) that are
+#        being accelerated. You may need to also disable modules that depend
+#        on them or options that are not supported with drivers.
+#    2d. On top of psa/crypto_config.h, the driver library uses its own config
+#        file: tests/configs/config_test_driver.h. You usually don't need to
+#        edit it: using loc_extra_list (see below) is preferred. However, when
+#        there's no PSA symbol for what you want to enable, calling
+#        scripts/config.py on this file remains the only option.
+# 3. Build the driver library, then the main libraries, test, and programs.
+#    3a. Call helper_libtestdriver1_make_drivers "$loc_accel_list". You may
+#        need to enable more algorithms here, typically hash algorithms when
+#        accelerating some signature algorithms (ECDSA, RSAv2). This is done
+#        by passing a 2nd argument listing the extra algorithms.
+#        Example:
+#          loc_extra_list="ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512"
+#          helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+#    3b. Call helper_libtestdriver1_make_main "$loc_accel_list". Any
+#        additional arguments will be passed to make: this can be useful if
+#        you don't want to build everything when iterating during development.
+#        Example:
+#          helper_libtestdriver1_make_main "$loc_accel_list" -C tests test_suite_foo
+# 4. Run the tests you want.
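+#
+# Putting it all together, a hypothetical component accelerating SHA-256
+# could look like the sketch below (the component name and configuration
+# choices are illustrative only):
+#
+#   component_test_psa_accel_sha256_example () {
+#       msg "build: accelerated SHA-256 via libtestdriver1"
+#       loc_accel_list="ALG_SHA_256"
+#       helper_libtestdriver1_adjust_config default
+#       scripts/config.py unset MBEDTLS_SHA256_C   # disable the accelerated module
+#       helper_libtestdriver1_make_drivers "$loc_accel_list"
+#       helper_libtestdriver1_make_main "$loc_accel_list"
+#       msg "test: accelerated SHA-256"
+#       make test
+#   }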
+
+# Adjust the configuration - for both libtestdriver1 and main library,
+# as they should have the same PSA_WANT macros.
+helper_libtestdriver1_adjust_config() {
+    base_config=$1
+    # Select the base configuration
+    if [ "$base_config" != "default" ]; then
+        scripts/config.py "$base_config"
+    fi
+
+    # Enable PSA-based config (necessary to use drivers)
+    # MBEDTLS_PSA_CRYPTO_CONFIG is a legacy setting which should only be set on 3.6 LTS branches.
+    if in_mbedtls_repo && in_3_6_branch; then
+        scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+    fi
+
+    # Dynamic secure element support is a deprecated feature and needs to be disabled here.
+    # This is done to have the same form of psa_key_attributes_s for libdriver and library.
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+
+    # If threading is enabled on the normal build, then we need to enable it in the drivers as well,
+    # otherwise we will end up running multithreaded tests without mutexes to protect them.
+    if scripts/config.py get MBEDTLS_THREADING_C; then
+        if in_3_6_branch; then
+            scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_C
+        else
+            scripts/config.py -c "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_C
+        fi
+    fi
+
+    if scripts/config.py get MBEDTLS_THREADING_PTHREAD; then
+        if in_3_6_branch; then
+            scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_PTHREAD
+        else
+            scripts/config.py -c "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_PTHREAD
+        fi
+    fi
+}
+
+# Build the drivers library libtestdriver1.a (with ASan).
+#
+# Parameters:
+# 1. a space-separated list of things to accelerate;
+# 2. optional: a space-separated list of things to also support.
+# Here "things" are PSA_WANT_ symbols but with PSA_WANT_ removed.
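+# For example, "ALG_JPAKE" is turned into the compiler flag
+# -DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_ALG_JPAKE.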
+helper_libtestdriver1_make_drivers() {
+    loc_accel_flags=$( echo "$1 ${2-}" | sed 's/[^ ]* */-DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_&/g' )
+    make CC=$ASAN_CC -C tests libtestdriver1.a CFLAGS=" $ASAN_CFLAGS $loc_accel_flags" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# Build the main libraries, programs and tests,
+# linking to the drivers library (with ASan).
+#
+# Parameters:
+# 1. a space-separated list of things to accelerate;
+# *. remaining arguments if any are passed directly to make
+#    (examples: lib, -C tests test_suite_xxx, etc.)
+# Here "things" are PSA_WANT_ symbols but with PSA_WANT_ removed.
+helper_libtestdriver1_make_main() {
+    loc_accel_list=$1
+    shift
+
+    # we need flags both with and without the LIBTESTDRIVER1_ prefix
+    loc_accel_flags=$( echo "$loc_accel_list" | sed 's/[^ ]* */-DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_&/g' )
+    loc_accel_flags="$loc_accel_flags $( echo "$loc_accel_list" | sed 's/[^ ]* */-DMBEDTLS_PSA_ACCEL_&/g' )"
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -I../tests/include -I../framework/tests/include -I../tests -I../../tests -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_TEST_LIBTESTDRIVER1 $loc_accel_flags" LDFLAGS="-ltestdriver1 $ASAN_CFLAGS" "$@"
+}
+
+################################################################
+#### Helpers for components using psasim
+################################################################
+
+# Set some default values in $CONFIG_H in order to build the server or client
+# side of PSASIM. There is one mandatory parameter:
+# - $1: target which can be "client" or "server"
+helper_psasim_config() {
+    TARGET=$1
+
+    if [ "$TARGET" == "client" ]; then
+        scripts/config.py full
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+        # Dynamic secure element support is a deprecated feature and it is not
+        # available when CRYPTO_C and PSA_CRYPTO_STORAGE_C are disabled.
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+        # Disable potentially problematic features
+        scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+        scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+        scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+        scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+    else
+        scripts/config.py crypto_full
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS
+        # MBEDTLS_PSA_CRYPTO_SE_C must be unset here as well, to match the client
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+        # Also ensure MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER is not set (to match the client)
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+    fi
+}
+
+# This is a helper function to be used in psasim builds. It is meant to clean
+# up the library's workspace after the server build and before the client
+# build. Built libraries (mbedcrypto, mbedx509 and mbedtls) are supposed to be
+# already copied to the psasim folder at this point.
+helper_psasim_cleanup_before_client() {
+    # Clean up library files
+    make -C library clean
+
+    # Restore files that were backed up before building the library files. This
+    # includes $CONFIG_H and $CRYPTO_CONFIG_H.
+    restore_backed_up_files
+}
+
+# Helper to build the libraries for client/server in PSASIM. If the server is
+# being built, it also builds the final executable.
+# There is one mandatory parameter:
+# - $1: target which can be "client" or "server"
+helper_psasim_build() {
+    TARGET=$1
+    shift
+    TARGET_LIB=${TARGET}_libs
+
+    make -C $PSASIM_PATH CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS" $TARGET_LIB "$@"
+
+    # Build also the server application after its libraries have been built.
+    if [ "$TARGET" == "server" ]; then
+        make -C $PSASIM_PATH CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS" test/psa_server
+    fi
+}
+
+################################################################
+#### Configuration helpers
+################################################################
+
+# When called with no parameter this function disables all builtin curves.
+# The function optionally accepts 1 parameter: a space-separated list of the
+# curves that should be kept enabled.
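+# Example: keep only the builtin secp256r1 support enabled:
+#   helper_disable_builtin_curves "MBEDTLS_ECP_DP_SECP256R1_ENABLED"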
+helper_disable_builtin_curves() {
+    allowed_list="${1:-}"
+    scripts/config.py unset-all "MBEDTLS_ECP_DP_[0-9A-Z_a-z]*_ENABLED"
+
+    for curve in $allowed_list; do
+        scripts/config.py set $curve
+    done
+}
+
+# Helper returning the list of supported elliptic curves from CRYPTO_CONFIG_H,
+# without the "PSA_WANT_" prefix. This comes in handy for accelerating curves
+# in the following helpers.
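+# Example output (one entry per enabled curve, depending on the
+# configuration): " ECC_SECP_R1_256 ECC_MONTGOMERY_255 ..."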
+helper_get_psa_curve_list () {
+    loc_list=""
+    for item in $(sed -n 's/^#define PSA_WANT_\(ECC_[0-9A-Z_a-z]*\).*/\1/p' <"$CRYPTO_CONFIG_H"); do
+        loc_list="$loc_list $item"
+    done
+
+    echo "$loc_list"
+}
+
+# Helper returning the list of supported DH groups from CRYPTO_CONFIG_H,
+# without the "PSA_WANT_" prefix. This comes in handy for accelerating DH
+# groups in the following helpers.
+helper_get_psa_dh_group_list () {
+    loc_list=""
+    for item in $(sed -n 's/^#define PSA_WANT_\(DH_RFC7919_[0-9]*\).*/\1/p' <"$CRYPTO_CONFIG_H"); do
+        loc_list="$loc_list $item"
+    done
+
+    echo "$loc_list"
+}
+
+# Get the list of uncommented PSA_WANT_KEY_TYPE_xxx_ from CRYPTO_CONFIG_H. This
+# is useful to easily get a list of key type symbols to accelerate.
+# The function accepts a single argument which is the key type: ECC, DH, RSA.
+helper_get_psa_key_type_list() {
+    key_type="$1"
+    loc_list=""
+    for item in $(sed -n "s/^#define PSA_WANT_\(KEY_TYPE_${key_type}_[0-9A-Z_a-z]*\).*/\1/p" <"$CRYPTO_CONFIG_H"); do
+        # Skip DERIVE for elliptic keys since there is no driver dispatch for
+        # it so it cannot be accelerated.
+        if [ "$item" != "KEY_TYPE_ECC_KEY_PAIR_DERIVE" ]; then
+            loc_list="$loc_list $item"
+        fi
+    done
+
+    echo "$loc_list"
+}
+
+################################################################
+#### Misc. helpers for components
+################################################################
+
+helper_armc6_build_test()
+{
+    FLAGS="$1"
+
+    msg "build: ARM Compiler 6 ($FLAGS)"
+    make clean
+    ARM_TOOL_VARIANT="ult" CC="$ARMC6_CC" AR="$ARMC6_AR" CFLAGS="$FLAGS" \
+                    WARNING_CFLAGS='-Werror -xc -std=c99' make lib
+
+    msg "size: ARM Compiler 6 ($FLAGS)"
+    "$ARMC6_FROMELF" -z library/*.o
+    if [ -n "${PSA_CORE_PATH}" ]; then
+        "$ARMC6_FROMELF" -z ${PSA_CORE_PATH}/*.o
+    fi
+    if [ -n "${BUILTIN_SRC_PATH}" ]; then
+        "$ARMC6_FROMELF" -z ${BUILTIN_SRC_PATH}/*.o
+    fi
+}
+
+clang_version() {
+    if command -v clang > /dev/null ; then
+        clang --version|grep version|sed -E 's#.*version ([0-9]+).*#\1#'
+    else
+        echo 0  # report version 0 for "no clang"
+    fi
+}
+
+gcc_version() {
+    gcc="$1"
+    if command -v "$gcc" > /dev/null ; then
+        "$gcc" --version | sed -En '1s/^[^ ]* \([^)]*\) ([0-9]+).*/\1/p'
+    else
+        echo 0  # report version 0 for "no gcc"
+    fi
+}
+
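+# Check whether the compiler $1 is available and whether the executables it
+# produces can run on this host. Used by the can_run_* wrappers below,
+# typically to decide whether components that run cross-compiled binaries
+# are supported.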
+can_run_cc_output() {
+    cc="$1"
+    result=false
+    if type "$cc" >/dev/null 2>&1; then
+        testbin=$(mktemp)
+        if echo 'int main(void){return 0;}' | "$cc" -o "$testbin" -x c -; then
+            if "$testbin" 2>/dev/null; then
+                result=true
+            fi
+        fi
+        rm -f "$testbin"
+    fi
+    $result
+}
+
+can_run_arm_linux_gnueabi=
+can_run_arm_linux_gnueabi () {
+    if [ -z "$can_run_arm_linux_gnueabi" ]; then
+        if can_run_cc_output "${ARM_LINUX_GNUEABI_GCC_PREFIX}gcc"; then
+            can_run_arm_linux_gnueabi=true
+        else
+            can_run_arm_linux_gnueabi=false
+        fi
+    fi
+    $can_run_arm_linux_gnueabi
+}
+
+can_run_arm_linux_gnueabihf=
+can_run_arm_linux_gnueabihf () {
+    if [ -z "$can_run_arm_linux_gnueabihf" ]; then
+        if can_run_cc_output "${ARM_LINUX_GNUEABIHF_GCC_PREFIX}gcc"; then
+            can_run_arm_linux_gnueabihf=true
+        else
+            can_run_arm_linux_gnueabihf=false
+        fi
+    fi
+    $can_run_arm_linux_gnueabihf
+}
+
+can_run_aarch64_linux_gnu=
+can_run_aarch64_linux_gnu () {
+    if [ -z "$can_run_aarch64_linux_gnu" ]; then
+        if can_run_cc_output "${AARCH64_LINUX_GNU_GCC_PREFIX}gcc"; then
+            can_run_aarch64_linux_gnu=true
+        else
+            can_run_aarch64_linux_gnu=false
+        fi
+    fi
+    $can_run_aarch64_linux_gnu
+}
diff --git a/framework/scripts/apidoc_full.sh b/framework/scripts/apidoc_full.sh
new file mode 100755
index 0000000..902a515
--- /dev/null
+++ b/framework/scripts/apidoc_full.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+# Generate doxygen documentation with a full mbedtls_config.h (this ensures that every
+# available flag is documented, and avoids warnings about documentation
+# without a corresponding #define).
+#
+# /!\ This must not be a Makefile target, as it would create a race condition
+# when multiple targets are invoked in the same parallel build.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+set -eu
+
+. "$(dirname "$0")/project_detection.sh"
+
+if in_mbedtls_repo; then
+    CONFIG_H='include/mbedtls/mbedtls_config.h'
+    if [ -r $CONFIG_H ]; then :; else
+        echo "$CONFIG_H not found" >&2
+        exit 1
+    fi
+    if ! in_3_6_branch; then
+        CRYPTO_CONFIG_H='tf-psa-crypto/include/psa/crypto_config.h'
+    fi
+fi
+
+if in_tf_psa_crypto_repo; then
+    CRYPTO_CONFIG_H='include/psa/crypto_config.h'
+fi
+
+if in_tf_psa_crypto_repo || (in_mbedtls_repo && ! in_3_6_branch); then
+    if [ -r $CRYPTO_CONFIG_H ]; then :; else
+        echo "$CRYPTO_CONFIG_H not found" >&2
+        exit 1
+    fi
+    CRYPTO_CONFIG_BAK=${CRYPTO_CONFIG_H}.bak
+    cp -p $CRYPTO_CONFIG_H $CRYPTO_CONFIG_BAK
+fi
+
+if in_mbedtls_repo; then
+    CONFIG_BAK=${CONFIG_H}.bak
+    cp -p $CONFIG_H $CONFIG_BAK
+    scripts/config.py realfull
+    make apidoc
+    mv $CONFIG_BAK $CONFIG_H
+elif in_tf_psa_crypto_repo; then
+    scripts/config.py realfull
+    TF_PSA_CRYPTO_ROOT_DIR=$PWD
+    rm -rf doxygen/build-apidoc-full
+    mkdir doxygen/build-apidoc-full
+    cd doxygen/build-apidoc-full
+    cmake -DCMAKE_BUILD_TYPE:String=Check -DGEN_FILES=ON $TF_PSA_CRYPTO_ROOT_DIR
+    make tfpsacrypto-apidoc
+    cd $TF_PSA_CRYPTO_ROOT_DIR
+fi
+
+if in_tf_psa_crypto_repo || (in_mbedtls_repo && ! in_3_6_branch); then
+    mv $CRYPTO_CONFIG_BAK $CRYPTO_CONFIG_H
+fi
diff --git a/framework/scripts/assemble_changelog.py b/framework/scripts/assemble_changelog.py
new file mode 100755
index 0000000..07e6fc5
--- /dev/null
+++ b/framework/scripts/assemble_changelog.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python3
+
+"""Assemble Mbed TLS change log entries into the change log file.
+
+Add changelog entries to the first level-2 section.
+Create a new level-2 section for unreleased changes if needed.
+Remove the input files unless --keep-entries is specified.
+
+In each level-3 section, entries are sorted in chronological order
+(oldest first). From oldest to newest:
+* Merged entry files are sorted according to their merge date (date of
+  the merge commit that brought the commit that created the file into
+  the target branch).
+* Committed but unmerged entry files are sorted according to the date
+  of the commit that adds them.
+* Uncommitted entry files are sorted according to their modification time.
+
+You must run this program from within a git working directory.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+from collections import OrderedDict, namedtuple
+import datetime
+import functools
+import glob
+import os
+import re
+import subprocess
+import sys
+
+class InputFormatError(Exception):
+    def __init__(self, filename, line_number, message, *args, **kwargs):
+        message = '{}:{}: {}'.format(filename, line_number,
+                                     message.format(*args, **kwargs))
+        super().__init__(message)
+
+class CategoryParseError(Exception):
+    def __init__(self, line_offset, error_message):
+        self.line_offset = line_offset
+        self.error_message = error_message
+        super().__init__('{}: {}'.format(line_offset, error_message))
+
+class LostContent(Exception):
+    def __init__(self, filename, line):
+        message = ('Lost content from {}: "{}"'.format(filename, line))
+        super().__init__(message)
+
+class FilePathError(Exception):
+    def __init__(self, filenames):
+        message = ('Changelog filenames do not end with .txt: {}'.format(", ".join(filenames)))
+        super().__init__(message)
+
+# The category names we use in the changelog.
+# If you edit this, update ChangeLog.d/README.md.
+STANDARD_CATEGORIES = (
+    'API changes',
+    'Default behavior changes',
+    'Requirement changes',
+    'New deprecations',
+    'Removals',
+    'Features',
+    'Security',
+    'Bugfix',
+    'Changes',
+)
+
+# The maximum line length for an entry
+MAX_LINE_LENGTH = 80
+
+CategoryContent = namedtuple('CategoryContent', [
+    'name', 'title_line', # Title text and line number of the title
+    'body', 'body_line', # Body text and starting line number of the body
+])
+
+class ChangelogFormat:
+    """Virtual class documenting how to write a changelog format class."""
+
+    @classmethod
+    def extract_top_version(cls, changelog_file_content):
+        """Split out the top version section.
+
+        If the top version is already released, create a new top
+        version section for an unreleased version.
+
+        Return ``(header, top_version_title, top_version_body, trailer)``
+        where the "top version" is the existing top version section if it's
+        for unreleased changes, and a newly created section otherwise.
+        To assemble the changelog after modifying top_version_body,
+        concatenate the four pieces.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def version_title_text(cls, version_title):
+        """Return the text of a formatted version section title."""
+        raise NotImplementedError
+
+    @classmethod
+    def split_categories(cls, version_body):
+        """Split a changelog version section body into categories.
+
+        Return a list of `CategoryContent`; the name is the category title
+        without any formatting.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def format_category(cls, title, body):
+        """Construct the text of a category section from its title and body."""
+        raise NotImplementedError
+
+class TextChangelogFormat(ChangelogFormat):
+    """The traditional Mbed TLS changelog format."""
+
+    _unreleased_version_text = '= {} x.x.x branch released xxxx-xx-xx'
+    @classmethod
+    def is_released_version(cls, title):
+        # Look for an incomplete release date
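+        # For example, a title like '= Mbed TLS 3.6.0 branch released
+        # 2024-03-28' counts as released, while the placeholder date in
+        # 'released xxxx-xx-xx' does not.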
+        return not re.search(r'[0-9x]{4}-[0-9x]{2}-[0-9x]?x', title)
+
+    _top_version_re = re.compile(r'(?:\A|\n)(=[^\n]*\n+)(.*?\n)(?:=|$)',
+                                 re.DOTALL)
+    _name_re = re.compile(r'=\s(.*)\s[0-9x]+\.', re.DOTALL)
+    @classmethod
+    def extract_top_version(cls, changelog_file_content):
+        """A version section starts with a line starting with '='."""
+        m = re.search(cls._top_version_re, changelog_file_content)
+        top_version_start = m.start(1)
+        top_version_end = m.end(2)
+        top_version_title = m.group(1)
+        top_version_body = m.group(2)
+        name = re.match(cls._name_re, top_version_title).group(1)
+        if cls.is_released_version(top_version_title):
+            top_version_end = top_version_start
+            top_version_title = cls._unreleased_version_text.format(name) + '\n\n'
+            top_version_body = ''
+        return (changelog_file_content[:top_version_start],
+                top_version_title, top_version_body,
+                changelog_file_content[top_version_end:])
+
+    @classmethod
+    def version_title_text(cls, version_title):
+        return re.sub(r'\n.*', '', version_title, flags=re.DOTALL)
+
+    _category_title_re = re.compile(r'(^\w.*)\n+', re.MULTILINE)
+    @classmethod
+    def split_categories(cls, version_body):
+        """A category title is a line with the title in column 0."""
+        if not version_body:
+            return []
+        title_matches = list(re.finditer(cls._category_title_re, version_body))
+        if not title_matches or title_matches[0].start() != 0:
+            # There is junk before the first category.
+            raise CategoryParseError(0, 'Junk found where category expected')
+        title_starts = [m.start(1) for m in title_matches]
+        body_starts = [m.end(0) for m in title_matches]
+        body_ends = title_starts[1:] + [len(version_body)]
+        bodies = [version_body[body_start:body_end].rstrip('\n') + '\n'
+                  for (body_start, body_end) in zip(body_starts, body_ends)]
+        title_lines = [version_body[:pos].count('\n') for pos in title_starts]
+        body_lines = [version_body[:pos].count('\n') for pos in body_starts]
+        return [CategoryContent(title_match.group(1), title_line,
+                                body, body_line)
+                for title_match, title_line, body, body_line
+                in zip(title_matches, title_lines, bodies, body_lines)]
+
+    @classmethod
+    def format_category(cls, title, body):
+        # `split_categories` ensures that each body ends with a newline.
+        # Make sure that there is additionally a blank line between categories.
+        if not body.endswith('\n\n'):
+            body += '\n'
+        return title + '\n' + body
+
+class ChangeLog:
+    """An Mbed TLS changelog.
+
+    A changelog file consists of some header text followed by one or
+    more version sections. The version sections are in reverse
+    chronological order. Each version section consists of a title and a body.
+
+    The body of a version section consists of zero or more category
+    subsections. Each category subsection consists of a title and a body.
+
+    A changelog entry file has the same format as the body of a version section.
+
+    A `ChangelogFormat` object defines the concrete syntax of the changelog.
+    Entry files must have the same format as the changelog file.
+    """
+
+    # Only accept dotted version numbers (e.g. "3.1", not "3").
+    # Refuse ".x" in a version number where x is a letter: this indicates
+    # a version that is not yet released. Something like "3.1a" is accepted.
+    _version_number_re = re.compile(r'[0-9]+\.[0-9A-Za-z.]+')
+    _incomplete_version_number_re = re.compile(r'.*\.[A-Za-z]')
+    _only_url_re = re.compile(r'^\s*\w+://\S+\s*$')
+    _has_url_re = re.compile(r'.*://.*')
+
+    def add_categories_from_text(self, filename, line_offset,
+                                 text, allow_unknown_category):
+        """Parse a version section or entry file."""
+        try:
+            categories = self.format.split_categories(text)
+        except CategoryParseError as e:
+            raise InputFormatError(filename, line_offset + e.line_offset,
+                                   e.error_message)
+        for category in categories:
+            if not allow_unknown_category and \
+               category.name not in self.categories:
+                raise InputFormatError(filename,
+                                       line_offset + category.title_line,
+                                       'Unknown category: "{}"',
+                                       category.name)
+
+            body_split = category.body.splitlines()
+
+            for line_number, line in enumerate(body_split, 1):
+                if not self._only_url_re.match(line) and \
+                   len(line) > MAX_LINE_LENGTH:
+                    long_url_msg = '. URL exceeding length limit must be alone in its line.' \
+                        if self._has_url_re.match(line) else ""
+                    raise InputFormatError(filename,
+                                           category.body_line + line_number,
+                                           'Line is longer than allowed: '
+                                           'Length {} (Max {}){}',
+                                           len(line), MAX_LINE_LENGTH,
+                                           long_url_msg)
+
+            self.categories[category.name] += category.body
+
+    def __init__(self, input_stream, changelog_format):
+        """Create a changelog object.
+
+        Populate the changelog object from the content of the file
+        input_stream.
+        """
+        self.format = changelog_format
+        whole_file = input_stream.read()
+        (self.header,
+         self.top_version_title, top_version_body,
+         self.trailer) = self.format.extract_top_version(whole_file)
+        # Split the top version section into categories.
+        self.categories = OrderedDict()
+        for category in STANDARD_CATEGORIES:
+            self.categories[category] = ''
+        offset = (self.header + self.top_version_title).count('\n') + 1
+
+        self.add_categories_from_text(input_stream.name, offset,
+                                      top_version_body, True)
+
+    def add_file(self, input_stream):
+        """Add changelog entries from a file.
+        """
+        self.add_categories_from_text(input_stream.name, 1,
+                                      input_stream.read(), False)
+
+    def write(self, filename):
+        """Write the changelog to the specified file.
+        """
+        with open(filename, 'w', encoding='utf-8') as out:
+            out.write(self.header)
+            out.write(self.top_version_title)
+            for title, body in self.categories.items():
+                if not body:
+                    continue
+                out.write(self.format.format_category(title, body))
+            out.write(self.trailer)
+
+
+@functools.total_ordering
+class EntryFileSortKey:
+    """This classes defines an ordering on changelog entry files: older < newer.
+
+    * Merged entry files are sorted according to their merge date (date of
+      the merge commit that brought the commit that created the file into
+      the target branch).
+    * Committed but unmerged entry files are sorted according to the date
+      of the commit that adds them.
+    * Uncommitted entry files are sorted according to their modification time.
+
+    This class assumes that the file is in a git working directory with
+    the target branch checked out.
+    """
+
+    # Categories of files. A lower number is considered older.
+    MERGED = 0
+    COMMITTED = 1
+    LOCAL = 2
+
+    @staticmethod
+    def creation_hash(filename):
+        """Return the git commit id at which the given file was created.
+
+        Return None if the file was never checked into git.
+        """
+        hashes = subprocess.check_output(['git', 'log', '--format=%H',
+                                          '--follow',
+                                          '--', filename])
+        m = re.search('(.+)$', hashes.decode('ascii'))
+        if not m:
+            # The git output is empty. This means that the file was
+            # never checked in.
+            return None
+        # The last commit in the log is the oldest one, which is when the
+        # file was created.
+        return m.group(0)
+
+    @staticmethod
+    def list_merges(some_hash, target, *options):
+        """List merge commits from some_hash to target.
+
+        Pass options to git to select which commits are included.
+        """
+        text = subprocess.check_output(['git', 'rev-list',
+                                        '--merges', *options,
+                                        '..'.join([some_hash, target])])
+        return text.decode('ascii').rstrip('\n').split('\n')
+
+    @classmethod
+    def merge_hash(cls, some_hash):
+        """Return the git commit id at which the given commit was merged.
+
+        Return None if the given commit was never merged.
+        """
+        target = 'HEAD'
+        # List the merges from some_hash to the target in two ways.
+        # The ancestry list is the ones that are both descendants of
+        # some_hash and ancestors of the target.
+        ancestry = frozenset(cls.list_merges(some_hash, target,
+                                             '--ancestry-path'))
+        # The first_parents list only contains merges that are directly
+        # on the target branch. We want it in reverse order (oldest first).
+        first_parents = cls.list_merges(some_hash, target,
+                                        '--first-parent', '--reverse')
+        # Look for the oldest merge commit that's both on the direct path
+        # and directly on the target branch. That's the place where some_hash
+        # was merged on the target branch. See
+        # https://stackoverflow.com/questions/8475448/find-merge-commit-which-include-a-specific-commit
+        for commit in first_parents:
+            if commit in ancestry:
+                return commit
+        return None
+
+    @staticmethod
+    def commit_timestamp(commit_id):
+        """Return the timestamp of the given commit."""
+        text = subprocess.check_output(['git', 'show', '-s',
+                                        '--format=%ct',
+                                        commit_id])
+        return datetime.datetime.utcfromtimestamp(int(text))
+
+    @staticmethod
+    def file_timestamp(filename):
+        """Return the modification timestamp of the given file."""
+        mtime = os.stat(filename).st_mtime
+        return datetime.datetime.fromtimestamp(mtime)
+
+    def __init__(self, filename):
+        """Determine position of the file in the changelog entry order.
+
+        This constructor returns an object that can be used with comparison
+        operators, with `sort` and `sorted`, etc. Older entries are sorted
+        before newer entries.
+        """
+        self.filename = filename
+        creation_hash = self.creation_hash(filename)
+        if not creation_hash:
+            self.category = self.LOCAL
+            self.datetime = self.file_timestamp(filename)
+            return
+        merge_hash = self.merge_hash(creation_hash)
+        if not merge_hash:
+            self.category = self.COMMITTED
+            self.datetime = self.commit_timestamp(creation_hash)
+            return
+        self.category = self.MERGED
+        self.datetime = self.commit_timestamp(merge_hash)
+
+    def sort_key(self):
+        """"Return a concrete sort key for this entry file sort key object.
+
+        ``ts1 < ts2`` is implemented as ``ts1.sort_key() < ts2.sort_key()``.
+        """
+        return (self.category, self.datetime, self.filename)
+
+    def __eq__(self, other):
+        return self.sort_key() == other.sort_key()
+
+    def __lt__(self, other):
+        return self.sort_key() < other.sort_key()
+
+
+def check_output(generated_output_file, main_input_file, merged_files):
+    """Make sanity checks on the generated output.
+
+    The intent of these sanity checks is to have reasonable confidence
+    that no content has been lost.
+
+    The sanity check is that every line that is present in an input file
+    is also present in an output file. This is not perfect but good enough
+    for now.
+    """
+    with open(generated_output_file, 'r', encoding='utf-8') as fd:
+        generated_output = set(fd)
+        for line in open(main_input_file, 'r', encoding='utf-8'):
+            if line not in generated_output:
+                raise LostContent('original file', line)
+        for merged_file in merged_files:
+            for line in open(merged_file, 'r', encoding='utf-8'):
+                if line not in generated_output:
+                    raise LostContent(merged_file, line)
+
+def finish_output(changelog, output_file, input_file, merged_files):
+    """Write the changelog to the output file.
+
+    The input file and the list of merged files are used only for sanity
+    checks on the output.
+    """
+    if os.path.exists(output_file) and not os.path.isfile(output_file):
+        # The output is a non-regular file (e.g. pipe). Write to it directly.
+        output_temp = output_file
+    else:
+        # The output is a regular file. Write to a temporary file,
+        # then move it into place atomically.
+        output_temp = output_file + '.tmp'
+    changelog.write(output_temp)
+    check_output(output_temp, input_file, merged_files)
+    if output_temp != output_file:
+        os.rename(output_temp, output_file)
+
+def remove_merged_entries(files_to_remove):
+    for filename in files_to_remove:
+        os.remove(filename)
+
+def list_files_to_merge(options):
+    """List the entry files to merge, oldest first.
+
+    "Oldest" is defined by `EntryFileSortKey`.
+
+    Also check that all entries have the required .txt extension.
+    """
+    files_to_merge = glob.glob(os.path.join(options.dir, '*'))
+
+    # Ignore 00README.md
+    readme = os.path.join(options.dir, "00README.md")
+    if readme in files_to_merge:
+        files_to_merge.remove(readme)
+
+    # Identify files without the required .txt extension
+    bad_files = [x for x in files_to_merge if not x.endswith(".txt")]
+    if bad_files:
+        raise FilePathError(bad_files)
+
+    files_to_merge.sort(key=EntryFileSortKey)
+    return files_to_merge
+
+def merge_entries(options):
+    """Merge changelog entries into the changelog file.
+
+    Read the changelog file from options.input.
+    Check that all entries have a .txt extension.
+    Read entries to merge from the directory options.dir.
+    Write the new changelog to options.output.
+    Remove the merged entries if options.keep_entries is false.
+    """
+    with open(options.input, 'r', encoding='utf-8') as input_file:
+        changelog = ChangeLog(input_file, TextChangelogFormat)
+    files_to_merge = list_files_to_merge(options)
+    if not files_to_merge:
+        sys.stderr.write('There are no pending changelog entries.\n')
+        return
+    for filename in files_to_merge:
+        with open(filename, 'r', encoding='utf-8') as input_file:
+            changelog.add_file(input_file)
+    finish_output(changelog, options.output, options.input, files_to_merge)
+    if not options.keep_entries:
+        remove_merged_entries(files_to_merge)
+
+def show_file_timestamps(options):
+    """List the files to merge and their timestamp.
+
+    This is only intended for debugging purposes.
+    """
+    files = list_files_to_merge(options)
+    for filename in files:
+        ts = EntryFileSortKey(filename)
+        print(ts.category, ts.datetime, filename)
+
+def set_defaults(options):
+    """Add default values for missing options."""
+    output_file = getattr(options, 'output', None)
+    if output_file is None:
+        options.output = options.input
+    if getattr(options, 'keep_entries', None) is None:
+        options.keep_entries = (output_file is not None)
+
+def main():
+    """Command line entry point."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--dir', '-d', metavar='DIR',
+                        default='ChangeLog.d',
+                        help='Directory to read entries from'
+                             ' (default: ChangeLog.d)')
+    parser.add_argument('--input', '-i', metavar='FILE',
+                        default='ChangeLog',
+                        help='Existing changelog file to read from and augment'
+                             ' (default: ChangeLog)')
+    parser.add_argument('--keep-entries',
+                        action='store_true', dest='keep_entries', default=None,
+                        help='Keep the files containing entries'
+                             ' (default: remove them if --output/-o is not specified)')
+    parser.add_argument('--no-keep-entries',
+                        action='store_false', dest='keep_entries',
+                        help='Remove the files containing entries after they are merged'
+                             ' (default: remove them if --output/-o is not specified)')
+    parser.add_argument('--output', '-o', metavar='FILE',
+                        help='Output changelog file'
+                             ' (default: overwrite the input)')
+    parser.add_argument('--list-files-only',
+                        action='store_true',
+                        help=('Only list the files that would be processed '
+                              '(with some debugging information)'))
+    options = parser.parse_args()
+    set_defaults(options)
+    if options.list_files_only:
+        show_file_timestamps(options)
+        return
+    merge_entries(options)
+
+if __name__ == '__main__':
+    main()
diff --git a/framework/scripts/check-doxy-blocks.pl b/framework/scripts/check-doxy-blocks.pl
new file mode 100755
index 0000000..aa121b6
--- /dev/null
+++ b/framework/scripts/check-doxy-blocks.pl
@@ -0,0 +1,76 @@
+#!/usr/bin/env perl
+
+# Detect comment blocks that are likely meant to be doxygen blocks but aren't.
+#
+# More precisely, look for normal comment block containing '\'.
+# Of course one could use doxygen warnings, eg with:
+#   sed -e '/EXTRACT/s/YES/NO/' doxygen/mbedtls.doxyfile | doxygen -
+# but that would warn about any undocumented item, while our goal is to find
+# items that are documented, but not marked as such by mistake.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use warnings;
+use strict;
+use File::Basename;
+
+# C/header files in the following directories will be checked
+my @mbedtls_directories = qw(include/mbedtls library doxygen/input);
+my @tf_psa_crypto_directories = qw(include/psa include/tf-psa-crypto
+                                   drivers/builtin/include/mbedtls
+                                   drivers/builtin/src core doxygen/input);
+
+# very naive pattern to find directives:
+# everything with a backslash except '\0' and backslash at EOL
+my $doxy_re = qr/\\(?!0|\n)/;
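+# For example, a block like '/* \brief Foo */' (normal comment containing a
+# directive) is reported, while '/** \brief Foo */' (doxygen comment) is not.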
+
+# Return an error code to the environment if a potential error in the
+# source code is found.
+my $exit_code = 0;
+
+sub check_file {
+    my ($fname) = @_;
+    open my $fh, '<', $fname or die "Failed to open '$fname': $!\n";
+
+    # first line of the last normal comment block,
+    # or 0 if not in a normal comment block
+    my $block_start = 0;
+    while (my $line = <$fh>) {
+        $block_start = $.   if $line =~ m/\/\*(?![*!])/;
+        $block_start = 0    if $line =~ m/\*\//;
+        if ($block_start and $line =~ m/$doxy_re/) {
+            print "$fname:$block_start: directive on line $.\n";
+            $block_start = 0; # report only one directive per block
+            $exit_code = 1;
+        }
+    }
+
+    close $fh;
+}
+
+sub check_dir {
+    my ($dirname) = @_;
+    for my $file (<$dirname/*.[ch]>) {
+        check_file($file);
+    }
+}
+
+open my $project_file, '<', "scripts/project_name.txt" or die "This script must be run from Mbed TLS or TF-PSA-Crypto root directory";
+my $project = <$project_file>;
+chomp($project);
+my @directories;
+
+if ($project eq "TF-PSA-Crypto") {
+    @directories = @tf_psa_crypto_directories
+} elsif ($project eq "Mbed TLS") {
+    @directories = @mbedtls_directories
+}
+# Check all directories relevant to the detected project.
+for my $dir (@directories) {
+    check_dir($dir)
+}
+
+exit $exit_code;
+
+__END__
diff --git a/framework/scripts/check-python-files.sh b/framework/scripts/check-python-files.sh
new file mode 100755
index 0000000..a51766f
--- /dev/null
+++ b/framework/scripts/check-python-files.sh
@@ -0,0 +1,68 @@
+#! /usr/bin/env sh
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# Purpose: check Python files for potential programming errors or maintenance
+# hurdles. Run pylint to detect some potential mistakes and enforce PEP8
+# coding standards. Run mypy to perform static type checking.
+
+# We'll keep going on errors and report the status at the end.
+ret=0
+
+if type python3 >/dev/null 2>/dev/null; then
+    PYTHON=python3
+else
+    PYTHON=python
+fi
+
+check_version () {
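+    # Note that $1 (the module name) is expanded by the shell inside the
+    # unquoted here-document; the wanted version is passed through the
+    # command line ("$2") and read back via sys.argv.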
+    $PYTHON - "$2" <<EOF
+import packaging.version
+import sys
+import $1 as package
+actual = package.__version__
+wanted = sys.argv[1]
+if packaging.version.parse(actual) < packaging.version.parse(wanted):
+    sys.stderr.write("$1: version %s is too old (want %s)\n" % (actual, wanted))
+    exit(1)
+EOF
+}
+
+can_pylint () {
+    # Pylint 1.5.2 from Ubuntu 16.04 is too old:
+    #     E: 34, 0: Unable to import 'mbedtls_framework' (import-error)
+    # Pylint 1.8.3 from Ubuntu 18.04 passed on the first commit containing this line.
+    check_version pylint 1.8.3
+}
+
+can_mypy () {
+    # mypy 0.770 is too old:
+    #     framework/scripts/test_psa_constant_names.py:34: error: Cannot find implementation or library stub for module named 'mbedtls_framework'
+    # mypy 0.780 from pip passed on the first commit containing this line.
+    check_version mypy.version 0.780
+}
+
+# With just a --can-xxx option, check whether the tool for xxx is available
+# with an acceptable version, and exit without running any checks. The exit
+# status is true if the tool is available and acceptable and false otherwise.
+if [ "$1" = "--can-pylint" ]; then
+    can_pylint
+    exit
+elif [ "$1" = "--can-mypy" ]; then
+    can_mypy
+    exit
+fi
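+
+# Example (hypothetical invocation from the repository root):
+#   framework/scripts/check-python-files.sh --can-pylint && echo "pylint OK"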
+
+echo 'Running pylint ...'
+$PYTHON -m pylint framework/scripts/*.py framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py || {
+    echo >&2 "pylint reported errors"
+    ret=1
+}
+
+echo
+echo 'Running mypy ...'
+$PYTHON -m mypy framework/scripts/*.py framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py ||
+  ret=1
+
+exit $ret
diff --git a/framework/scripts/check_files.py b/framework/scripts/check_files.py
new file mode 100755
index 0000000..d3a61c1
--- /dev/null
+++ b/framework/scripts/check_files.py
@@ -0,0 +1,575 @@
+#!/usr/bin/env python3
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script checks the current state of the source code for minor issues,
+including incorrect file permissions, presence of tabs, non-Unix line endings,
+trailing whitespace, and presence of UTF-8 BOM.
+Note: requires Python 3, must be run from Mbed TLS or TF-PSA-Crypto root.
+"""
+
+import argparse
+import codecs
+import inspect
+import logging
+import os
+import re
+import subprocess
+import sys
+try:
+    from typing import FrozenSet, Optional, Pattern # pylint: disable=unused-import
+except ImportError:
+    pass
+
+from mbedtls_framework import build_tree
+
+
+class FileIssueTracker:
+    """Base class for file-wide issue tracking.
+
+    To implement a checker that processes a file as a whole, inherit from
+    this class and implement `check_file_for_issue` and define ``heading``.
+
+    ``suffix_exemptions``: files whose name ends with a string in this set
+    will not be checked.
+
+    ``path_exemptions``: files whose path (relative to the root of the source
+    tree) matches this regular expression will not be checked. This can be
+    ``None`` to match no path. Paths are normalized and converted to ``/``
+    separators before matching.
+
+    ``heading``: human-readable description of the issue
+    """
+
+    suffix_exemptions = frozenset() #type: FrozenSet[str]
+    path_exemptions = None #type: Optional[Pattern[str]]
+    # heading must be defined in derived classes.
+    # pylint: disable=no-member
+
+    def __init__(self):
+        self.files_with_issues = {}
+
+    @staticmethod
+    def normalize_path(filepath):
+        """Normalize ``filepath`` with / as the directory separator."""
+        filepath = os.path.normpath(filepath)
+        # On Windows, we may have backslashes to separate directories.
+        # We need slashes to match exemption lists.
+        # Note that str.split() splits on a whole string, not on any of
+        # its characters, so fold the alternate separator first.
+        if os.path.altsep is not None:
+            filepath = filepath.replace(os.path.altsep, os.path.sep)
+        return '/'.join(filepath.split(os.path.sep))
+
+    def should_check_file(self, filepath):
+        """Whether the given file name should be checked.
+
+        Files whose name ends with a string listed in ``self.suffix_exemptions``
+        or whose path matches ``self.path_exemptions`` will not be checked.
+        """
+        for files_exemption in self.suffix_exemptions:
+            if filepath.endswith(files_exemption):
+                return False
+        if self.path_exemptions and \
+           re.match(self.path_exemptions, self.normalize_path(filepath)):
+            return False
+        return True
+
+    def check_file_for_issue(self, filepath):
+        """Check the specified file for the issue that this class is for.
+
+        Subclasses must implement this method.
+        """
+        raise NotImplementedError
+
+    def record_issue(self, filepath, line_number):
+        """Record that an issue was found at the specified location."""
+        if filepath not in self.files_with_issues.keys():
+            self.files_with_issues[filepath] = []
+        self.files_with_issues[filepath].append(line_number)
+
+    def output_file_issues(self, logger):
+        """Log all the locations where the issue was found."""
+        if self.files_with_issues.values():
+            logger.info(self.heading)
+            for filename, lines in sorted(self.files_with_issues.items()):
+                if lines:
+                    logger.info("{}: {}".format(
+                        filename, ", ".join(str(x) for x in lines)
+                    ))
+                else:
+                    logger.info(filename)
+            logger.info("")
+
+BINARY_FILE_PATH_RE_LIST = [
+    r'docs/.*\.pdf\Z',
+    r'docs/.*\.png\Z',
+    r'tf-psa-crypto/docs/.*\.pdf\Z',
+    r'tf-psa-crypto/docs/.*\.png\Z',
+    r'programs/fuzz/corpuses/[^.]+\Z',
+    r'framework/data_files/[^.]+\Z',
+    r'framework/data_files/.*\.(crt|csr|db|der|key|pubkey)\Z',
+    r'framework/data_files/.*\.req\.[^/]+\Z',
+    r'framework/data_files/.*malformed[^/]+\Z',
+    r'framework/data_files/format_pkcs12\.fmt\Z',
+    r'framework/data_files/.*\.bin\Z',
+]
+BINARY_FILE_PATH_RE = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST))
+
+class LineIssueTracker(FileIssueTracker):
+    """Base class for line-by-line issue tracking.
+
+    To implement a checker that processes files line by line, inherit from
+    this class and implement `issue_with_line`.
+    """
+
+    # Exclude binary files.
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def issue_with_line(self, line, filepath, line_number):
+        """Check the specified line for the issue that this class is for.
+
+        Subclasses must implement this method.
+        """
+        raise NotImplementedError
+
+    def check_file_line(self, filepath, line, line_number):
+        if self.issue_with_line(line, filepath, line_number):
+            self.record_issue(filepath, line_number)
+
+    def check_file_for_issue(self, filepath):
+        """Check the lines of the specified file.
+
+        Subclasses must implement the ``issue_with_line`` method.
+        """
+        with open(filepath, "rb") as f:
+            for i, line in enumerate(iter(f.readline, b"")):
+                self.check_file_line(filepath, line, i + 1)
+
+
+def is_windows_file(filepath):
+    _root, ext = os.path.splitext(filepath)
+    return ext in ('.bat', '.dsp', '.dsw', '.sln', '.vcxproj')
+
+
+class ShebangIssueTracker(FileIssueTracker):
+    """Track files with a bad, missing or extraneous shebang line.
+
+    Executable scripts must start with a valid shebang (#!) line.
+    """
+
+    heading = "Invalid shebang line:"
+
+    # Allow either /bin/sh, /bin/bash, or /usr/bin/env.
+    # Allow at most one argument (this is a Linux limitation).
+    # For sh and bash, the argument if present must be options.
+    # For env, the argument must be the base name of the interpreter.
+    _shebang_re = re.compile(rb'^#! ?(?:/bin/(bash|sh)(?: -[^\n ]*)?'
+                             rb'|/usr/bin/env ([^\n /]+))$')
+    _extensions = {
+        b'bash': 'sh',
+        b'perl': 'pl',
+        b'python3': 'py',
+        b'sh': 'sh',
+    }
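+    # For example, a script starting with '#!/usr/bin/env python3' is
+    # only accepted if its file name ends with '.py'.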
+
+    path_exemptions = re.compile(r'framework/scripts/quiet/.*')
+
+    def is_valid_shebang(self, first_line, filepath):
+        m = re.match(self._shebang_re, first_line)
+        if not m:
+            return False
+        interpreter = m.group(1) or m.group(2)
+        if interpreter not in self._extensions:
+            return False
+        if not filepath.endswith('.' + self._extensions[interpreter]):
+            return False
+        return True
+
+    def check_file_for_issue(self, filepath):
+        is_executable = os.access(filepath, os.X_OK)
+        with open(filepath, "rb") as f:
+            first_line = f.readline()
+        if first_line.startswith(b'#!'):
+            if not is_executable:
+                # Shebang on a non-executable file
+                self.files_with_issues[filepath] = None
+            elif not self.is_valid_shebang(first_line, filepath):
+                self.files_with_issues[filepath] = [1]
+        elif is_executable:
+            # Executable without a shebang
+            self.files_with_issues[filepath] = None
+
+
+class EndOfFileNewlineIssueTracker(FileIssueTracker):
+    """Track files that end with an incomplete line
+    (no newline character at the end of the last line)."""
+
+    heading = "Missing newline at end of file:"
+
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def check_file_for_issue(self, filepath):
+        with open(filepath, "rb") as f:
+            try:
+                f.seek(-1, 2)
+            except OSError:
+                # This script only works on regular files. If we can't seek
+                # 1 before the end, it means that this position is before
+                # the beginning of the file, i.e. that the file is empty.
+                return
+            if f.read(1) != b"\n":
+                self.files_with_issues[filepath] = None
+
+
+class Utf8BomIssueTracker(FileIssueTracker):
+    """Track files that start with a UTF-8 BOM.
+    Files should be ASCII or UTF-8. Valid UTF-8 does not start with a BOM."""
+
+    heading = "UTF-8 BOM present:"
+
+    suffix_exemptions = frozenset([".vcxproj", ".sln"])
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def check_file_for_issue(self, filepath):
+        with open(filepath, "rb") as f:
+            if f.read().startswith(codecs.BOM_UTF8):
+                self.files_with_issues[filepath] = None
+
+
+class UnicodeIssueTracker(LineIssueTracker):
+    """Track lines with invalid characters or invalid text encoding."""
+
+    heading = "Invalid UTF-8 or forbidden character:"
+
+    # Only allow valid UTF-8, and only other explicitly allowed characters.
+    # We deliberately exclude all characters that aren't a simple non-blank,
+    # non-zero-width glyph, apart from a very small set (tab, ordinary space,
+    # line breaks, "basic" no-break space and soft hyphen). In particular,
+    # non-ASCII control characters, combining characters, and Unicode state
+    # changes (e.g. right-to-left text) are forbidden.
+    # Note that we do allow some characters with a risk of visual confusion,
+    # for example '-' (U+002D HYPHEN-MINUS) vs '­' (U+00AD SOFT HYPHEN) vs
+    # '‐' (U+2010 HYPHEN), or 'A' (U+0041 LATIN CAPITAL LETTER A) vs
+    # 'Α' (U+0391 GREEK CAPITAL LETTER ALPHA).
+    GOOD_CHARACTERS = ''.join([
+        '\t\n\r -~', # ASCII (tabs and line endings are checked separately)
+        '\u00A0-\u00FF', # Latin-1 Supplement (for NO-BREAK SPACE and punctuation)
+        '\u2010-\u2027\u2030-\u205E', # General Punctuation (printable)
+        '\u2070\u2071\u2074-\u208E\u2090-\u209C', # Superscripts and Subscripts
+        '\u2190-\u21FF', # Arrows
+        '\u2200-\u22FF', # Mathematical Symbols
+        '\u2500-\u257F' # Box Drawings characters used in markdown trees
+    ])
+    # Allow any of the characters and ranges above, and anything classified
+    # as a word constituent.
+    GOOD_CHARACTERS_RE = re.compile(r'[\w{}]+\Z'.format(GOOD_CHARACTERS))
+
+    def issue_with_line(self, line, _filepath, line_number):
+        try:
+            text = line.decode('utf-8')
+        except UnicodeDecodeError:
+            return True
+        if line_number == 1 and text.startswith('\uFEFF'):
+            # Strip BOM (U+FEFF ZERO WIDTH NO-BREAK SPACE) at the beginning.
+            # Which files are allowed to have a BOM is handled in
+            # Utf8BomIssueTracker.
+            text = text[1:]
+        return not self.GOOD_CHARACTERS_RE.match(text)
+
+class UnixLineEndingIssueTracker(LineIssueTracker):
+    """Track files with non-Unix line endings (i.e. files with CR)."""
+
+    heading = "Non-Unix line endings:"
+
+    def should_check_file(self, filepath):
+        if not super().should_check_file(filepath):
+            return False
+        return not is_windows_file(filepath)
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return b"\r" in line
+
+
+class WindowsLineEndingIssueTracker(LineIssueTracker):
+    """Track files with non-Windows line endings (i.e. CR or LF not in CRLF)."""
+
+    heading = "Non-Windows line endings:"
+
+    def should_check_file(self, filepath):
+        if not super().should_check_file(filepath):
+            return False
+        return is_windows_file(filepath)
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return not line.endswith(b"\r\n") or b"\r" in line[:-2]
+
+
+class TrailingWhitespaceIssueTracker(LineIssueTracker):
+    """Track lines with trailing whitespace."""
+
+    heading = "Trailing whitespace:"
+    suffix_exemptions = frozenset([".dsp", ".md"])
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return line.rstrip(b"\r\n") != line.rstrip()
+
+
+class TabIssueTracker(LineIssueTracker):
+    """Track lines with tabs."""
+
+    heading = "Tabs present:"
+    suffix_exemptions = frozenset([
+        ".make",
+        ".pem", # some openssl dumps have tabs
+        ".sln",
+        "/.gitmodules",
+        "/Makefile",
+        "/Makefile.inc",
+        "/generate_visualc_files.pl",
+    ])
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return b"\t" in line
+
+
+class MergeArtifactIssueTracker(LineIssueTracker):
+    """Track lines with merge artifacts.
+    These are leftovers from a ``git merge`` that wasn't fully edited."""
+
+    heading = "Merge artifact:"
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        # Detect leftover git conflict markers.
+        if line.startswith(b'<<<<<<< ') or line.startswith(b'>>>>>>> '):
+            return True
+        if line.startswith(b'||||||| '): # from merge.conflictStyle=diff3
+            return True
+        if line.rstrip(b'\r\n') == b'=======' and \
+           not _filepath.endswith('.md'):
+            return True
+        return False
+
+
+def this_location():
+    frame = inspect.currentframe()
+    assert frame is not None
+    info = inspect.getframeinfo(frame)
+    return os.path.basename(info.filename), info.lineno
+THIS_FILE_BASE_NAME, LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER = this_location()
+
+class LicenseIssueTracker(LineIssueTracker):
+    """Check copyright statements and license indications.
+
+    This class only checks that statements are correct if present. It does
+    not enforce the presence of statements in each file.
+    """
+
+    heading = "License issue:"
+
+    LICENSE_EXEMPTION_RE_LIST = []
+
+    # Exempt third-party drivers which may be under a different license
+    if build_tree.looks_like_tf_psa_crypto_root(os.getcwd()):
+        LICENSE_EXEMPTION_RE_LIST.append(r'drivers/(?=(everest)/.*)')
+    elif build_tree.is_mbedtls_3_6():
+        LICENSE_EXEMPTION_RE_LIST.append(r'3rdparty/(?!(p256-m)/.*)')
+
+    LICENSE_EXEMPTION_RE_LIST += [
+        # Documentation explaining the license may have accidental
+        # false positives.
+        r'(ChangeLog|LICENSE|framework\/LICENSE|[-0-9A-Z_a-z]+\.md)\Z',
+        # Files imported from TF-M, and not used except in test builds,
+        # may be under a different license.
+        r'configs/ext/crypto_config_profile_medium\.h\Z',
+        r'configs/ext/tfm_mbedcrypto_config_profile_medium\.h\Z',
+        r'configs/ext/README\.md\Z',
+        # Third-party file.
+        r'dco\.txt\Z',
+        r'framework\/dco\.txt\Z',
+    ]
+    path_exemptions = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST +
+                                          LICENSE_EXEMPTION_RE_LIST))
+
+    COPYRIGHT_HOLDER = rb'The Mbed TLS Contributors'
+    # Catch "Copyright foo", "Copyright (C) foo", "Copyright © foo", etc.
+    COPYRIGHT_RE = re.compile(rb'.*\bcopyright\s+((?:\w|\s|[()]|[^ -~])*\w)', re.I)
+
+    SPDX_HEADER_KEY = b'SPDX-License-Identifier'
+    LICENSE_IDENTIFIER = b'Apache-2.0 OR GPL-2.0-or-later'
+    SPDX_RE = re.compile(br'.*?(' +
+                         re.escape(SPDX_HEADER_KEY) +
+                         br')(:\s*(.*?)\W*\Z|.*)', re.I)
+
+    LICENSE_MENTION_RE = re.compile(rb'.*(?:' + rb'|'.join([
+        rb'Apache License',
+        rb'General Public License',
+    ]) + rb')', re.I)
+
+    def __init__(self):
+        super().__init__()
+        # Record what problem was caused. We can't easily report it due to
+        # the structure of the script. To be fixed after
+        # https://github.com/Mbed-TLS/mbedtls/pull/2506
+        self.problem = None
+
+    def issue_with_line(self, line, filepath, line_number):
+        #pylint: disable=too-many-return-statements
+
+        # Use endswith() rather than the more correct os.path.basename()
+        # because experimentally, it makes a significant difference to
+        # the running time.
+        if filepath.endswith(THIS_FILE_BASE_NAME) and \
+           line_number > LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER:
+            # Avoid false positives from the code in this class.
+            # Also skip the rest of this file, which is highly unlikely to
+            # contain any problematic statements since we put those near the
+            # top of files.
+            return False
+
+        m = self.COPYRIGHT_RE.match(line)
+        if m and m.group(1) != self.COPYRIGHT_HOLDER:
+            self.problem = 'Invalid copyright line'
+            return True
+
+        m = self.SPDX_RE.match(line)
+        if m:
+            if m.group(1) != self.SPDX_HEADER_KEY:
+                self.problem = 'Misspelled ' + self.SPDX_HEADER_KEY.decode()
+                return True
+            if not m.group(3):
+                self.problem = 'Improperly formatted SPDX license identifier'
+                return True
+            if m.group(3) != self.LICENSE_IDENTIFIER:
+                self.problem = 'Wrong SPDX license identifier'
+                return True
+
+        m = self.LICENSE_MENTION_RE.match(line)
+        if m:
+            self.problem = 'Suspicious license mention'
+            return True
+
+        return False
+
+
+class ErrorAddIssueTracker(LineIssueTracker):
+    """Signal direct additions of error codes.
+
+    Adding a low-level error code to a high-level error code directly is
+    deprecated; use MBEDTLS_ERROR_ADD instead.
+    """
+
+    heading = "Direct addition of error codes"
+
+    _ERR_PLUS_RE = re.compile(br'MBEDTLS_ERR_\w+ *\+|'
+                              br'\+ *MBEDTLS_ERR_')
+    _EXCLUDE_RE = re.compile(br' *case ')
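+    # For example, a line such as 'return MBEDTLS_ERR_SSL_DECODE_ERROR + low;'
+    # (hypothetical) would be flagged and should use MBEDTLS_ERROR_ADD instead.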
+
+    def issue_with_line(self, line, filepath, line_number):
+        if self._ERR_PLUS_RE.search(line) and not self._EXCLUDE_RE.match(line):
+            return True
+        return False
+
+
+class IntegrityChecker:
+    """Sanity-check files under the current directory."""
+
+    def __init__(self, log_file):
+        """Instantiate the sanity checker.
+        Check files under the current directory.
+        Write a report of issues to log_file."""
+        if not build_tree.looks_like_root(os.getcwd()):
+            raise Exception("This script must be run from Mbed TLS or TF-PSA-Crypto root")
+        self.logger = None
+        self.setup_logger(log_file)
+        self.issues_to_check = [
+            ShebangIssueTracker(),
+            EndOfFileNewlineIssueTracker(),
+            Utf8BomIssueTracker(),
+            UnicodeIssueTracker(),
+            UnixLineEndingIssueTracker(),
+            WindowsLineEndingIssueTracker(),
+            TrailingWhitespaceIssueTracker(),
+            TabIssueTracker(),
+            MergeArtifactIssueTracker(),
+            LicenseIssueTracker(),
+        ]
+
+        if not build_tree.is_mbedtls_3_6():
+            self.issues_to_check.append(ErrorAddIssueTracker())
+
+    def setup_logger(self, log_file, level=logging.INFO):
+        """Log to log_file if provided, or to stderr if None."""
+        self.logger = logging.getLogger()
+        self.logger.setLevel(level)
+        if log_file:
+            handler = logging.FileHandler(log_file)
+            self.logger.addHandler(handler)
+        else:
+            console = logging.StreamHandler()
+            self.logger.addHandler(console)
+
+    @staticmethod
+    def collect_files():
+        """Return the list of files to check.
+
+        These are the regular files committed into Git.
+        """
+        bytes_output = subprocess.check_output(['git', '-C', 'framework',
+                                                'ls-files', '-z'])
+        bytes_framework_filepaths = bytes_output.split(b'\0')[:-1]
+        bytes_framework_filepaths = ["framework/".encode() + filepath
+                                     for filepath in bytes_framework_filepaths]
+
+        bytes_output = subprocess.check_output(['git', 'ls-files', '-z'])
+        bytes_filepaths = bytes_output.split(b'\0')[:-1] + \
+                          bytes_framework_filepaths
+        ascii_filepaths = map(lambda fp: fp.decode('ascii'), bytes_filepaths)
+
+        # Filter out directories. Normally Git doesn't list directories
+        # (it only knows about the files inside them), but there is
+        # at least one case where 'git ls-files' includes a directory:
+        # submodules. Just skip submodules (and any other directories).
+        ascii_filepaths = [fp for fp in ascii_filepaths
+                           if os.path.isfile(fp)]
+        # Prepend './' to files in the top-level directory so that
+        # something like `'/Makefile' in fp` matches in the top-level
+        # directory as well as in subdirectories.
+        return [fp if os.path.dirname(fp) else os.path.join(os.curdir, fp)
+                for fp in ascii_filepaths]
+
+    def check_files(self):
+        """Check all files for all issues."""
+        for issue_to_check in self.issues_to_check:
+            for filepath in self.collect_files():
+                if issue_to_check.should_check_file(filepath):
+                    issue_to_check.check_file_for_issue(filepath)
+
+    def output_issues(self):
+        """Log the issues found and their locations.
+
+        Return 1 if there were issues, 0 otherwise.
+        """
+        integrity_return_code = 0
+        for issue_to_check in self.issues_to_check:
+            if issue_to_check.files_with_issues:
+                integrity_return_code = 1
+            issue_to_check.output_file_issues(self.logger)
+        return integrity_return_code
+
+
+def run_main():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "-l", "--log_file", type=str, help="path to optional output log",
+    )
+    check_args = parser.parse_args()
+    integrity_check = IntegrityChecker(check_args.log_file)
+    integrity_check.check_files()
+    return_code = integrity_check.output_issues()
+    sys.exit(return_code)
+
+
+if __name__ == "__main__":
+    run_main()
diff --git a/framework/scripts/check_names.py b/framework/scripts/check_names.py
new file mode 100755
index 0000000..0f54275
--- /dev/null
+++ b/framework/scripts/check_names.py
@@ -0,0 +1,1197 @@
+#!/usr/bin/env python3
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script confirms that the naming of all symbols and identifiers in Mbed TLS
+is consistent with the house style and also self-consistent. It only runs
+on Linux and macOS since it depends on nm.
+
+It contains three major Python classes, TFPSACryptoCodeParser,
+MBEDTLSCodeParser and NameChecker. They all have a comprehensive "run-all"
+function (comprehensive_parse() and perform_checks()), but the individual
+functions can also be used for specific needs.
+
+CodeParser (a base class inherited by TFPSACryptoCodeParser and MBEDTLSCodeParser)
+makes heavy use of regular expressions to parse the code, and is dependent on
+the current code formatting. Many Python C parser libraries require
+preprocessed C code, which means no macro parsing. Compiler tools are also not
+very helpful when we want the exact location in the original source (which
+becomes impossible when e.g. comments are stripped).
+
+NameChecker performs the following checks:
+
+- All exported and available symbols in the library object files are
+  explicitly declared in the header files. This uses the nm command.
+- All macros, constants, and identifiers (function names, struct names, etc)
+  follow the required regex pattern.
+- Typo checking: All words that begin with MBED|PSA exist as macros or constants.
+
+The script returns 0 on success, 1 on test failure, and 2 if there is a script
+error. It must be run from Mbed TLS or TF-PSA-Crypto root.
+"""
+
+import abc
+import argparse
+import fnmatch
+import glob
+import textwrap
+import os
+import sys
+import traceback
+import re
+import enum
+import shutil
+import subprocess
+import logging
+import tempfile
+
+import project_scripts # pylint: disable=unused-import
+from mbedtls_framework import build_tree
+
+
+# Naming patterns to check against. These are defined outside the NameCheck
+# class for ease of modification.
+PUBLIC_MACRO_PATTERN = r"^(MBEDTLS|PSA|TF_PSA)_[0-9A-Z_]*[0-9A-Z]$"
+INTERNAL_MACRO_PATTERN = r"^[0-9A-Za-z_]*[0-9A-Z]$"
+CONSTANTS_PATTERN = PUBLIC_MACRO_PATTERN
+IDENTIFIER_PATTERN = r"^(mbedtls|psa)_[0-9a-z_]*[0-9a-z]$"
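+# For example, 'MBEDTLS_AES_C' and 'PSA_WANT_ALG_SHA_256' fit
+# PUBLIC_MACRO_PATTERN, while 'mbedtls_cipher_setup' fits IDENTIFIER_PATTERN.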
+
+class Match(): # pylint: disable=too-few-public-methods
+    """
+    A class representing a match, together with its found position.
+
+    Fields:
+    * filename: the file that the match was in.
+    * line: the full line containing the match.
+    * line_no: the line number.
+    * pos: a tuple of (start, end) positions on the line where the match is.
+    * name: the match itself.
+    """
+    def __init__(self, filename, line, line_no, pos, name):
+        # pylint: disable=too-many-arguments
+        self.filename = filename
+        self.line = line
+        self.line_no = line_no
+        self.pos = pos
+        self.name = name
+
+    def __str__(self):
+        """
+        Return a formatted code listing representation of the erroneous line.
+        """
+        gutter = format(self.line_no, "4d")
+        underline = self.pos[0] * " " + (self.pos[1] - self.pos[0]) * "^"
+
+        return (
+            " {0} |\n".format(" " * len(gutter)) +
+            " {0} | {1}".format(gutter, self.line) +
+            " {0} | {1}\n".format(" " * len(gutter), underline)
+        )
+
+class Problem(abc.ABC): # pylint: disable=too-few-public-methods
+    """
+    An abstract parent class representing a form of static analysis error.
+    It extends an Abstract Base Class, which means it cannot be instantiated
+    directly and mandates that certain abstract methods be implemented in
+    subclasses.
+    """
+    # Class variable to control the quietness of all problems
+    quiet = False
+    def __init__(self):
+        self.textwrapper = textwrap.TextWrapper()
+        self.textwrapper.width = 80
+        self.textwrapper.initial_indent = "    > "
+        self.textwrapper.subsequent_indent = "      "
+
+    def __str__(self):
+        """
+        Unified string representation method for all Problems.
+        """
+        if self.__class__.quiet:
+            return self.quiet_output()
+        return self.verbose_output()
+
+    @abc.abstractmethod
+    def quiet_output(self):
+        """
+        The output when --quiet is enabled.
+        """
+        pass
+
+    @abc.abstractmethod
+    def verbose_output(self):
+        """
+        The default output with explanation and code snippet if appropriate.
+        """
+        pass
+
+class SymbolNotInHeader(Problem): # pylint: disable=too-few-public-methods
+    """
+    A problem that occurs when an exported/available symbol in the object file
+    is not explicitly declared in header files. Created with
+    NameCheck.check_symbols_declared_in_header()
+
+    Fields:
+    * symbol_name: the name of the symbol.
+    """
+    def __init__(self, symbol_name):
+        self.symbol_name = symbol_name
+        Problem.__init__(self)
+
+    def quiet_output(self):
+        return "{0}".format(self.symbol_name)
+
+    def verbose_output(self):
+        return self.textwrapper.fill(
+            "'{0}' was found as an available symbol in the output of nm, "
+            "however it was not declared in any header files."
+            .format(self.symbol_name))
+
+class PatternMismatch(Problem): # pylint: disable=too-few-public-methods
+    """
+    A problem that occurs when something doesn't match the expected pattern.
+    Created with NameCheck.check_match_pattern()
+
+    Fields:
+    * pattern: the expected regex pattern
+    * match: the Match object in question
+    """
+    def __init__(self, pattern, match):
+        self.pattern = pattern
+        self.match = match
+        Problem.__init__(self)
+
+
+    def quiet_output(self):
+        return (
+            "{0}:{1}:{2}"
+            .format(self.match.filename, self.match.line_no, self.match.name)
+        )
+
+    def verbose_output(self):
+        return self.textwrapper.fill(
+            "{0}:{1}: '{2}' does not match the required pattern '{3}'."
+            .format(
+                self.match.filename,
+                self.match.line_no,
+                self.match.name,
+                self.pattern
+            )
+        ) + "\n" + str(self.match)
+
+class Typo(Problem): # pylint: disable=too-few-public-methods
+    """
+    A problem that occurs when a word using MBED or PSA doesn't
+    appear to be defined as a constant or enum value. Created with
+    NameCheck.check_for_typos()
+
+    Fields:
+    * match: the Match object of the MBED|PSA name in question.
+    """
+    def __init__(self, match):
+        self.match = match
+        Problem.__init__(self)
+
+    def quiet_output(self):
+        return (
+            "{0}:{1}:{2}"
+            .format(self.match.filename, self.match.line_no, self.match.name)
+        )
+
+    def verbose_output(self):
+        return self.textwrapper.fill(
+            "{0}:{1}: '{2}' looks like a typo. It was not found in any "
+            "macros or any enums. If this is not a typo, put "
+            "//no-check-names after it."
+            .format(self.match.filename, self.match.line_no, self.match.name)
+        ) + "\n" + str(self.match)
+
+class CodeParser():
+    """
+    Class for retrieving files and parsing the code. This can be used
+    independently of the checks that NameChecker performs, for example for
+    list_internal_identifiers.py.
+    """
+    def __init__(self, log):
+        self.log = log
+        if not build_tree.looks_like_root(os.getcwd()):
+            raise Exception("This script must be run from Mbed TLS or TF-PSA-Crypto root")
+
+        # Memo for storing "glob expression": set(filepaths)
+        self.files = {}
+
+        # Globally excluded filenames.
+        # Note that "*" can match directory separators in exclude lists.
+        self.excluded_files = ["*/bn_mul", "*/compat-2.x.h"]
+
+    def _parse(self, all_macros, enum_consts, identifiers,
+               excluded_identifiers, mbed_psa_words, symbols):
+        # pylint: disable=too-many-arguments
+        """
+        Parse macros, enum constants, identifiers, excluded identifiers,
+        MBED|PSA words and symbols.
+
+        Returns a dict mapping each parsed item key to the corresponding
+        list of Matches.
+        """
+
+        self.log.info("Parsing source code...")
+        self.log.debug(
+            "The following files are excluded from the search: {}"
+            .format(str(self.excluded_files))
+        )
+
+        # Remove identifier macros like mbedtls_printf or mbedtls_calloc
+        identifiers_justname = [x.name for x in identifiers]
+        actual_macros = {"public": [], "internal": []}
+        for scope in actual_macros:
+            for macro in all_macros[scope]:
+                if macro.name not in identifiers_justname:
+                    actual_macros[scope].append(macro)
+
+        self.log.debug("Found:")
+        # Aligns the counts on the assumption that none exceeds 4 digits
+        for scope in actual_macros:
+            self.log.debug("  {:4} Total {} Macros"
+                           .format(len(all_macros[scope]), scope))
+            self.log.debug("  {:4} {} Non-identifier Macros"
+                           .format(len(actual_macros[scope]), scope))
+        self.log.debug("  {:4} Enum Constants".format(len(enum_consts)))
+        self.log.debug("  {:4} Identifiers".format(len(identifiers)))
+        self.log.debug("  {:4} Exported Symbols".format(len(symbols)))
+        return {
+            "public_macros": actual_macros["public"],
+            "internal_macros": actual_macros["internal"],
+            "private_macros": all_macros["private"],
+            "enum_consts": enum_consts,
+            "identifiers": identifiers,
+            "excluded_identifiers": excluded_identifiers,
+            "symbols": symbols,
+            "mbed_psa_words": mbed_psa_words
+        }
+
+    def is_file_excluded(self, path, exclude_wildcards):
+        """Whether the given file path is excluded."""
+        # exclude_wildcards may be None. Also, consider the global exclusions.
+        exclude_wildcards = (exclude_wildcards or []) + self.excluded_files
+        for pattern in exclude_wildcards:
+            if fnmatch.fnmatch(path, pattern):
+                return True
+        return False
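+
+    # Note (illustrative, not from the original source): fnmatch lets "*"
+    # cross directory separators, so fnmatch.fnmatch("library/aes.c",
+    # "*/aes.c") is True, and a pattern such as "*/compat-2.x.h" matches at
+    # any directory depth.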
+
+    def get_all_files(self, include_wildcards, exclude_wildcards):
+        """
+        Get all files that match any of the included UNIX-style wildcards
+        and filter them into included and excluded lists.
+        While the check_names script is designed only for use on UNIX/macOS
+        (due to nm), this function alone will work fine on Windows even with
+        forward slashes in the wildcard.
+
+        Args:
+        * include_wildcards: a List of shell-style wildcards to match filepaths.
+        * exclude_wildcards: a List of shell-style wildcards to exclude.
+
+        Returns:
+        * inc_files: A List of relative filepaths for included files.
+        * exc_files: A List of relative filepaths for excluded files.
+        """
+        accumulator = set()
+        all_wildcards = include_wildcards + (exclude_wildcards or [])
+        for wildcard in all_wildcards:
+            accumulator = accumulator.union(glob.iglob(wildcard))
+
+        inc_files = []
+        exc_files = []
+        for path in accumulator:
+            if self.is_file_excluded(path, exclude_wildcards):
+                exc_files.append(path)
+            else:
+                inc_files.append(path)
+        return (inc_files, exc_files)
+
+    def get_included_files(self, include_wildcards, exclude_wildcards):
+        """
+        Get all files that match any of the included UNIX-style wildcards.
+        While the check_names script is designed only for use on UNIX/macOS
+        (due to nm), this function alone will work fine on Windows even with
+        forward slashes in the wildcard.
+
+        Args:
+        * include_wildcards: a List of shell-style wildcards to match filepaths.
+        * exclude_wildcards: a List of shell-style wildcards to exclude.
+
+        Returns a List of relative filepaths.
+        """
+        accumulator = set()
+
+        for include_wildcard in include_wildcards:
+            accumulator = accumulator.union(glob.iglob(include_wildcard))
+
+        return list(path for path in accumulator
+                    if not self.is_file_excluded(path, exclude_wildcards))
+
+    def parse_macros(self, include, exclude=None):
+        """
+        Parse all macros defined by #define preprocessor directives.
+
+        Args:
+        * include: A List of glob expressions to look for files through.
+        * exclude: A List of glob expressions for excluding files.
+
+        Returns a List of Match objects for the found macros.
+        """
+        macro_regex = re.compile(r"# *define +(?P<macro>\w+)")
+        exclusions = (
+            "asm", "inline", "EMIT", "_CRT_SECURE_NO_DEPRECATE", "MULADDC_"
+        )
+
+        files = self.get_included_files(include, exclude)
+        self.log.debug("Looking for macros in {} files".format(len(files)))
+
+        macros = []
+        for header_file in files:
+            with open(header_file, "r", encoding="utf-8") as header:
+                for line_no, line in enumerate(header):
+                    for macro in macro_regex.finditer(line):
+                        if macro.group("macro").startswith(exclusions):
+                            continue
+
+                        macros.append(Match(
+                            header_file,
+                            line,
+                            line_no,
+                            macro.span("macro"),
+                            macro.group("macro")))
+
+        return macros
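+
+    # For illustration (hypothetical lines): "#define MBEDTLS_AES_C" yields a
+    # Match whose name is "MBEDTLS_AES_C", while "#define inline __inline" is
+    # skipped because "inline" is in the exclusion list above.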
+
+    def parse_mbed_psa_words(self, include, exclude=None):
+        """
+        Parse all words in the file that begin with MBED|PSA, in and out of
+        macros, comments, anything.
+
+        Args:
+        * include: A List of glob expressions to look for files through.
+        * exclude: A List of glob expressions for excluding files.
+
+        Returns a List of Match objects for words beginning with MBED|PSA.
+        """
+        # Typos of TLS are common, hence the check below is broader than
+        # just MBEDTLS.
+        mbed_regex = re.compile(r"\b(MBED.+?|PSA)_[A-Z0-9_]*")
+        exclusions = re.compile(r"// *no-check-names|#error")
+
+        files = self.get_included_files(include, exclude)
+        self.log.debug(
+            "Looking for MBED|PSA words in {} files"
+            .format(len(files))
+        )
+
+        mbed_psa_words = []
+        for filename in files:
+            with open(filename, "r", encoding="utf-8") as fp:
+                for line_no, line in enumerate(fp):
+                    if exclusions.search(line):
+                        continue
+
+                    for name in mbed_regex.finditer(line):
+                        mbed_psa_words.append(Match(
+                            filename,
+                            line,
+                            line_no,
+                            name.span(0),
+                            name.group(0)))
+
+        return mbed_psa_words
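+
+    # For illustration (hypothetical words): "MBEDTLS_SSL_IS_CLIENT" and
+    # "PSA_KEY_TYPE_AES" both match the regex above, and so does a typo such
+    # as "MBEDLTS_SSL_IS_CLIENT", which is exactly what check_for_typos()
+    # relies on to catch misspellings.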
+
+    def parse_enum_consts(self, include, exclude=None):
+        """
+        Parse all enum value constants that are declared.
+
+        Args:
+        * include: A List of glob expressions to look for files through.
+        * exclude: A List of glob expressions for excluding files.
+
+        Returns a List of Match objects for the findings.
+        """
+        files = self.get_included_files(include, exclude)
+        self.log.debug("Looking for enum consts in {} files".format(len(files)))
+
+        # Emulate a finite state machine to parse enum declarations.
+        # OUTSIDE_KEYWORD = outside the enum keyword
+        # IN_BRACES = inside enum opening braces
+        # IN_BETWEEN = between enum keyword and opening braces
+        states = enum.Enum("FSM", ["OUTSIDE_KEYWORD", "IN_BRACES", "IN_BETWEEN"])
+        enum_consts = []
+        for header_file in files:
+            state = states.OUTSIDE_KEYWORD
+            with open(header_file, "r", encoding="utf-8") as header:
+                for line_no, line in enumerate(header):
+                    # Match typedefs and brackets only when they are at the
+                    # beginning of the line -- if they are indented, they might
+                    # be sub-structures within structs, etc.
+                    optional_c_identifier = r"([_a-zA-Z][_a-zA-Z0-9]*)?"
+                    if (state == states.OUTSIDE_KEYWORD and
+                            re.search(r"^(typedef +)?enum " + \
+                                    optional_c_identifier + \
+                                    r" *{", line)):
+                        state = states.IN_BRACES
+                    elif (state == states.OUTSIDE_KEYWORD and
+                          re.search(r"^(typedef +)?enum", line)):
+                        state = states.IN_BETWEEN
+                    elif (state == states.IN_BETWEEN and
+                          re.search(r"^{", line)):
+                        state = states.IN_BRACES
+                    elif (state == states.IN_BRACES and
+                          re.search(r"^}", line)):
+                        state = states.OUTSIDE_KEYWORD
+                    elif (state == states.IN_BRACES and
+                          not re.search(r"^ *#", line)):
+                        enum_const = re.search(r"^ *(?P<enum_const>\w+)", line)
+                        if not enum_const:
+                            continue
+
+                        enum_consts.append(Match(
+                            header_file,
+                            line,
+                            line_no,
+                            enum_const.span("enum_const"),
+                            enum_const.group("enum_const")))
+
+        return enum_consts
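+
+    # Illustrative walk-through of the state machine (hypothetical header):
+    #   "typedef enum {"       -> IN_BRACES (keyword and brace on one line)
+    #   "typedef enum"         -> IN_BETWEEN, then a leading "{" -> IN_BRACES
+    #   "    MBEDTLS_MODE_X,"  -> recorded as an enum constant while IN_BRACES
+    #   "} mbedtls_mode_t;"    -> back to OUTSIDE_KEYWORD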
+
+    IGNORED_CHUNK_REGEX = re.compile('|'.join([
+        r'/\*.*?\*/', # block comment entirely on one line
+        r'//.*', # line comment
+        r'(?P<string>")(?:[^\\\"]|\\.)*"', # string literal
+    ]))
+
+    def strip_comments_and_literals(self, line, in_block_comment):
+        """Strip comments and string literals from line.
+
+        Continuation lines are not supported.
+
+        If in_block_comment is true, assume that the line starts inside a
+        block comment.
+
+        Return updated values of (line, in_block_comment) where:
+        * Comments in line have been replaced by a space (or nothing at the
+          start or end of the line).
+        * String contents have been removed.
+        * in_block_comment indicates whether the line ends inside a block
+          comment that continues on the next line.
+        """
+
+        # Terminate current multiline comment?
+        if in_block_comment:
+            m = re.search(r"\*/", line)
+            if m:
+                in_block_comment = False
+                line = line[m.end(0):]
+            else:
+                return '', True
+
+        # Remove full comments and string literals.
+        # Do it all together to handle cases like "/*" correctly.
+        # Note that continuation lines are not supported.
+        line = re.sub(self.IGNORED_CHUNK_REGEX,
+                      lambda s: '""' if s.group('string') else ' ',
+                      line)
+
+        # Start an unfinished comment?
+        # (If `/*` was part of a complete comment, it's already been removed.)
+        m = re.search(r"/\*", line)
+        if m:
+            in_block_comment = True
+            line = line[:m.start(0)]
+
+        return line, in_block_comment
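+
+    # Illustrative example (hypothetical input): with in_block_comment=False,
+    #   strip_comments_and_literals('x = "a/*b"; /* c', False)
+    # returns ('x = ""; ', True): the string contents are replaced by '""'
+    # and the unterminated block comment is stripped to the end of the line.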
+
+    IDENTIFIER_REGEX = re.compile('|'.join([
+        # Match " something(a" or " *something(a". Functions.
+        # Assumptions:
+        # - function definition from return type to one of its arguments is
+        #   all on one line
+        # - function definition line only contains alphanumeric, asterisk,
+        #   underscore, and open bracket
+        r".* \**(\w+) *\( *\w",
+        # Match "(*something)(".
+        r".*\( *\* *(\w+) *\) *\(",
+        # Match names of named data structures.
+        r"(?:typedef +)?(?:struct|union|enum) +(\w+)(?: *{)?$",
+        # Match names of typedef instances, after closing bracket.
+        r"}? *(\w+)[;[].*",
+    ]))
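+    # Illustrative lines matched by the alternatives above (hypothetical):
+    #   "int *mbedtls_foo( int x,"              -> captures "mbedtls_foo"
+    #   "int (*f_rng)(void *, unsigned char *," -> captures "f_rng"
+    #   "typedef struct mbedtls_aes_context {"  -> captures "mbedtls_aes_context"
+    #   "} mbedtls_aes_context;"                -> captures "mbedtls_aes_context"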
+    # The regex below is indented for clarity.
+    EXCLUSION_LINES = re.compile("|".join([
+        r"extern +\"C\"",
+        r"(typedef +)?(struct|union|enum)( *{)?$",
+        r"} *;?$",
+        r"$",
+        r"//",
+        r"#",
+    ]))
+
+    def parse_identifiers_in_file(self, header_file, identifiers):
+        """
+        Parse all lines of a header where a function/enum/struct/union/typedef
+        identifier is declared, based on some regex and heuristics. Highly
+        dependent on formatting style.
+
+        Append found matches to the list ``identifiers``.
+        """
+
+        with open(header_file, "r", encoding="utf-8") as header:
+            in_block_comment = False
+            # The previous line variable is used for concatenating lines
+            # when identifiers are formatted and spread across multiple
+            # lines.
+            previous_line = ""
+
+            for line_no, line in enumerate(header):
+                line, in_block_comment = \
+                    self.strip_comments_and_literals(line, in_block_comment)
+
+                if self.EXCLUSION_LINES.match(line):
+                    previous_line = ""
+                    continue
+
+                # If the line contains only space-separated alphanumeric
+                # characters (or underscore, asterisk, or open parenthesis),
+                # and nothing else, there is a high chance it's a declaration
+                # that continues on the next line.
+                if re.search(r"^([\w\*\(]+\s+)+$", line):
+                    previous_line += line
+                    continue
+
+                # If previous line seemed to start an unfinished declaration
+                # (as above), concat and treat them as one.
+                if previous_line:
+                    line = previous_line.strip() + " " + line.strip() + "\n"
+                    previous_line = ""
+
+                # Skip parsing if the line has a space in front: a heuristic
+                # to skip function argument lines (highly subject to
+                # formatting changes).
+                if line[0] == " ":
+                    continue
+
+                identifier = self.IDENTIFIER_REGEX.search(line)
+
+                if not identifier:
+                    continue
+
+                # Find the group that matched, and append it
+                for group in identifier.groups():
+                    if not group:
+                        continue
+
+                    identifiers.append(Match(
+                        header_file,
+                        line,
+                        line_no,
+                        identifier.span(),
+                        group))
+
+    def parse_identifiers(self, include, exclude=None):
+        """
+        Parse all lines of a header where a function/enum/struct/union/typedef
+        identifier is declared, based on some regex and heuristics. Highly
+        dependent on formatting style. Identifiers in excluded files are
+        still parsed and returned separately.
+
+        Args:
+        * include: A List of glob expressions to look for files through.
+        * exclude: A List of glob expressions for excluding files.
+
+        Returns: a Tuple of two Lists of Match objects with identifiers.
+        * included_identifiers: A List of Match objects with identifiers from
+          included files.
+        * excluded_identifiers: A List of Match objects with identifiers from
+          excluded files.
+        """
+
+        included_files, excluded_files = \
+            self.get_all_files(include, exclude)
+
+        self.log.debug("Looking for included identifiers in {} files".format \
+            (len(included_files)))
+
+        included_identifiers = []
+        excluded_identifiers = []
+        for header_file in included_files:
+            self.parse_identifiers_in_file(header_file, included_identifiers)
+        for header_file in excluded_files:
+            self.parse_identifiers_in_file(header_file, excluded_identifiers)
+
+        return (included_identifiers, excluded_identifiers)
+
+    def parse_symbols(self):
+        """
+        Compile a library, and parse the object files using nm to retrieve the
+        list of referenced symbols. Exceptions thrown here are rethrown because
+        they would be critical errors that void several tests, and thus need
+        to halt the program. This is explicitly done for clarity.
+
+        Returns a List of unique symbols defined and used in the libraries.
+        """
+        raise NotImplementedError("parse_symbols must be implemented by a code parser")
+
+    def comprehensive_parse(self):
+        """
+        (Must be implemented by subclasses.)
+        Comprehensive ("default") function to call each parsing function and
+        retrieve various elements of the code, together with the source location.
+
+        Returns a dict of parsed item key to the corresponding List of Matches.
+        """
+        raise NotImplementedError("comprehension_parse must be implemented by a code parser")
+
+    def parse_symbols_from_nm(self, object_files):
+        """
+        Run nm to retrieve the list of referenced symbols in each object file.
+        Does not return the position data since it is of no use.
+
+        Args:
+        * object_files: a List of compiled object filepaths to search through.
+
+        Returns a List of unique symbols defined and used in any of the object
+        files.
+        """
+        nm_undefined_regex = re.compile(r"^\S+: +U |^$|^\S+:$")
+        nm_valid_regex = re.compile(r"^\S+( [0-9A-Fa-f]+)* . _*(?P<symbol>\w+)")
+        exclusions = ("FStar", "Hacl")
+        symbols = []
+        # Gather all outputs of nm
+        nm_output = ""
+        for lib in object_files:
+            nm_output += subprocess.run(
+                ["nm", "-og", lib],
+                universal_newlines=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                check=True
+            ).stdout
+        for line in nm_output.splitlines():
+            if not nm_undefined_regex.search(line):
+                symbol = nm_valid_regex.search(line)
+                if (symbol and not symbol.group("symbol").startswith(exclusions)):
+                    symbols.append(symbol.group("symbol"))
+                else:
+                    self.log.error(line)
+        return symbols
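+
+    # Illustrative nm output lines (hypothetical):
+    #   "libmbedcrypto.a:aes.o:0000000000000010 T mbedtls_aes_init" is kept
+    #   (symbol "mbedtls_aes_init"), while
+    #   "libmbedcrypto.a:aes.o:                 U mbedtls_calloc" is skipped
+    #   by nm_undefined_regex because the symbol is undefined.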
+
+class TFPSACryptoCodeParser(CodeParser):
+    """
+    Class for retrieving files and parsing TF-PSA-Crypto code. This can be used
+    independently of the checks that NameChecker performs.
+    """
+
+    def __init__(self, log):
+        super().__init__(log)
+        if not build_tree.looks_like_tf_psa_crypto_root(os.getcwd()):
+            raise Exception("This script must be run from TF-PSA-Crypto root.")
+
+    def comprehensive_parse(self):
+        """
+        Comprehensive ("default") function to call each parsing function and
+        retrieve various elements of the code, together with the source location.
+
+        Returns a dict of parsed item key to the corresponding List of Matches.
+        """
+        all_macros = {"public": [], "internal": [], "private":[]}
+        all_macros["public"] = self.parse_macros([
+            "include/psa/*.h",
+            "include/tf-psa-crypto/*.h",
+            "drivers/builtin/include/mbedtls/*.h",
+            "drivers/everest/include/everest/everest.h",
+            "drivers/everest/include/everest/x25519.h"
+        ])
+        all_macros["internal"] = self.parse_macros([
+            "core/*.h",
+            "drivers/builtin/src/*.h",
+            "framework/tests/include/test/drivers/*.h",
+        ])
+        all_macros["private"] = self.parse_macros([
+            "core/*.c",
+            "drivers/builtin/src/*.c",
+        ])
+        enum_consts = self.parse_enum_consts([
+            "include/psa/*.h",
+            "include/tf-psa-crypto/*.h",
+            "drivers/builtin/include/mbedtls/*.h",
+            "core/*.h",
+            "drivers/builtin/src/*.h",
+            "core/*.c",
+            "drivers/builtin/src/*.c",
+            "drivers/everest/include/everest/everest.h",
+            "drivers/everest/include/everest/x25519.h"
+        ])
+        identifiers, excluded_identifiers = self.parse_identifiers([
+            "include/psa/*.h",
+            "include/tf-psa-crypto/*.h",
+            "drivers/builtin/include/mbedtls/*.h",
+            "core/*.h",
+            "drivers/builtin/src/*.h",
+            "drivers/everest/include/everest/everest.h",
+            "drivers/everest/include/everest/x25519.h"
+        ], ["drivers/p256-m/p256-m/p256-m.h"])
+        mbed_psa_words = self.parse_mbed_psa_words([
+            "include/psa/*.h",
+            "include/tf-psa-crypto/*.h",
+            "drivers/builtin/include/mbedtls/*.h",
+            "core/*.h",
+            "drivers/builtin/src/*.h",
+            "drivers/everest/include/everest/everest.h",
+            "drivers/everest/include/everest/x25519.h",
+            "core/*.c",
+            "drivers/builtin/src/*.c",
+            "drivers/everest/library/everest.c",
+            "drivers/everest/library/x25519.c"
+        ], ["core/psa_crypto_driver_wrappers.h"])
+        symbols = self.parse_symbols()
+
+        return self._parse(all_macros, enum_consts, identifiers,
+                           excluded_identifiers, mbed_psa_words, symbols)
+
+    def parse_symbols(self):
+        """
+        Compile the TF-PSA-Crypto libraries, and parse the
+        object files using nm to retrieve the list of referenced symbols.
+        Exceptions thrown here are rethrown because they would be critical
+        errors that void several tests, and thus need to halt the program. This
+        is explicitly done for clarity.
+
+        Returns a List of unique symbols defined and used in the libraries.
+        """
+        self.log.info("Compiling...")
+        symbols = []
+
+        # Back up the config and atomically compile with the full configuration.
+        shutil.copy(
+            "include/psa/crypto_config.h",
+            "include/psa/crypto_config.h.bak"
+        )
+        try:
+            # Use check=True in all subprocess calls so that failures are raised
+            # as exceptions and logged.
+            subprocess.run(
+                ["python3", "scripts/config.py", "full"],
+                universal_newlines=True,
+                check=True
+            )
+            my_environment = os.environ.copy()
+            my_environment["CFLAGS"] = "-fno-asynchronous-unwind-tables"
+
+            source_dir = os.getcwd()
+            build_dir = tempfile.mkdtemp()
+            os.chdir(build_dir)
+            subprocess.run(
+                ["cmake", "-DGEN_FILES=ON", source_dir],
+                universal_newlines=True,
+                check=True
+            )
+            subprocess.run(
+                ["make"],
+                env=my_environment,
+                universal_newlines=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                check=True
+            )
+
+            # Perform object file analysis using nm
+            symbols = self.parse_symbols_from_nm([
+                build_dir + "/drivers/builtin/libbuiltin.a",
+                build_dir + "/drivers/p256-m/libp256m.a",
+                build_dir + "/drivers/everest/libeverest.a",
+                build_dir + "/core/libtfpsacrypto.a"
+            ])
+
+            os.chdir(source_dir)
+            shutil.rmtree(build_dir)
+        except subprocess.CalledProcessError as error:
+            self.log.debug(error.output)
+            raise error
+        finally:
+            # Put back the original config regardless of there being errors.
+            # Works also for keyboard interrupts.
+            shutil.move(
+                "include/psa/crypto_config.h.bak",
+                "include/psa/crypto_config.h"
+            )
+
+        return symbols
+
+class MBEDTLSCodeParser(CodeParser):
+    """
+    Class for retrieving files and parsing Mbed TLS code. This can be used
+    independently of the checks that NameChecker performs.
+    """
+
+    def __init__(self, log):
+        super().__init__(log)
+        if not build_tree.looks_like_mbedtls_root(os.getcwd()):
+            raise Exception("This script must be run from Mbed TLS root.")
+
+    def comprehensive_parse(self):
+        """
+        Comprehensive ("default") function to call each parsing function and
+        retrieve various elements of the code, together with the source location.
+
+        Returns a dict of parsed item key to the corresponding List of Matches.
+        """
+        all_macros = {"public": [], "internal": [], "private":[]}
+        # TF-PSA-Crypto is in the same repo in 3.6 so initalise variable here.
+        tf_psa_crypto_parse_result = {}
+
+        if build_tree.is_mbedtls_3_6():
+            all_macros["public"] = self.parse_macros([
+                "include/mbedtls/*.h",
+                "include/psa/*.h",
+                "3rdparty/everest/include/everest/everest.h",
+                "3rdparty/everest/include/everest/x25519.h"
+            ])
+            all_macros["internal"] = self.parse_macros([
+                "library/*.h",
+                "framework/tests/include/test/drivers/*.h",
+            ])
+            all_macros["private"] = self.parse_macros([
+                "library/*.c",
+            ])
+            enum_consts = self.parse_enum_consts([
+                "include/mbedtls/*.h",
+                "include/psa/*.h",
+                "library/*.h",
+                "library/*.c",
+                "3rdparty/everest/include/everest/everest.h",
+                "3rdparty/everest/include/everest/x25519.h"
+            ])
+            identifiers, excluded_identifiers = self.parse_identifiers([
+                "include/mbedtls/*.h",
+                "include/psa/*.h",
+                "library/*.h",
+                "3rdparty/everest/include/everest/everest.h",
+                "3rdparty/everest/include/everest/x25519.h"
+            ], ["3rdparty/p256-m/p256-m/p256-m.h"])
+            mbed_psa_words = self.parse_mbed_psa_words([
+                "include/mbedtls/*.h",
+                "include/psa/*.h",
+                "library/*.h",
+                "3rdparty/everest/include/everest/everest.h",
+                "3rdparty/everest/include/everest/x25519.h",
+                "library/*.c",
+                "3rdparty/everest/library/everest.c",
+                "3rdparty/everest/library/x25519.c"
+            ], ["library/psa_crypto_driver_wrappers.h"])
+        else:
+            all_macros = {"public": [], "internal": [], "private":[]}
+            all_macros["public"] = self.parse_macros([
+                "include/mbedtls/*.h",
+            ])
+            all_macros["internal"] = self.parse_macros([
+                "library/*.h",
+                "framework/tests/include/test/drivers/*.h",
+            ])
+            all_macros["private"] = self.parse_macros([
+                "library/*.c",
+            ])
+            enum_consts = self.parse_enum_consts([
+                "include/mbedtls/*.h",
+                "library/*.h",
+                "library/*.c",
+            ])
+            identifiers, excluded_identifiers = self.parse_identifiers([
+                "include/mbedtls/*.h",
+                "library/*.h",
+            ])
+            mbed_psa_words = self.parse_mbed_psa_words([
+                "include/mbedtls/*.h",
+                "library/*.h",
+                "library/*.c",
+            ])
+            os.chdir("./tf-psa-crypto")
+            tf_psa_crypto_code_parser = TFPSACryptoCodeParser(self.log)
+            tf_psa_crypto_parse_result = tf_psa_crypto_code_parser.comprehensive_parse()
+            os.chdir("../")
+
+        symbols = self.parse_symbols()
+        mbedtls_parse_result = self._parse(all_macros, enum_consts,
+                                           identifiers, excluded_identifiers,
+                                           mbed_psa_words, symbols)
+        # Combine the results for Mbed TLS and TF-PSA-Crypto.
+        for key in tf_psa_crypto_parse_result:
+            mbedtls_parse_result[key] += tf_psa_crypto_parse_result[key]
+        return mbedtls_parse_result
+
+    def parse_symbols(self):
+        """
+        Compile the Mbed TLS libraries, and parse the TLS, Crypto, and x509
+        object files using nm to retrieve the list of referenced symbols.
+        Exceptions thrown here are rethrown because they would be critical
+        errors that void several tests, and thus need to halt the program. This
+        is explicitly done for clarity.
+
+        Returns a List of unique symbols defined and used in the libraries.
+        """
+        self.log.info("Compiling...")
+        symbols = []
+
+        # Back up the config and atomically compile with the full configuration.
+        shutil.copy(
+            "include/mbedtls/mbedtls_config.h",
+            "include/mbedtls/mbedtls_config.h.bak"
+        )
+        try:
+            # Use check=True in all subprocess calls so that failures are raised
+            # as exceptions and logged.
+            subprocess.run(
+                ["python3", "scripts/config.py", "full"],
+                universal_newlines=True,
+                check=True
+            )
+            my_environment = os.environ.copy()
+            my_environment["CFLAGS"] = "-fno-asynchronous-unwind-tables"
+            # Run make clean separately to lib to prevent unwanted behavior when
+            # make is invoked with parallelism.
+            subprocess.run(
+                ["make", "clean"],
+                universal_newlines=True,
+                check=True
+            )
+            subprocess.run(
+                ["make", "lib"],
+                env=my_environment,
+                universal_newlines=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                check=True
+            )
+
+            # Perform object file analysis using nm
+            symbols = self.parse_symbols_from_nm([
+                "library/libmbedcrypto.a",
+                "library/libmbedtls.a",
+                "library/libmbedx509.a"
+            ])
+
+            subprocess.run(
+                ["make", "clean"],
+                universal_newlines=True,
+                check=True
+            )
+        except subprocess.CalledProcessError as error:
+            self.log.debug(error.output)
+            raise error
+        finally:
+            # Put back the original config regardless of there being errors.
+            # Works also for keyboard interrupts.
+            shutil.move(
+                "include/mbedtls/mbedtls_config.h.bak",
+                "include/mbedtls/mbedtls_config.h"
+            )
+
+        return symbols
+
+class NameChecker():
+    """
+    Representation of the core name checking operation performed by this script.
+    """
+    def __init__(self, parse_result, log):
+        self.parse_result = parse_result
+        self.log = log
+
+    def perform_checks(self, quiet=False):
+        """
+        A comprehensive checker that performs each check in order, and outputs
+        a final verdict.
+
+        Args:
+        * quiet: whether to hide detailed problem explanation.
+        """
+        self.log.info("=============")
+        Problem.quiet = quiet
+        problems = 0
+        problems += self.check_symbols_declared_in_header()
+
+        pattern_checks = [
+            ("public_macros", PUBLIC_MACRO_PATTERN),
+            ("internal_macros", INTERNAL_MACRO_PATTERN),
+            ("enum_consts", CONSTANTS_PATTERN),
+            ("identifiers", IDENTIFIER_PATTERN)
+        ]
+        for group, check_pattern in pattern_checks:
+            problems += self.check_match_pattern(group, check_pattern)
+
+        problems += self.check_for_typos()
+
+        self.log.info("=============")
+        if problems > 0:
+            self.log.info("FAIL: {0} problem(s) to fix".format(str(problems)))
+            if quiet:
+                self.log.info("Remove --quiet to see explanations.")
+            else:
+                self.log.info("Use --quiet for minimal output.")
+            return 1
+        else:
+            self.log.info("PASS")
+            return 0
+
+    def check_symbols_declared_in_header(self):
+        """
+        Perform a check that all detected symbols in the library object files
+        are properly declared in headers.
+        Assumes parse_names_in_source() was called before this.
+
+        Returns the number of problems that need fixing.
+        """
+        problems = []
+        all_identifiers = self.parse_result["identifiers"] +  \
+            self.parse_result["excluded_identifiers"]
+
+        for symbol in self.parse_result["symbols"]:
+            found_symbol_declared = False
+            for identifier_match in all_identifiers:
+                if symbol == identifier_match.name:
+                    found_symbol_declared = True
+                    break
+
+            if not found_symbol_declared:
+                problems.append(SymbolNotInHeader(symbol))
+
+        self.output_check_result("All symbols in header", problems)
+        return len(problems)
+
+    def check_match_pattern(self, group_to_check, check_pattern):
+        """
+        Perform a check that all items of a group conform to a regex pattern.
+        Assumes parse_names_in_source() was called before this.
+
+        Args:
+        * group_to_check: string key to index into self.parse_result.
+        * check_pattern: the regex to check against.
+
+        Returns the number of problems that need fixing.
+        """
+        problems = []
+
+        for item_match in self.parse_result[group_to_check]:
+            if not re.search(check_pattern, item_match.name):
+                problems.append(PatternMismatch(check_pattern, item_match))
+            # Double underscore should not be used for names
+            if re.search(r".*__.*", item_match.name):
+                problems.append(
+                    PatternMismatch("no double underscore allowed", item_match))
+
+        self.output_check_result(
+            "Naming patterns of {}".format(group_to_check),
+            problems)
+        return len(problems)
+
+    def check_for_typos(self):
+        """
+        Perform a check that all words in the source code beginning with MBED
+        or PSA are defined either as macros or as enum constants.
+        Assumes parse_names_in_source() was called before this.
+
+        Returns the number of problems that need fixing.
+        """
+        problems = []
+
+        # Set comprehension, equivalent to a list comprehension wrapped by set()
+        all_caps_names = {
+            match.name
+            for match
+            in self.parse_result["public_macros"] +
+            self.parse_result["internal_macros"] +
+            self.parse_result["private_macros"] +
+            self.parse_result["enum_consts"]
+            }
+        typo_exclusion = re.compile(r"XXX|__|_$|^MBEDTLS_.*CONFIG_FILE$|"
+                                    r"MBEDTLS_TEST_LIBTESTDRIVER*|"
+                                    r"PSA_CRYPTO_DRIVER_TEST")
+
+        for name_match in self.parse_result["mbed_psa_words"]:
+            found = name_match.name in all_caps_names
+
+            # Since MBEDTLS_PSA_ACCEL_XXX defines are defined by the
+            # PSA driver, they will not exist as macros. However, they
+            # should still be checked for typos using the equivalent
+            # BUILTINs that exist.
+            if "MBEDTLS_PSA_ACCEL_" in name_match.name:
+                found = name_match.name.replace(
+                    "MBEDTLS_PSA_ACCEL_",
+                    "MBEDTLS_PSA_BUILTIN_") in all_caps_names
+
+            if not found and not typo_exclusion.search(name_match.name):
+                problems.append(Typo(name_match))
+
+        self.output_check_result("Likely typos", problems)
+        return len(problems)
+
+    def output_check_result(self, name, problems):
+        """
+        Write out the PASS/FAIL status of a performed check depending on whether
+        there were problems.
+
+        Args:
+        * name: the name of the test
+        * problems: a List of encountered Problems
+        """
+        if problems:
+            self.log.info("{}: FAIL\n".format(name))
+            for problem in problems:
+                self.log.warning(str(problem))
+        else:
+            self.log.info("{}: PASS".format(name))
+
+def main():
+    """
+    Perform argument parsing, and create an instance of CodeParser and
+    NameChecker to begin the core operation.
+    """
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description=(
+            "This script confirms that the naming of all symbols and identifiers "
+            "in Mbed TLS are consistent with the house style and are also "
+            "self-consistent.\n\n"
+            "Expected to be run from the Mbed TLS root directory.")
+    )
+    parser.add_argument(
+        "-v", "--verbose",
+        action="store_true",
+        help="show parse results"
+    )
+    parser.add_argument(
+        "-q", "--quiet",
+        action="store_true",
+        help="hide unnecessary text, explanations, and highlights"
+    )
+
+    args = parser.parse_args()
+
+    # Configure the global logger, which is then passed to the classes below
+    log = logging.getLogger()
+    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
+    log.addHandler(logging.StreamHandler())
+
+    try:
+        if build_tree.looks_like_tf_psa_crypto_root(os.getcwd()):
+            tf_psa_crypto_code_parser = TFPSACryptoCodeParser(log)
+            parse_result = tf_psa_crypto_code_parser.comprehensive_parse()
+        elif build_tree.looks_like_mbedtls_root(os.getcwd()):
+            # Mbed TLS uses TF-PSA-Crypto, so we need to parse TF-PSA-Crypto too
+            mbedtls_code_parser = MBEDTLSCodeParser(log)
+            parse_result = mbedtls_code_parser.comprehensive_parse()
+        else:
+            raise Exception("This script must be run from Mbed TLS or TF-PSA-Crypto root")
+    except Exception: # pylint: disable=broad-except
+        traceback.print_exc()
+        sys.exit(2)
+
+    name_checker = NameChecker(parse_result, log)
+    return_code = name_checker.perform_checks(quiet=args.quiet)
+
+    sys.exit(return_code)
+
+if __name__ == "__main__":
+    main()
diff --git a/framework/scripts/check_test_cases.py b/framework/scripts/check_test_cases.py
new file mode 100755
index 0000000..2576dc7
--- /dev/null
+++ b/framework/scripts/check_test_cases.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+
+"""Sanity checks for test data."""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import re
+import sys
+
+from mbedtls_framework import collect_test_cases
+
+
+class DescriptionChecker(collect_test_cases.TestDescriptionExplorer):
+    """Check all test case descriptions.
+
+* Check that each description is valid (length, allowed character set, etc.).
+* Check that there is no duplicated description inside of one test suite.
+"""
+
+    def __init__(self, results):
+        self.results = results
+
+    def new_per_file_state(self):
+        """Dictionary mapping descriptions to their line number."""
+        return {}
+
+    def process_test_case(self, per_file_state,
+                          file_name, line_number, description):
+        """Check test case descriptions for errors."""
+        results = self.results
+        seen = per_file_state
+        if description in seen:
+            results.error(file_name, line_number,
+                          'Duplicate description (also line {})',
+                          seen[description])
+            return
+        if re.search(br'[\t;]', description):
+            results.error(file_name, line_number,
+                          'Forbidden character \'{}\' in description',
+                          re.search(br'[\t;]', description).group(0).decode('ascii'))
+        if re.search(br'[^ -~]', description):
+            results.error(file_name, line_number,
+                          'Non-ASCII character in description')
+        if len(description) > 66:
+            results.warning(file_name, line_number,
+                            'Test description too long ({} > 66)',
+                            len(description))
+        seen[description] = line_number
+
+def main():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--list-all',
+                        action='store_true',
+                        help='List all test cases, without doing checks')
+    parser.add_argument('--quiet', '-q',
+                        action='store_true',
+                        help='Hide warnings')
+    parser.add_argument('--verbose', '-v',
+                        action='store_false', dest='quiet',
+                        help='Show warnings (default: on; undoes --quiet)')
+    options = parser.parse_args()
+    if options.list_all:
+        descriptions = collect_test_cases.collect_available_test_cases()
+        sys.stdout.write('\n'.join(descriptions + ['']))
+        return
+    results = collect_test_cases.Results(options)
+    checker = DescriptionChecker(results)
+    try:
+        checker.walk_all()
+    except collect_test_cases.ScriptOutputError as e:
+        results.error(e.script_name, e.idx,
+                      '"{}" should be listed as "<suite_name>;<description>"',
+                      e.line)
+    if (results.warnings or results.errors) and not options.quiet:
+        sys.stderr.write('{}: {} errors, {} warnings\n'
+                         .format(sys.argv[0], results.errors, results.warnings))
+    sys.exit(1 if results.errors else 0)
+
+if __name__ == '__main__':
+    main()
diff --git a/framework/scripts/code_style.py b/framework/scripts/code_style.py
new file mode 100755
index 0000000..63cc6dc
--- /dev/null
+++ b/framework/scripts/code_style.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python3
+"""Check or fix the code style by running Uncrustify.
+
+This script must be run from the root of a Git work tree containing Mbed TLS.
+"""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+import argparse
+import os
+import re
+import subprocess
+import sys
+from typing import FrozenSet, List, Optional
+from mbedtls_framework import build_tree
+
+UNCRUSTIFY_SUPPORTED_VERSION = "0.75.1"
+CONFIG_FILE = ".uncrustify.cfg"
+UNCRUSTIFY_EXE = "uncrustify"
+UNCRUSTIFY_ARGS = ["-c", CONFIG_FILE]
+CHECK_GENERATED_FILES = "tests/scripts/check-generated-files.sh"
+
+def print_err(*args):
+    print("Error: ", *args, file=sys.stderr)
+
+# Print the file names that will be skipped and a warning message
+def print_skip(files_to_skip):
+    print()
+    print(*files_to_skip, sep=", SKIP\n", end=", SKIP\n")
+    print("Warning: The listed files will be skipped because\n"
+          "they are not known to git.")
+    print()
+
+# Match FILENAME(s) in "check SCRIPT (FILENAME...)"
+CHECK_CALL_RE = re.compile(r"\n\s*check\s+[^\s#$&*?;|]+([^\n#$&*?;|]+)",
+                           re.ASCII)
+def list_generated_files() -> FrozenSet[str]:
+    """Return the names of generated files.
+
+    We don't reformat generated files, since the result might be different
+    from the output of the generator. Ideally the result of the generator
+    would conform to the code style, but this would be difficult, especially
+    with respect to the placement of line breaks in long logical lines.
+    """
+    # Parse check-generated-files.sh to get an up-to-date list of
+    # generated files. Read the file rather than calling it so that
+    # this script only depends on Git, Python and uncrustify, and not other
+    # tools such as sh or grep which might not be available on Windows.
+    # This introduces a limitation: check-generated-files.sh must have
+    # the expected format and must list the files explicitly, not through
+    # wildcards or command substitution.
+    content = open(CHECK_GENERATED_FILES, encoding="utf-8").read()
+    checks = re.findall(CHECK_CALL_RE, content)
+    return frozenset(word for s in checks for word in s.split())
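+
+# For illustration, a line in check-generated-files.sh of the form
+# (hypothetical): "check scripts/generate_errors.pl library/error.c"
+# contributes "library/error.c" to the set returned above.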
+
+# Check for comment string indicating an auto-generated file
+AUTOGEN_RE = re.compile(r"Warning[ :-]+This file is (now )?auto[ -]?generated",
+                        re.ASCII | re.IGNORECASE)
+def is_file_autogenerated(filename):
+    content = open(filename, encoding="utf-8").read()
+    return AUTOGEN_RE.search(content) is not None
+
+def get_src_files(since: Optional[str]) -> List[str]:
+    """
+    Use git to get a list of the source files.
+
+    The optional argument since is a commit, indicating to only list files
+    that have changed since that commit. Without this argument, list all
+    files known to git.
+
+    Only C files are included, and certain files (generated, or third party)
+    are excluded.
+    """
+    file_patterns = ["*.[hc]",
+                     "tests/suites/*.function",
+                     "tf-psa-crypto/tests/suites/*.function",
+                     "scripts/data_files/*.fmt"]
+    output = subprocess.check_output(["git", "ls-files"] + file_patterns,
+                                     universal_newlines=True)
+    src_files = output.split()
+
+    # When this script is called from a git hook, some environment variables
+    # are set by default which force all git commands to use the main repository
+    # (i.e. prevent us from performing commands on the framework repo).
+    # Create an environment without these variables for running commands on the
+    # framework repo.
+    framework_env = os.environ.copy()
+    # Get a list of environment vars that git sets
+    git_env_vars = subprocess.check_output(["git", "rev-parse", "--local-env-vars"],
+                                           universal_newlines=True)
+    # Remove the vars from the environment
+    for var in git_env_vars.split():
+        framework_env.pop(var, None)
+
+    output = subprocess.check_output(["git", "-C", "framework", "ls-files"]
+                                     + file_patterns,
+                                     universal_newlines=True,
+                                     env=framework_env)
+    framework_src_files = output.split()
+
+    if since:
+        # get all files changed in commits since the starting point in ...
+        # ... the main repository
+        cmd = ["git", "log", since + "..HEAD", "--ignore-submodules",
+               "--name-only", "--pretty=", "--"] + src_files
+        output = subprocess.check_output(cmd, universal_newlines=True)
+        committed_changed_files = output.split()
+
+        # ... the framework submodule
+        framework_since = get_submodule_hash(since, "framework")
+        cmd = ["git", "-C", "framework", "log", framework_since + "..HEAD",
+               "--name-only", "--pretty=", "--"] + framework_src_files
+        output = subprocess.check_output(cmd, universal_newlines=True,
+                                         env=framework_env)
+        committed_changed_files += ["framework/" + s for s in output.split()]
+
+        # and also get all files with uncommitted changes in ...
+        # ... the main repository
+        cmd = ["git", "diff", "--name-only", "--"] + src_files
+        output = subprocess.check_output(cmd, universal_newlines=True)
+        uncommitted_changed_files = output.split()
+        # ... the framework submodule
+        cmd = ["git", "-C", "framework", "diff", "--name-only", "--"] + \
+              framework_src_files
+        output = subprocess.check_output(cmd, universal_newlines=True,
+                                         env=framework_env)
+        uncommitted_changed_files += ["framework/" + s for s in output.split()]
+
+        src_files = committed_changed_files + uncommitted_changed_files
+    else:
+        src_files += ["framework/" + s for s in framework_src_files]
+
+    generated_files = list_generated_files()
+    # Don't correct style for third-party files (and, for simplicity,
+    # companion files in the same subtree), or for automatically
+    # generated files (we're correcting the templates instead).
+    if build_tree.is_mbedtls_3_6():
+        src_files = [filename for filename in src_files
+                     if not (filename.startswith("3rdparty/") or
+                             filename in generated_files or
+                             is_file_autogenerated(filename))]
+    else:
+        src_files = [filename for filename in src_files
+                     if not (filename.startswith("tf-psa-crypto/drivers/everest/") or
+                             filename.startswith("tf-psa-crypto/drivers/p256-m/") or
+                             filename in generated_files or
+                             is_file_autogenerated(filename))]
+    return src_files
+
+def get_submodule_hash(commit: str, submodule: str) -> str:
+    """Get the commit hash of a submodule at a given commit in the Git repository."""
+    cmd = ["git", "ls-tree", commit, submodule]
+    output = subprocess.check_output(cmd, universal_newlines=True)
+    return output.split()[2]
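+
+# Illustrative "git ls-tree <commit> framework" output (hypothetical hash):
+#   160000 commit 0123456789abcdef0123456789abcdef01234567  framework
+# The third whitespace-separated field is the submodule commit hash.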
+
+def get_uncrustify_version() -> str:
+    """
+    Get the version string from Uncrustify
+    """
+    result = subprocess.run([UNCRUSTIFY_EXE, "--version"],
+                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                            check=False)
+    if result.returncode != 0:
+        print_err("Could not get Uncrustify version:", str(result.stderr, "utf-8"))
+        return ""
+    else:
+        return str(result.stdout, "utf-8")
+
+def check_style_is_correct(src_file_list: List[str]) -> bool:
+    """
+    Check the code style and output a diff for each file whose style is
+    incorrect.
+    """
+    style_correct = True
+    for src_file in src_file_list:
+        uncrustify_cmd = [UNCRUSTIFY_EXE] + UNCRUSTIFY_ARGS + [src_file]
+        result = subprocess.run(uncrustify_cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE, check=False)
+        if result.returncode != 0:
+            print_err("Uncrustify returned " + str(result.returncode) +
+                      " correcting file " + src_file)
+            return False
+
+        # Uncrustify makes changes to the code and places the result in a new
+        # file with the extension ".uncrustify". To get the changes (if any)
+        # simply diff the 2 files.
+        diff_cmd = ["diff", "-u", src_file, src_file + ".uncrustify"]
+        cp = subprocess.run(diff_cmd, check=False)
+
+        if cp.returncode == 1:
+            print(src_file + " changed - code style is incorrect.")
+            style_correct = False
+        elif cp.returncode != 0:
+            raise subprocess.CalledProcessError(cp.returncode, cp.args,
+                                                cp.stdout, cp.stderr)
+
+        # Tidy up artifact
+        os.remove(src_file + ".uncrustify")
+
+    return style_correct
+
+def fix_style_single_pass(src_file_list: List[str]) -> bool:
+    """
+    Run Uncrustify once over the source files.
+    """
+    code_change_args = UNCRUSTIFY_ARGS + ["--no-backup"]
+    for src_file in src_file_list:
+        uncrustify_cmd = [UNCRUSTIFY_EXE] + code_change_args + [src_file]
+        result = subprocess.run(uncrustify_cmd, check=False)
+        if result.returncode != 0:
+            print_err("Uncrustify with file returned: " +
+                      str(result.returncode) + " correcting file " +
+                      src_file)
+            return False
+    return True
+
+def fix_style(src_file_list: List[str]) -> int:
+    """
+    Fix the code style. This takes 2 passes of Uncrustify.
+    """
+    if not fix_style_single_pass(src_file_list):
+        return 1
+    if not fix_style_single_pass(src_file_list):
+        return 1
+
+    # Guard against future changes that cause the codebase to require
+    # more passes.
+    if not check_style_is_correct(src_file_list):
+        print_err("Code style still incorrect after second run of Uncrustify.")
+        return 1
+    else:
+        return 0
+
+def main() -> int:
+    """
+    Main with command line arguments.
+    """
+    uncrustify_version = get_uncrustify_version().strip()
+    if UNCRUSTIFY_SUPPORTED_VERSION not in uncrustify_version:
+        print("Warning: Using unsupported Uncrustify version '" +
+              uncrustify_version + "'")
+        print("Note: The only supported version is " +
+              UNCRUSTIFY_SUPPORTED_VERSION)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-f', '--fix', action='store_true',
+                        help=('modify source files to fix the code style '
+                              '(default: print diff, do not modify files)'))
+    parser.add_argument('-s', '--since', metavar='COMMIT', const='development', nargs='?',
+                        help=('only check files modified since the specified commit'
+                              ' (e.g. --since=HEAD~3 or --since=development). If no'
+                              ' commit is specified, default to development.'))
+    # --subset is almost useless: it only matters if there are no files
+    # ('code_style.py' without arguments checks all files known to Git,
+    # 'code_style.py --subset' does nothing). In particular,
+    # 'code_style.py --fix --subset ...' is intended as a stable ("porcelain")
+    # way to restyle a possibly empty set of files.
+    parser.add_argument('--subset', action='store_true',
+                        help='only check the specified files (default with non-option arguments)')
+    parser.add_argument('operands', nargs='*', metavar='FILE',
+                        help='files to check (files MUST be known to git, if none: check all)')
+
+    args = parser.parse_args()
+
+    covered = frozenset(get_src_files(args.since))
+    # We only check files that are known to git
+    if args.subset or args.operands:
+        src_files = [f for f in args.operands if f in covered]
+        skip_src_files = [f for f in args.operands if f not in covered]
+        if skip_src_files:
+            print_skip(skip_src_files)
+    else:
+        src_files = list(covered)
+
+    if args.fix:
+        # Fix mode
+        return fix_style(src_files)
+    else:
+        # Check mode
+        if check_style_is_correct(src_files):
+            print("Checked {} files, style ok.".format(len(src_files)))
+            return 0
+        else:
+            return 1
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/framework/scripts/demo_common.sh b/framework/scripts/demo_common.sh
new file mode 100644
index 0000000..004e6ab
--- /dev/null
+++ b/framework/scripts/demo_common.sh
@@ -0,0 +1,137 @@
+## Common shell functions used by demo scripts programs/*/*.sh.
+
+## How to write a demo script
+## ==========================
+##
+## Include this file near the top of each demo script:
+##   . "${0%/*}/demo_common.sh"
+##
+## Start with a "msg" call that explains the purpose of the script.
+## Then call the "depends_on" function to ensure that all config
+## dependencies are met.
+##
+## As the last thing in the script, call the cleanup function.
+##
+## You can use the functions and variables described below.
+
+set -e -u
+
+## $root_dir is the root directory of the Mbed TLS source tree.
+root_dir="${0%/*}"
+# Find a nice path to the root directory, avoiding unnecessary "../".
+# The code supports demo scripts nested up to 4 levels deep.
+# The code works no matter where the demo script is relative to the current
+# directory, even if it is called with a relative path.
+n=4 # limit the search depth
+while ! [ -d "$root_dir/programs" ] || ! [ -d "$root_dir/library" ]; do
+  if [ $n -eq 0 ]; then
+    echo >&2 "This doesn't seem to be an Mbed TLS source tree."
+    exit 125
+  fi
+  n=$((n - 1))
+  case $root_dir in
+    .) root_dir="..";;
+    ..|?*/..) root_dir="$root_dir/..";;
+    ?*/*) root_dir="${root_dir%/*}";;
+    /*) root_dir="/";;
+    *) root_dir=".";;
+  esac
+done
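+# For example (illustrative): if this script is programs/x509/demo.sh invoked
+# from the source root, root_dir starts as "programs/x509", is reduced to
+# "programs", then to ".", where programs/ and library/ are found.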
+
+## $programs_dir is the directory containing the sample programs.
+# Assume an in-tree build.
+programs_dir="$root_dir/programs"
+
+## msg LINE...
+## msg <TEXT_ORIGIN
+## Display an informational message.
+msg () {
+  if [ $# -eq 0 ]; then
+    sed 's/^/# /'
+  else
+    for x in "$@"; do
+      echo "# $x"
+    done
+  fi
+}
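+
+## For example (illustrative), 'msg "step 1" "step 2"' prints "# step 1" and
+## "# step 2", while 'msg <notes.txt' prefixes each line of the (hypothetical)
+## file notes.txt with "# ".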
+
+## run "Message" COMMAND ARGUMENT...
+## Display the message, then run COMMAND with the specified arguments.
+run () {
+    echo
+    echo "# $1"
+    shift
+    echo "+ $*"
+    "$@"
+}
+
+## Like '!', but stop on failure with 'set -e'
+not () {
+  if "$@"; then false; fi
+}
+
+## run_bad "Message" COMMAND ARGUMENT...
+## Like run, but the command is expected to fail.
+run_bad () {
+  echo
+  echo "$1 This must fail."
+  shift
+  echo "+ ! $*"
+  not "$@"
+}
+
+## config_has SYMBOL...
+## Succeeds if the library configuration has all SYMBOLs set.
+config_has () {
+  for x in "$@"; do
+    "$programs_dir/test/query_compile_time_config" "$x"
+  done
+}
+
+## depends_on SYMBOL...
+## Exit if the library configuration does not have all SYMBOLs set.
+depends_on () {
+  m=
+  for x in "$@"; do
+    if ! config_has "$x"; then
+      m="$m $x"
+    fi
+  done
+  if [ -n "$m" ]; then
+    cat >&2 <<EOF
+$0: this demo requires the following
+configuration options to be enabled at compile time:
+ $m
+EOF
+    # Exit with a success status so that this counts as a pass for run_demos.py.
+    exit
+  fi
+}
+
+## Add the names of files to clean up to this whitespace-separated variable.
+## The file names must not contain whitespace characters.
+files_to_clean=
+
+## Call this function at the end of each script.
+## It is called automatically if the script is killed by a signal.
+cleanup () {
+  rm -f -- $files_to_clean
+}
+
+
+
+################################################################
+## End of the public interfaces. Code beyond this point is not
+## meant to be called directly from a demo script.
+
+trap 'cleanup; trap - HUP; kill -HUP $$' HUP
+trap 'cleanup; trap - INT; kill -INT $$' INT
+trap 'cleanup; trap - TERM; kill -TERM $$' TERM
+
+if config_has MBEDTLS_ENTROPY_NV_SEED; then
+  # Create a seedfile that's sufficiently long in all library configurations.
+  # This is necessary for programs that use randomness.
+  # Assume that the name of the seedfile is the default name.
+  files_to_clean="$files_to_clean seedfile"
+  dd if=/dev/urandom of=seedfile ibs=64 obs=64 count=1
+fi
diff --git a/framework/scripts/doxygen.sh b/framework/scripts/doxygen.sh
new file mode 100755
index 0000000..7d051ed
--- /dev/null
+++ b/framework/scripts/doxygen.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+# Make sure the doxygen documentation builds without warnings
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# Abort on errors (and uninitialised variables)
+set -eu
+
+. "$(dirname "$0")/project_detection.sh"
+
+if in_mbedtls_repo || in_tf_psa_crypto_repo; then :; else
+    echo "Must be run from Mbed TLS root or TF-PSA-Crypto root" >&2
+    exit 1
+fi
+
+if "$(dirname "$0")/apidoc_full.sh" > doc.out 2>doc.err; then :; else
+    cat doc.err
+    echo "FAIL" >&2
+    exit 1;
+fi
+
+cat doc.out doc.err | \
+    grep -v "warning: ignoring unsupported tag" \
+    > doc.filtered
+
+if grep -E "(warning|error):" doc.filtered; then
+    echo "FAIL" >&2
+    exit 1;
+fi
+
+if in_mbedtls_repo; then
+    make apidoc_clean
+fi
+
+rm -f doc.out doc.err doc.filtered
diff --git a/framework/scripts/generate_bignum_tests.py b/framework/scripts/generate_bignum_tests.py
new file mode 100755
index 0000000..68ad42f
--- /dev/null
+++ b/framework/scripts/generate_bignum_tests.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""Generate test data for bignum functions.
+
+With no arguments, generate all test data. With non-option arguments,
+generate only the specified files.
+
+Class structure:
+
+Child classes of test_data_generation.BaseTarget (file targets) represent an output
+file. These indicate where test cases will be written, for all subclasses of
+this target. Multiple file targets should not reuse a `target_basename`.
+
+Each subclass derived from a file target can either be:
+  - A concrete class, representing a test function, which generates test cases.
+  - An abstract class containing shared methods and attributes, not associated
+        with a test function. An example is BignumOperation, which provides
+        common features used for bignum binary operations.
+
+Both concrete and abstract subclasses can be derived from, to implement
+additional test cases (see BignumCmp and BignumCmpAbs for examples of deriving
+from abstract and concrete classes).
+
+
+Adding test case generation for a function:
+
+A subclass representing the test function should be added, deriving from a
+file target such as BignumTarget. This test class must set/implement the
+following:
+  - test_function: the function name from the associated .function file.
+  - test_name: a descriptive name or brief summary to refer to the test
+        function.
+  - arguments(): a method to generate the list of arguments required for the
+        test_function.
+  - generate_function_tests(): a method to generate TestCases for the function.
+        This should create instances of the class with required input data, and
+        call `.create_test_case()` to yield the TestCase.
+
+Additional details and other attributes/methods are given in the documentation
+of BaseTarget in test_data_generation.py.
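+
+A minimal sketch of such a subclass (the class name, values and .function
+entry are illustrative, not an actual test function):
+
+  class BignumNeg(BignumTarget):
+      test_function = "mpi_neg"
+      test_name = "MPI negate"
+
+      def __init__(self, val: str) -> None:
+          self.val = val
+
+      def arguments(self) -> List[str]:
+          return [bignum_common.quote_str(self.val),
+                  bignum_common.quote_str("{:x}".format(-int(self.val, 16)))]
+
+      @classmethod
+      def generate_function_tests(cls):
+          for val in ("0", "7b"):
+              yield cls(val).create_test_case()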
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import sys
+
+from abc import ABCMeta
+from typing import List
+
+from mbedtls_framework import test_data_generation
+from mbedtls_framework import bignum_common
+# Import modules containing additional test classes
+# Test function classes in these modules will be registered by
+# the framework
+from mbedtls_framework import bignum_core, bignum_mod_raw, bignum_mod # pylint: disable=unused-import
+
+class BignumTarget(test_data_generation.BaseTarget):
+    #pylint: disable=too-few-public-methods
+    """Target for bignum (legacy) test case generation."""
+    target_basename = 'test_suite_bignum.generated'
+
+
+class BignumOperation(bignum_common.OperationCommon, BignumTarget,
+                      metaclass=ABCMeta):
+    #pylint: disable=abstract-method
+    """Common features for bignum operations in legacy tests."""
+    unique_combinations_only = True
+    input_values = [
+        "", "0", "-", "-0",
+        "7b", "-7b",
+        "0000000000000000123", "-0000000000000000123",
+        "1230000000000000000", "-1230000000000000000"
+    ]
+
+    def description_suffix(self) -> str:
+        #pylint: disable=no-self-use # derived classes need self
+        """Text to add at the end of the test case description."""
+        return ""
+
+    def description(self) -> str:
+        """Generate a description for the test case.
+
+        If case_description is not set, it takes the form A `symbol` B, where
+        symbol represents the operation. Descriptions of each value are
+        generated to provide some context for the test case.
+        """
+        if not self.case_description:
+            self.case_description = "{} {} {}".format(
+                self.value_description(self.arg_a),
+                self.symbol,
+                self.value_description(self.arg_b)
+            )
+            description_suffix = self.description_suffix()
+            if description_suffix:
+                self.case_description += " " + description_suffix
+        return super().description()
+
+    @staticmethod
+    def value_description(val) -> str:
+        """Generate a description of the argument val.
+
+        This produces a simple description of the value, which is used in test
+        case naming to add context.
+        """
+        if val == "":
+            return "0 (null)"
+        if val == "-":
+            return "negative 0 (null)"
+        if val == "0":
+            return "0 (1 limb)"
+
+        if val[0] == "-":
+            tmp = "negative"
+            val = val[1:]
+        else:
+            tmp = "positive"
+        if val[0] == "0":
+            tmp += " with leading zero limb"
+        elif len(val) > 10:
+            tmp = "large " + tmp
+        return tmp
+
+
+class BignumCmp(BignumOperation):
+    """Test cases for bignum value comparison."""
+    count = 0
+    test_function = "mpi_cmp_mpi"
+    test_name = "MPI compare"
+    input_cases = [
+        ("-2", "-3"),
+        ("-2", "-2"),
+        ("2b4", "2b5"),
+        ("2b5", "2b6")
+        ]
+
+    def __init__(self, val_a, val_b) -> None:
+        super().__init__(val_a, val_b)
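+        # Sign of (a - b): -1, 0 or 1. This is both the expected return
+        # value and the index (offset by 1) of the comparison symbol used
+        # in the test description.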
+        self._result = int(self.int_a > self.int_b) - int(self.int_a < self.int_b)
+        self.symbol = ["<", "==", ">"][self._result + 1]
+
+    def result(self) -> List[str]:
+        return [str(self._result)]
+
+
+class BignumCmpAbs(BignumCmp):
+    """Test cases for absolute bignum value comparison."""
+    count = 0
+    test_function = "mpi_cmp_abs"
+    test_name = "MPI compare (abs)"
+
+    def __init__(self, val_a, val_b) -> None:
+        super().__init__(val_a.strip("-"), val_b.strip("-"))
+
+
+class BignumAdd(BignumOperation):
+    """Test cases for bignum value addition."""
+    count = 0
+    symbol = "+"
+    test_function = "mpi_add_mpi"
+    test_name = "MPI add"
+    input_cases = bignum_common.combination_pairs(
+        [
+            "1c67967269c6", "9cde3",
+            "-1c67967269c6", "-9cde3",
+        ]
+    )
+
+    def __init__(self, val_a: str, val_b: str) -> None:
+        super().__init__(val_a, val_b)
+        self._result = self.int_a + self.int_b
+
+    def description_suffix(self) -> str:
+        if (self.int_a >= 0 and self.int_b >= 0):
+            return "" # obviously positive result or 0
+        if (self.int_a <= 0 and self.int_b <= 0):
+            return "" # obviously negative result or 0
+        # The sign of the result is not obvious, so indicate it
+        return ", result{}0".format('>' if self._result > 0 else
+                                    '<' if self._result < 0 else '=')
+
+    def result(self) -> List[str]:
+        return [bignum_common.quote_str("{:x}".format(self._result))]
+
+if __name__ == '__main__':
+    # Use the section of the docstring relevant to the CLI as description
+    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
diff --git a/framework/scripts/generate_config_tests.py b/framework/scripts/generate_config_tests.py
new file mode 100755
index 0000000..e3c1d8d
--- /dev/null
+++ b/framework/scripts/generate_config_tests.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""Generate test data for configuration reporting.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import inspect
+import re
+import sys
+from typing import Iterable, Iterator, List, Optional, Tuple
+
+import project_scripts # pylint: disable=unused-import
+import config
+from mbedtls_framework import config_common
+from mbedtls_framework import test_case
+from mbedtls_framework import test_data_generation
+
+
+def single_setting_case(setting: config_common.Setting, when_on: bool,
+                        dependencies: List[str],
+                        note: Optional[str]) -> test_case.TestCase:
+    """Construct a test case for a boolean setting.
+
+    This test case passes if the setting and its dependencies are enabled,
+    and is skipped otherwise.
+
+    * setting: the setting to be tested.
+    * when_on: True to test with the setting enabled, or False to test
+      with the setting disabled.
+    * dependencies: extra dependencies for the test case.
+    * note: a note to add after the setting name in the test description.
+      This is typically a summary of dependencies, and is usually empty
+      if the given setting is only tested once.
+    """
+    base = setting.name if when_on else '!' + setting.name
+    tc = test_case.TestCase()
+    tc.set_function('pass')
+    description_suffix = ' (' + note + ')' if note else ''
+    tc.set_description('Config: ' + base + description_suffix)
+    tc.set_dependencies([base] + dependencies)
+    return tc
+
+
+PSA_WANT_KEY_TYPE_KEY_PAIR_RE = \
+    re.compile(r'(?P<prefix>PSA_WANT_KEY_TYPE_(?P<type>\w+)_KEY_PAIR_)(?P<operation>\w+)\Z')
+
+# If foo is a setting that is only meaningful when bar is enabled, set
+# SIMPLE_DEPENDENCIES[foo]=bar. More generally, bar can be a colon-separated
+# list of settings, meaning that all the settings must be enabled. Each setting
+# in bar can be prefixed with '!' to negate it. This is the same syntax as a
+# depends_on directive in test data.
+# See also `dependencies_of_setting`.
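+# For example (with hypothetical setting names), SIMPLE_DEPENDENCIES['FOO'] =
+# 'BAR:!QUX' would mean that FOO is only tested in configurations where BAR
+# is enabled and QUX is disabled.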
+SIMPLE_DEPENDENCIES = {
+    'MBEDTLS_AESCE_C': 'MBEDTLS_AES_C',
+    'MBEDTLS_AESNI_C': 'MBEDTLS_AES_C',
+    'MBEDTLS_ERROR_STRERROR_DUMMY': '!MBEDTLS_ERROR_C',
+    'MBEDTLS_GENPRIME': 'MBEDTLS_RSA_C',
+    'MBEDTLS_NO_DEFAULT_ENTROPY_SOURCES': 'MBEDTLS_ENTROPY_C',
+    'MBEDTLS_NO_PLATFORM_ENTROPY': 'MBEDTLS_ENTROPY_C',
+    'MBEDTLS_PKCS1_V15': 'MBEDTLS_RSA_C',
+    'MBEDTLS_PKCS1_V21': 'MBEDTLS_RSA_C',
+    'MBEDTLS_PSA_CRYPTO_CLIENT': '!MBEDTLS_PSA_CRYPTO_C',
+    'MBEDTLS_PSA_INJECT_ENTROPY': 'MBEDTLS_PSA_CRYPTO_C',
+    'MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS': 'MBEDTLS_PSA_CRYPTO_C',
+}
+
+def dependencies_of_setting(cfg: config_common.Config,
+                            setting: config_common.Setting) -> Optional[str]:
+    """Return dependencies without which a setting is not meaningful.
+
+    The dependencies of a setting express when a setting can be enabled and
+    is relevant. For example, if ``check_config.h`` errors out when
+    ``defined(FOO) && !defined(BAR)``, then ``BAR`` is a dependency of ``FOO``.
+    If ``FOO`` has no effect when ``CORGE`` is disabled, then ``CORGE``
+    is a dependency of ``FOO``.
+
+    The return value can be a colon-separated list of settings, if the setting
+    is only meaningful when all of these settings are enabled. Each setting can
+    be negated by prefixing them with '!'. This is the same syntax as a
+    depends_on directive in test data.
+    """
+    #pylint: disable=too-many-branches,too-many-return-statements
+    name = setting.name
+    if name in SIMPLE_DEPENDENCIES:
+        return SIMPLE_DEPENDENCIES[name]
+    if name.startswith('MBEDTLS_') and not name.endswith('_C'):
+        if name.startswith('MBEDTLS_CIPHER_PADDING_'):
+            return 'MBEDTLS_CIPHER_C:MBEDTLS_CIPHER_MODE_CBC'
+        if name.startswith('MBEDTLS_PK_PARSE_EC_'):
+            return 'MBEDTLS_PK_C:' + test_case.psa_or_3_6_feature_macro(
+                'PSA_KEY_TYPE_ECC_PUBLIC_KEY', test_case.Domain36.USE_PSA)
+
+        # For TLS settings, insist on having them once off and once on in
+        # a configuration where both client support and server support are
+        # enabled. The settings are also meaningful when only one side is
+        # enabled, but there isn't much point in having separate records
+        # for client-side and server-side, so we keep things simple.
+        # Requiring both sides to be enabled also means we know we'll run
+        # tests that only run Mbed TLS against itself, which only run in
+        # configurations with both sides enabled.
+        if name.startswith('MBEDTLS_SSL_TLS1_3_') or \
+           name == 'MBEDTLS_SSL_EARLY_DATA':
+            return 'MBEDTLS_SSL_CLI_C:MBEDTLS_SSL_SRV_C:MBEDTLS_SSL_PROTO_TLS1_3'
+        if name.startswith('MBEDTLS_SSL_DTLS_'):
+            return 'MBEDTLS_SSL_CLI_C:MBEDTLS_SSL_SRV_C:MBEDTLS_SSL_PROTO_DTLS'
+        if name.startswith('MBEDTLS_SSL_'):
+            return 'MBEDTLS_SSL_CLI_C:MBEDTLS_SSL_SRV_C'
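+        # Otherwise, guess the module from the setting name by trying
+        # successively longer prefixes with '_C' appended. For example
+        # (illustrative), MBEDTLS_CAMELLIA_SMALL_MEMORY would depend on
+        # MBEDTLS_CAMELLIA_C if that is a known setting.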
+        for pos in re.finditer(r'_', name):
+            super_name = name[:pos.start()] + '_C'
+            if cfg.known(super_name):
+                return super_name
+    if name.startswith('PSA_WANT_'):
+        deps = 'MBEDTLS_PSA_CRYPTO_CLIENT'
+        m = PSA_WANT_KEY_TYPE_KEY_PAIR_RE.match(name)
+        if m and m.group('operation') != 'BASIC':
+            deps += ':' + m.group('prefix') + 'BASIC'
+        return deps
+    return None
+
+def conditions_for_setting(cfg: config_common.Config,
+                           setting: config_common.Setting
+                           ) -> Iterator[Tuple[List[str], str]]:
+    """Enumerate the conditions under which to test the given setting.
+
+    * cfg: all configuration settings.
+    * setting: the setting to be tested.
+
+    Generate a stream of conditions, i.e. extra dependencies to test with
+    together with a human-readable explanation of each dependency. Some
+    typical cases:
+
+    * By default, generate a one-element stream with no extra dependencies.
+    * If the setting is ignored unless some other setting is enabled, generate
+      a one-element stream with that other setting as an extra dependency.
+    * If the setting is known to interact with some other setting, generate
+      a stream with one element where this setting is on and one where it's off.
+    * To skip the setting altogether, generate an empty stream.
+    """
+    name = setting.name
+    if name.endswith('_ALT') and not config.is_seamless_alt(name):
+        # We don't test alt implementations, except (most) platform alts
+        return
+    dependencies = dependencies_of_setting(cfg, setting)
+    if dependencies:
+        yield [dependencies], ''
+        return
+    yield [], ''
+
+
+def enumerate_boolean_setting_cases(cfg: config_common.Config
+                                   ) -> Iterable[test_case.TestCase]:
+    """Emit test cases for all boolean settings."""
+    for name in sorted(cfg.settings.keys()):
+        setting = cfg.settings[name]
+        if not name.startswith('PSA_WANT_') and setting.value:
+            continue # non-boolean setting
+        for when_on in True, False:
+            for deps, note in conditions_for_setting(cfg, setting):
+                yield single_setting_case(setting, when_on, deps, note)
+
+
+
+class ConfigTestGenerator(test_data_generation.TestGenerator):
+    """Generate test cases for configuration reporting."""
+
+    def __init__(self, settings):
+        # pylint: disable=no-member
+        config_members = dict(inspect.getmembers(config))
+        if 'MbedTLSConfig' in config_members:
+            self.mbedtls_config = config.MbedTLSConfig()
+            self.targets['test_suite_config.mbedtls_boolean'] = \
+                lambda: enumerate_boolean_setting_cases(self.mbedtls_config)
+        if 'CryptoConfig' in config_members:
+            self.psa_config = config.CryptoConfig()
+            self.targets['test_suite_config.psa_boolean'] = \
+                lambda: enumerate_boolean_setting_cases(self.psa_config)
+        elif 'TFPSACryptoConfig' in config_members:
+            self.psa_config = config.TFPSACryptoConfig()
+            self.targets['test_suite_config.psa_boolean'] = \
+                lambda: enumerate_boolean_setting_cases(self.psa_config)
+        super().__init__(settings)
+
+
+if __name__ == '__main__':
+    test_data_generation.main(sys.argv[1:], __doc__, ConfigTestGenerator)
diff --git a/framework/scripts/generate_ecp_tests.py b/framework/scripts/generate_ecp_tests.py
new file mode 100755
index 0000000..b506be8
--- /dev/null
+++ b/framework/scripts/generate_ecp_tests.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+"""Generate test data for ecp functions.
+
+The command line usage, class structure and available methods are the same
+as in generate_bignum_tests.py.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import sys
+
+from mbedtls_framework import test_data_generation
+# Import modules containing additional test classes
+# Test function classes in these modules will be registered by
+# the framework
+from mbedtls_framework import ecp # pylint: disable=unused-import
+
+if __name__ == '__main__':
+    # Use the section of the docstring relevant to the CLI as description
+    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
diff --git a/framework/scripts/generate_pkcs7_tests.py b/framework/scripts/generate_pkcs7_tests.py
new file mode 100755
index 0000000..b97cebb
--- /dev/null
+++ b/framework/scripts/generate_pkcs7_tests.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+#
+#  Copyright The Mbed TLS Contributors
+#  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+"""
+Fuzz-like testing for PKCS7 tests.
+Given a valid DER PKCS7 file, add tests to the test_suite_pkcs7.data file.
+ - It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
+   function is defined in test_suite_pkcs7.function.
+ - This is not meant to be portable code; if anything, it is meant to serve as
+   documentation showing how the tests in test_suite_pkcs7.data were created.
+"""
+
+
+import sys
+from os.path import exists
+from mbedtls_framework import test_case
+
+PKCS7_TEST_FILE = "../suites/test_suite_pkcs7.data"
+
+class Test: # pylint: disable=too-few-public-methods
+    """
+    An instance of a test in test_suite_pkcs7.data
+    """
+    def __init__(self, name, depends, func_call):
+        self.name = name
+        self.depends = depends
+        self.func_call = func_call
+
+    # pylint: disable=no-self-use
+    def to_string(self):
+        return "\n" + self.name + "\n" + self.depends + "\n" + self.func_call + "\n"
+
+class TestData:
+    """
+    Take in test_suite_pkcs7.data file.
+    Allow for new tests to be added.
+    """
+    mandatory_dep = test_case.psa_or_3_6_feature_macro("PSA_ALG_SHA_256",
+                                                       test_case.Domain36.USE_PSA)
+
+    test_name = "PKCS7 Parse Failure Invalid ASN1"
+    test_function = "pkcs7_asn1_fail:"
+    def __init__(self, file_name):
+        self.file_name = file_name
+        self.last_test_num, self.old_tests = self.read_test_file(file_name)
+        self.new_tests = []
+
+    # pylint: disable=no-self-use
+    def read_test_file(self, file):
+        """
+        Parse the test_suite_pkcs7.data file.
+        """
+        tests = []
+        if not exists(file):
+            print(file + " does not exist")
+            sys.exit()
+        with open(file, "r", encoding='UTF-8') as fp:
+            data = fp.read()
+        lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
+        i = 0
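+        # Each test is either 3 lines (name, depends_on, function call)
+        # or 2 lines (name, function call).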
+        while i < len(lines):
+            if "depends" in lines[i+1]:
+                tests.append(Test(lines[i], lines[i+1], lines[i+2]))
+                i += 3
+            else:
+                tests.append(Test(lines[i], None, lines[i+1]))
+                i += 2
+        latest_test_num = float(tests[-1].name.split('#')[1])
+        return latest_test_num, tests
+
+    def add(self, name, func_call):
+        self.last_test_num += 1
+        self.new_tests.append(Test(
+            self.test_name + ": " + name + " #" + str(self.last_test_num),
+            "depends_on:" + self.mandatory_dep,
+            self.test_function + '"' + func_call + '"'))
+
+    def write_changes(self):
+        with open(self.file_name, 'a', encoding='UTF-8') as fw:
+            fw.write("\n")
+            for t in self.new_tests:
+                fw.write(t.to_string())
+
+
+def asn1_mutate(data):
+    """
+    We have been given an asn1 structure representing a pkcs7.
+    We want to return an array of slightly modified versions of this data,
+    each modified in a way that makes the structure invalid.
+
+    We know that asn1 structures are:
+    |---1 byte showing data type---|----byte(s) for length of data---|---data content--|
+    We know that some data types can contain other data types.
+    Return a list of (reason, mutated data) pairs.
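+
+    For example (illustrative), given the DER bytes ['30','03','02','01','05']
+    (a SEQUENCE containing INTEGER 5), the mutations include prepending and
+    appending a null byte, changing the INTEGER tag '02' to '04', and
+    changing each length byte up and down by one.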
+    """
+
+    # off the bat just add bytes to start and end of the buffer
+    mutations = []
+    reasons = []
+    mutations.append(["00"] + data)
+    reasons.append("Add null byte to start")
+    mutations.append(data + ["00"])
+    reasons.append("Add null byte to end")
+    # for every asn1 entry we should attempt to:
+    #    - change the data type tag
+    #    - make the length longer than actual
+    #    - make the length shorter than actual
+    i = 0
+    while i < len(data):
+        tag_i = i
+        leng_i = tag_i + 1
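+        # DER length encoding: a first byte 0x8N means the length itself
+        # occupies the next N bytes (long form); otherwise the first byte
+        # is the length (short form).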
+        data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
+        if data[leng_i][0] == '8':
+            length = int(''.join(data[leng_i + 1: data_i]), 16)
+        else:
+            length = int(data[leng_i], 16)
+
+        tag = data[tag_i]
+        print("Looking at ans1: offset " + str(i) + " tag = " + tag + \
+                ", length = " + str(length)+ ":")
+        print(''.join(data[data_i:data_i+length]))
+        # change tag to something else
+        if tag == "02":
+            # turn integers into octet strings
+            new_tag = "04"
+        else:
+            # turn everything else into an integer
+            new_tag = "02"
+        mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
+        reasons.append("Change tag " + tag + " to " + new_tag)
+
+        # change lengths to too big
+        # skip any edge cases which would cause carry over
+        if int(data[data_i - 1], 16) < 255:
+            new_length = str(hex(int(data[data_i - 1], 16) + 1))[2:]
+            if len(new_length) == 1:
+                new_length = "0"+new_length
+            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
+            reasons.append("Change length from " + str(length) + " to " \
+                    + str(length + 1))
+            # we can add another test here for tags that contain other tags,
+            # where they have more data than their containing tags account for
+            if tag in ["30", "a0", "31"]:
+                mutations.append(data[:data_i - 1] + [new_length] + \
+                        data[data_i:data_i + length] + ["00"] + \
+                        data[data_i + length:])
+                reasons.append("Change contents of tag " + tag +
+                               " to contain one unaccounted extra byte")
+        # change lengths to too small
+        if int(data[data_i - 1], 16) > 0:
+            new_length = str(hex(int(data[data_i - 1], 16) - 1))[2:]
+            if len(new_length) == 1:
+                new_length = "0"+new_length
+            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
+            reasons.append("Change length from " + str(length) + " to " + str(length - 1))
+
+        # some tag types contain other tag types so we should iterate into the data
+        if tag in ["30", "a0", "31"]:
+            i = data_i
+        else:
+            i = data_i + length
+
+    return list(zip(reasons, mutations))
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("USAGE: " + sys.argv[0] + " <pkcs7_der_file>")
+        sys.exit()
+
+    DATA_FILE = sys.argv[1]
+    TEST_DATA = TestData(PKCS7_TEST_FILE)
+    with open(DATA_FILE, 'rb') as f:
+        DATA_STR = f.read().hex()
+    # make data an array of byte strings eg ['de','ad','be','ef']
+    HEX_DATA = [DATA_STR[i:i+2] for i in range(0, len(DATA_STR), 2)]
+    # returns tuples of test_names and modified data buffers
+    MUT_ARR = asn1_mutate(HEX_DATA)
+
+    print("made " + str(len(MUT_ARR)) + " new tests")
+    for new_test in MUT_ARR:
+        TEST_DATA.add(new_test[0], ''.join(new_test[1]))
+
+    TEST_DATA.write_changes()
diff --git a/framework/scripts/generate_psa_tests.py b/framework/scripts/generate_psa_tests.py
new file mode 100755
index 0000000..9e628ab
--- /dev/null
+++ b/framework/scripts/generate_psa_tests.py
@@ -0,0 +1,837 @@
+#!/usr/bin/env python3
+"""Generate test data for PSA cryptographic mechanisms.
+
+With no arguments, generate all test data. With non-option arguments,
+generate only the specified files.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import enum
+import re
+import sys
+from typing import Callable, Dict, Iterable, Iterator, List, Optional
+
+from mbedtls_framework import crypto_data_tests
+from mbedtls_framework import crypto_knowledge
+from mbedtls_framework import macro_collector #pylint: disable=unused-import
+from mbedtls_framework import psa_information
+from mbedtls_framework import psa_storage
+from mbedtls_framework import psa_test_case
+from mbedtls_framework import test_case
+from mbedtls_framework import test_data_generation
+
+
+
+def test_case_for_key_type_not_supported(
+        verb: str, key_type: str, bits: int,
+        not_supported_mechanism: str,
+        *args: str,
+        param_descr: str = ''
+) -> test_case.TestCase:
+    """Return one test case exercising a key creation method
+    for an unsupported key type or size.
+    """
+    tc = psa_test_case.TestCase()
+    short_key_type = crypto_knowledge.short_expression(key_type)
+    tc.set_description('PSA {} {} {}-bit{} not supported'
+                       .format(verb, short_key_type, bits,
+                               ' ' + param_descr if param_descr else ''))
+    tc.set_function(verb + '_not_supported')
+    tc.set_key_bits(bits)
+    tc.set_key_pair_usage([verb.upper()])
+    tc.assumes_not_supported(not_supported_mechanism)
+    tc.set_arguments([key_type] + list(args))
+    return tc
+
+class KeyTypeNotSupported:
+    """Generate test cases for when a key type is not supported."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        self.constructors = info.constructors
+
+    ALWAYS_SUPPORTED = frozenset([
+        'PSA_KEY_TYPE_DERIVE',
+        'PSA_KEY_TYPE_PASSWORD',
+        'PSA_KEY_TYPE_PASSWORD_HASH',
+        'PSA_KEY_TYPE_RAW_DATA',
+        'PSA_KEY_TYPE_HMAC'
+    ])
+    def test_cases_for_key_type_not_supported(
+            self,
+            kt: crypto_knowledge.KeyType,
+            param: Optional[int] = None,
+            param_descr: str = '',
+    ) -> Iterator[test_case.TestCase]:
+        """Return test cases exercising key creation when the given type is unsupported.
+
+        If param is present and not None, emit test cases conditioned on this
+        parameter not being supported. If it is absent or None, emit test cases
+        conditioned on the base type not being supported.
+        """
+        if kt.name in self.ALWAYS_SUPPORTED:
+            # Don't generate test cases for key types that are always supported.
+            # They would be skipped in all configurations, which is noise.
+            return
+        if param is None:
+            not_supported_mechanism = kt.name
+        else:
+            assert kt.params is not None
+            not_supported_mechanism = kt.params[param]
+        for bits in kt.sizes_to_test():
+            yield test_case_for_key_type_not_supported(
+                'import', kt.expression, bits,
+                not_supported_mechanism,
+                test_case.hex_string(kt.key_material(bits)),
+                param_descr=param_descr,
+            )
+            # Don't generate not-supported test cases for key generation of
+            # public keys. Our implementation always returns
+            # PSA_ERROR_INVALID_ARGUMENT when attempting to generate a
+            # public key, so we cover this together with the positive cases
+            # in the KeyGenerate class.
+            if not kt.is_public():
+                yield test_case_for_key_type_not_supported(
+                    'generate', kt.expression, bits,
+                    not_supported_mechanism,
+                    str(bits),
+                    param_descr=param_descr,
+                )
+            # To be added: derive
+
+    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
+                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
+    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
+                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')
+
+    def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
+        """Generate test cases that exercise the creation of keys of unsupported types."""
+        for key_type in sorted(self.constructors.key_types):
+            if key_type in self.ECC_KEY_TYPES:
+                continue
+            if key_type in self.DH_KEY_TYPES:
+                continue
+            kt = crypto_knowledge.KeyType(key_type)
+            yield from self.test_cases_for_key_type_not_supported(kt)
+        for curve_family in sorted(self.constructors.ecc_curves):
+            for constr in self.ECC_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [curve_family])
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, param_descr='type')
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, 0, param_descr='curve')
+        for dh_family in sorted(self.constructors.dh_groups):
+            for constr in self.DH_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [dh_family])
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, param_descr='type')
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, 0, param_descr='group')
+
+def test_case_for_key_generation(
+        key_type: str, bits: int,
+        *args: str,
+        result: str = ''
+) -> test_case.TestCase:
+    """Return one test case exercising a key generation.
+    """
+    tc = psa_test_case.TestCase()
+    short_key_type = crypto_knowledge.short_expression(key_type)
+    tc.set_description('PSA {} {}-bit'
+                       .format(short_key_type, bits))
+    tc.set_function('generate_key')
+    tc.set_key_bits(bits)
+    tc.set_key_pair_usage(['GENERATE'])
+    tc.set_arguments([key_type] + list(args) + [result])
+    return tc
+
+class KeyGenerate:
+    """Generate positive and negative (invalid argument) test cases for key generation."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        self.constructors = info.constructors
+
+    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
+                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
+    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
+                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')
+
+    @staticmethod
+    def test_cases_for_key_type_key_generation(
+            kt: crypto_knowledge.KeyType
+    ) -> Iterator[test_case.TestCase]:
+        """Return test cases exercising key generation.
+
+        All key types can be generated except for public keys. For a public
+        key, a PSA_ERROR_INVALID_ARGUMENT status is expected.
+        """
+        for bits in kt.sizes_to_test():
+            tc = test_case_for_key_generation(
+                kt.expression, bits,
+                str(bits),
+                'PSA_ERROR_INVALID_ARGUMENT' if kt.is_public() else 'PSA_SUCCESS'
+            )
+            if kt.is_public():
+                # The library checks whether the key type is a public key generically,
+                # before it reaches a point where it needs support for the specific key
+                # type, so it returns INVALID_ARGUMENT for unsupported public key types.
+                tc.set_dependencies([])
+            yield tc
+
+    def test_cases_for_key_generation(self) -> Iterator[test_case.TestCase]:
+        """Generate test cases that exercise the generation of keys."""
+        for key_type in sorted(self.constructors.key_types):
+            if key_type in self.ECC_KEY_TYPES:
+                continue
+            if key_type in self.DH_KEY_TYPES:
+                continue
+            kt = crypto_knowledge.KeyType(key_type)
+            yield from self.test_cases_for_key_type_key_generation(kt)
+        for curve_family in sorted(self.constructors.ecc_curves):
+            for constr in self.ECC_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [curve_family])
+                yield from self.test_cases_for_key_type_key_generation(kt)
+        for dh_family in sorted(self.constructors.dh_groups):
+            for constr in self.DH_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [dh_family])
+                yield from self.test_cases_for_key_type_key_generation(kt)
+
+class OpFail:
+    """Generate test cases for operations that must fail."""
+    #pylint: disable=too-few-public-methods
+
+    class Reason(enum.Enum):
+        NOT_SUPPORTED = 0
+        INVALID = 1
+        INCOMPATIBLE = 2
+        PUBLIC = 3
+
+    def __init__(self, info: psa_information.Information) -> None:
+        self.constructors = info.constructors
+        key_type_expressions = self.constructors.generate_expressions(
+            sorted(self.constructors.key_types)
+        )
+        self.key_types = [crypto_knowledge.KeyType(kt_expr)
+                          for kt_expr in key_type_expressions]
+
+    def make_test_case(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+            reason: 'Reason',
+            kt: Optional[crypto_knowledge.KeyType] = None,
+            not_supported: Optional[str] = None,
+    ) -> test_case.TestCase:
+        """Construct a failure test case for a one-key or keyless operation.
+
+        If `reason` is `Reason.NOT_SUPPORTED`, pass the not-supported
+        dependency symbol as the `not_supported` argument.
+        """
+        #pylint: disable=too-many-arguments,too-many-locals
+        tc = psa_test_case.TestCase()
+        pretty_alg = alg.short_expression()
+        if reason == self.Reason.NOT_SUPPORTED:
+            assert not_supported is not None
+            pretty_reason = '!' + re.sub(r'PSA_WANT_[A-Z]+_', r'', not_supported)
+        else:
+            pretty_reason = reason.name.lower()
+        if kt:
+            key_type = kt.expression
+            pretty_type = kt.short_expression()
+        else:
+            key_type = ''
+            pretty_type = ''
+        tc.set_description('PSA {} {}: {}{}'
+                           .format(category.name.lower(),
+                                   pretty_alg,
+                                   pretty_reason,
+                                   ' with ' + pretty_type if pretty_type else ''))
+        tc.set_function(category.name.lower() + '_fail')
+        arguments = [] # type: List[str]
+        if kt:
+            bits = kt.sizes_to_test()[0]
+            tc.set_key_bits(bits)
+            tc.set_key_pair_usage(['IMPORT'])
+            key_material = kt.key_material(bits)
+            arguments += [key_type, test_case.hex_string(key_material)]
+        arguments.append(alg.expression)
+        if category.is_asymmetric():
+            arguments.append('1' if reason == self.Reason.PUBLIC else '0')
+        error = ('NOT_SUPPORTED' if reason == self.Reason.NOT_SUPPORTED else
+                 'INVALID_ARGUMENT')
+        arguments.append('PSA_ERROR_' + error)
+        if reason == self.Reason.NOT_SUPPORTED:
+            assert not_supported is not None
+            tc.assumes_not_supported(not_supported)
+            # Special case: if one of deterministic/randomized
+            # ECDSA is supported but not the other, then the one
+            # that is not supported in the signature direction is
+            # still supported in the verification direction,
+            # because the two verification algorithms are
+            # identical. This property is how Mbed TLS chooses to
+            # behave, the specification would also allow it to
+            # reject the algorithm. In the generated test cases,
+            # we avoid this difficulty by not running the
+            # not-supported test case when exactly one of the
+            # two variants is supported.
+            if not_supported == 'PSA_WANT_ALG_ECDSA':
+                tc.add_dependencies(['!PSA_WANT_ALG_DETERMINISTIC_ECDSA'])
+            if not_supported == 'PSA_WANT_ALG_DETERMINISTIC_ECDSA':
+                tc.add_dependencies(['!PSA_WANT_ALG_ECDSA'])
+        tc.set_arguments(arguments)
+        return tc
+
+    def no_key_test_cases(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate failure test cases for keyless operations with the specified algorithm."""
+        if alg.can_do(category):
+            # Compatible operation, unsupported algorithm
+            for dep in psa_information.automatic_dependencies(alg.base_expression):
+                yield self.make_test_case(alg, category,
+                                          self.Reason.NOT_SUPPORTED,
+                                          not_supported=dep)
+        else:
+            # Incompatible operation, supported algorithm
+            yield self.make_test_case(alg, category, self.Reason.INVALID)
+
+    def one_key_test_cases(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate failure test cases for one-key operations with the specified algorithm."""
+        for kt in self.key_types:
+            key_is_compatible = kt.can_do(alg)
+            if key_is_compatible and alg.can_do(category):
+                # Compatible key and operation, unsupported algorithm
+                for dep in psa_information.automatic_dependencies(alg.base_expression):
+                    yield self.make_test_case(alg, category,
+                                              self.Reason.NOT_SUPPORTED,
+                                              kt=kt, not_supported=dep)
+                # Public key for a private-key operation
+                if category.is_asymmetric() and kt.is_public():
+                    yield self.make_test_case(alg, category,
+                                              self.Reason.PUBLIC,
+                                              kt=kt)
+            elif key_is_compatible:
+                # Compatible key, incompatible operation, supported algorithm
+                yield self.make_test_case(alg, category,
+                                          self.Reason.INVALID,
+                                          kt=kt)
+            elif alg.can_do(category):
+                # Incompatible key, compatible operation, supported algorithm
+                yield self.make_test_case(alg, category,
+                                          self.Reason.INCOMPATIBLE,
+                                          kt=kt)
+            else:
+                # Incompatible key and operation. Don't test cases where
+                # multiple things are wrong, to keep the number of test
+                # cases reasonable.
+                pass
+
+    def test_cases_for_algorithm(
+            self,
+            alg: crypto_knowledge.Algorithm,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate operation failure test cases for the specified algorithm."""
+        for category in crypto_knowledge.AlgorithmCategory:
+            if category == crypto_knowledge.AlgorithmCategory.PAKE:
+                # PAKE operations are not implemented yet
+                pass
+            elif category.requires_key():
+                yield from self.one_key_test_cases(alg, category)
+            else:
+                yield from self.no_key_test_cases(alg, category)
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Generate all test cases for operations that must fail."""
+        algorithms = sorted(self.constructors.algorithms)
+        for expr in self.constructors.generate_expressions(algorithms):
+            alg = crypto_knowledge.Algorithm(expr)
+            yield from self.test_cases_for_algorithm(alg)
+
+
+class StorageKey(psa_storage.Key):
+    """Representation of a key for storage format testing."""
+
+    IMPLICIT_USAGE_FLAGS = {
+        'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
+        'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
+    } #type: Dict[str, str]
+    """Mapping of usage flags to the flags that they imply."""
+
+    def __init__(
+            self,
+            usage: Iterable[str],
+            without_implicit_usage: Optional[bool] = False,
+            **kwargs
+    ) -> None:
+        """Prepare to generate a key.
+
+        * `usage`                 : The usage flags used for the key.
+        * `without_implicit_usage`: if true, do not add the usage flags
+          implied by the flags in `usage`.
+        """
+        usage_flags = set(usage)
+        if not without_implicit_usage:
+            for flag in sorted(usage_flags):
+                if flag in self.IMPLICIT_USAGE_FLAGS:
+                    usage_flags.add(self.IMPLICIT_USAGE_FLAGS[flag])
+        if usage_flags:
+            usage_expression = ' | '.join(sorted(usage_flags))
+        else:
+            usage_expression = '0'
+        super().__init__(usage=usage_expression, **kwargs)
+
+class StorageTestData(StorageKey):
+    """Representation of test case data for storage format testing."""
+
+    def __init__(
+            self,
+            description: str,
+            expected_usage: Optional[List[str]] = None,
+            **kwargs
+    ) -> None:
+        """Prepare to generate test data
+
+        * `description`   : used for the test case names
+        * `expected_usage`: the usage flags generated as the expected usage flags
+                            in the test cases. CAn differ from the usage flags
+                            stored in the keys because of the usage flags extension.
+        """
+        super().__init__(**kwargs)
+        self.description = description #type: str
+        if expected_usage is None:
+            self.expected_usage = self.usage #type: psa_storage.Expr
+        elif expected_usage:
+            self.expected_usage = psa_storage.Expr(' | '.join(expected_usage))
+        else:
+            self.expected_usage = psa_storage.Expr(0)
+
+class StorageFormat:
+    """Storage format stability test cases."""
+
+    def __init__(self, info: psa_information.Information, version: int, forward: bool) -> None:
+        """Prepare to generate test cases for storage format stability.
+
+        * `info`: information about the API. See the `Information` class.
+        * `version`: the storage format version to generate test cases for.
+        * `forward`: if true, generate forward compatibility test cases which
+          save a key and check that its representation is as intended. Otherwise
+          generate backward compatibility test cases which inject a key
+          representation and check that it can be read and used.
+        """
+        self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
+        self.version = version #type: int
+        self.forward = forward #type: bool
+
+    RSA_OAEP_RE = re.compile(r'PSA_ALG_RSA_OAEP\((.*)\)\Z')
+    BRAINPOOL_RE = re.compile(r'PSA_KEY_TYPE_\w+\(PSA_ECC_FAMILY_BRAINPOOL_\w+\)\Z')
+    @classmethod
+    def exercise_key_with_algorithm(
+            cls,
+            key_type: psa_storage.Expr, bits: int,
+            alg: psa_storage.Expr
+    ) -> bool:
+        """Whether to exercise the given key with the given algorithm.
+
+        Normally only the type and algorithm matter for compatibility, and
+        this is handled in crypto_knowledge.KeyType.can_do(). This function
+        exists to detect exceptional cases. Exceptional cases detected here
+        are not tested in OpFail and should therefore have manually written
+        test cases.
+        """
+        # Some test keys have the RAW_DATA type and attributes that don't
+        # necessarily make sense. We do this to validate numerical
+        # encodings of the attributes.
+        # Raw data keys have no useful exercise anyway so there is no
+        # loss of test coverage.
+        if key_type.string == 'PSA_KEY_TYPE_RAW_DATA':
+            return False
+        # OAEP requires room for two hashes plus wrapping
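+        # For example, RSA-1024 (128-byte key) with SHA-256 (32-byte hash):
+        # 128 > 2 * 32 + 2, so such keys are exercised with OAEP-SHA-256.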
+        m = cls.RSA_OAEP_RE.match(alg.string)
+        if m:
+            hash_alg = m.group(1)
+            hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg)
+            key_length = (bits + 7) // 8
+            # Leave enough room for at least one byte of plaintext
+            return key_length > 2 * hash_length + 2
+        # There's nothing wrong with ECC keys on Brainpool curves,
+        # but operations with them are very slow. So we only exercise them
+        # with a single algorithm, not with all possible hashes. We do
+        # exercise other curves with all algorithms so test coverage is
+        # perfectly adequate like this.
+        m = cls.BRAINPOOL_RE.match(key_type.string)
+        if m and alg.string != 'PSA_ALG_ECDSA_ANY':
+            return False
+        return True
+
+    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
+        """Construct a storage format test case for the given key.
+
+        If ``forward`` is true, generate a forward compatibility test case:
+        create a key and validate that it has the expected representation.
+        Otherwise generate a backward compatibility test case: inject the
+        key representation into storage and validate that it can be read
+        correctly.
+        """
+        verb = 'save' if self.forward else 'read'
+        tc = psa_test_case.TestCase()
+        tc.set_description(verb + ' ' + key.description)
+        tc.add_dependencies(psa_information.generate_deps_from_description(key.description))
+        tc.set_function('key_storage_' + verb)
+        tc.set_key_bits(key.bits)
+        tc.set_key_pair_usage(['IMPORT'] if self.forward else ['EXPORT'])
+        if self.forward:
+            extra_arguments = []
+        else:
+            flags = []
+            if self.exercise_key_with_algorithm(key.type, key.bits, key.alg):
+                flags.append('TEST_FLAG_EXERCISE')
+            if 'READ_ONLY' in key.lifetime.string:
+                flags.append('TEST_FLAG_READ_ONLY')
+            extra_arguments = [' | '.join(flags) if flags else '0']
+        tc.set_arguments([key.lifetime.string,
+                          key.type.string, str(key.bits),
+                          key.expected_usage.string,
+                          key.alg.string, key.alg2.string,
+                          '"' + key.material.hex() + '"',
+                          '"' + key.hex() + '"',
+                          *extra_arguments])
+        return tc
+
+    def key_for_lifetime(
+            self,
+            lifetime: str,
+    ) -> StorageTestData:
+        """Construct a test key for the given lifetime."""
+        short = lifetime
+        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
+                       r'', short)
+        short = crypto_knowledge.short_expression(short)
+        description = 'lifetime: ' + short
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=lifetime,
+                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                              usage=['PSA_KEY_USAGE_EXPORT'], alg=0, alg2=0,
+                              material=b'L',
+                              description=description)
+        return key
+
+    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering lifetimes."""
+        lifetimes = sorted(self.constructors.lifetimes)
+        expressions = self.constructors.generate_expressions(lifetimes)
+        for lifetime in expressions:
+            # Don't attempt to create or load a volatile key in storage
+            if 'VOLATILE' in lifetime:
+                continue
+            # Don't attempt to create a read-only key in storage,
+            # but do attempt to load one.
+            if 'READ_ONLY' in lifetime and self.forward:
+                continue
+            yield self.key_for_lifetime(lifetime)
+
+    def key_for_usage_flags(
+            self,
+            usage_flags: List[str],
+            short: Optional[str] = None,
+            test_implicit_usage: Optional[bool] = True
+    ) -> StorageTestData:
+        """Construct a test key for the given key usage."""
+        extra_desc = ' without implication' if test_implicit_usage else ''
+        description = 'usage' + extra_desc + ': '
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               expected_usage=usage_flags,
+                               without_implicit_usage=not test_implicit_usage,
+                               usage=usage_flags, alg=0, alg2=0,
+                               material=b'K',
+                               description=description)
+        if short is None:
+            usage_expr = key1.expected_usage.string
+            key1.description += crypto_knowledge.short_expression(usage_expr)
+        else:
+            key1.description += short
+        return key1
+
+    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield self.key_for_usage_flags(['0'], **kwargs)
+        for usage_flag in known_flags:
+            yield self.key_for_usage_flags([usage_flag], **kwargs)
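+        # Test pairs of flags: each flag together with the next one in
+        # order, wrapping around, e.g. (f1, f2), (f2, f3), ..., (fn, f1).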
+        for flag1, flag2 in zip(known_flags,
+                                known_flags[1:] + [known_flags[0]]):
+            yield self.key_for_usage_flags([flag1, flag2], **kwargs)
+
+    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield self.key_for_usage_flags(known_flags, short='all known')
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        yield from self.generate_keys_for_usage_flags()
+        yield from self.generate_key_for_all_usage_flags()
+
+    def key_for_type_and_alg(
+            self,
+            kt: crypto_knowledge.KeyType,
+            bits: int,
+            alg: Optional[crypto_knowledge.Algorithm] = None,
+    ) -> StorageTestData:
+        """Construct a test key of the given type.
+
+        If alg is not None, this key allows it.
+        """
+        usage_flags = ['PSA_KEY_USAGE_EXPORT']
+        alg1 = 0 #type: psa_storage.Exprable
+        alg2 = 0
+        if alg is not None:
+            alg1 = alg.expression
+            usage_flags += alg.usage_flags(public=kt.is_public())
+        key_material = kt.key_material(bits)
+        description = 'type: {} {}-bit'.format(kt.short_expression(1), bits)
+        if alg is not None:
+            description += ', ' + alg.short_expression(1)
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=0x00000001,
+                              type=kt.expression, bits=bits,
+                              usage=usage_flags, alg=alg1, alg2=alg2,
+                              material=key_material,
+                              description=description)
+        return key
+
+    def keys_for_type(
+            self,
+            key_type: str,
+            all_algorithms: List[crypto_knowledge.Algorithm],
+    ) -> Iterator[StorageTestData]:
+        """Generate test keys for the given key type."""
+        kt = crypto_knowledge.KeyType(key_type)
+        for bits in kt.sizes_to_test():
+            # Test a non-exercisable key, as well as exercisable keys for
+            # each compatible algorithm.
+            # To do: test reading a key from storage with an incompatible
+            # or unsupported algorithm.
+            yield self.key_for_type_and_alg(kt, bits)
+            compatible_algorithms = [alg for alg in all_algorithms
+                                     if kt.can_do(alg)]
+            for alg in compatible_algorithms:
+                yield self.key_for_type_and_alg(kt, bits, alg)
+
+    def all_keys_for_types(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering key types and their representations."""
+        key_types = sorted(self.constructors.key_types)
+        all_algorithms = [crypto_knowledge.Algorithm(alg)
+                          for alg in self.constructors.generate_expressions(
+                              sorted(self.constructors.algorithms)
+                          )]
+        for key_type in self.constructors.generate_expressions(key_types):
+            yield from self.keys_for_type(key_type, all_algorithms)
+
+    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
+        """Generate test keys for the encoding of the specified algorithm."""
+        # These test cases only validate the encoding of algorithms, not
+        # whether the key read from storage is suitable for an operation.
+        # `keys_for_type` generates read tests with an algorithm and a
+        # compatible key.
+        descr = crypto_knowledge.short_expression(alg, 1)
+        usage = ['PSA_KEY_USAGE_EXPORT']
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=alg, alg2=0,
+                               material=b'K',
+                               description='alg: ' + descr)
+        yield key1
+        key2 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=0, alg2=alg,
+                               material=b'L',
+                               description='alg2: ' + descr)
+        yield key2
+
+    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering algorithm encodings."""
+        algorithms = sorted(self.constructors.algorithms)
+        for alg in self.constructors.generate_expressions(algorithms):
+            yield from self.keys_for_algorithm(alg)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        """Generate all keys for the test cases."""
+        yield from self.all_keys_for_lifetimes()
+        yield from self.all_keys_for_usage_flags()
+        yield from self.all_keys_for_types()
+        yield from self.all_keys_for_algorithms()
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Generate all storage format test cases."""
+        # First build a list of all keys, then construct all the corresponding
+        # test cases. This allows all required information to be obtained in
+        # one go, which is a significant performance gain as the information
+        # includes numerical values obtained by compiling a C program.
+        all_keys = list(self.generate_all_keys())
+        for key in all_keys:
+            if key.location_value() != 0:
+                # Skip keys with a non-default location, because they
+                # require a driver and we currently have no mechanism to
+                # determine whether a driver is available.
+                continue
+            yield self.make_test_case(key)
+
+class StorageFormatForward(StorageFormat):
+    """Storage format stability test cases for forward compatibility."""
+
+    def __init__(self, info: psa_information.Information, version: int) -> None:
+        super().__init__(info, version, True)
+
+class StorageFormatV0(StorageFormat):
+    """Storage format stability test cases for version 0 compatibility."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        super().__init__(info, 0, False)
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        yield from super().all_keys_for_usage_flags()
+        yield from self.generate_keys_for_usage_flags(test_implicit_usage=False)
+
+    def keys_for_implicit_usage(
+            self,
+            implyer_usage: str,
+            alg: str,
+            key_type: crypto_knowledge.KeyType
+    ) -> StorageTestData:
+        # pylint: disable=too-many-locals
+        """Generate test keys for the specified implicit usage flag,
+           algorithm and key type combination.
+        """
+        bits = key_type.sizes_to_test()[0]
+        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
+        usage_flags = ['PSA_KEY_USAGE_EXPORT']
+        material_usage_flags = usage_flags + [implyer_usage]
+        expected_usage_flags = material_usage_flags + [implicit_usage]
+        alg2 = 0
+        key_material = key_type.key_material(bits)
+        usage_expression = crypto_knowledge.short_expression(implyer_usage, 1)
+        alg_expression = crypto_knowledge.short_expression(alg, 1)
+        key_type_expression = key_type.short_expression(1)
+        description = 'implied by {}: {} {} {}-bit'.format(
+            usage_expression, alg_expression, key_type_expression, bits)
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=0x00000001,
+                              type=key_type.expression, bits=bits,
+                              usage=material_usage_flags,
+                              expected_usage=expected_usage_flags,
+                              without_implicit_usage=True,
+                              alg=alg, alg2=alg2,
+                              material=key_material,
+                              description=description)
+        return key
+
+    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
+        # pylint: disable=too-many-locals
+        """Match possible key types for sign algorithms."""
+        # To create a valid combination, both the algorithms and key types
+        # must be filtered. Pair them with keywords derived from their names.
+        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
+        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
+        keyword_translation = {
+            'ECDSA': 'ECC',
+            'ED[0-9]*.*' : 'EDWARDS'
+        }
+        exclusive_keywords = {
+            'EDWARDS': 'ECC'
+        }
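+        # For example (illustrative): PSA_ALG_ECDSA(PSA_ALG_SHA_256) yields
+        # the keyword 'ECDSA', which is translated to 'ECC' so that it can
+        # match key types such as PSA_KEY_TYPE_ECC_KEY_PAIR(...).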
+        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
+        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
+        alg_with_keys = {} #type: Dict[str, List[str]]
+        translation_table = str.maketrans('(', '_', ')')
+        for alg in algorithms:
+            # Generate keywords from the name of the algorithm
+            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
+            # Translate keywords for better matching with the key types
+            for keyword in alg_keywords.copy():
+                for pattern, replace in keyword_translation.items():
+                    if re.match(pattern, keyword):
+                        alg_keywords.remove(keyword)
+                        alg_keywords.add(replace)
+            # Filter out incompatible algorithms
+            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
+                continue
+
+            for key_type in key_types:
+                # Generate keywords from the name of the key type
+                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])
+
+                # Remove ambiguous keywords
+                for keyword1, keyword2 in exclusive_keywords.items():
+                    if keyword1 in key_type_keywords:
+                        key_type_keywords.remove(keyword2)
+
+                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
+                   not key_type_keywords.isdisjoint(alg_keywords):
+                    if alg in alg_with_keys:
+                        alg_with_keys[alg].append(key_type)
+                    else:
+                        alg_with_keys[alg] = [key_type]
+        return alg_with_keys
+
+    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
+        """Generate test keys for usage flag extensions."""
+        # Generate a key type and algorithm pair for each extendable usage
+        # flag to generate a valid key for exercising. The key is generated
+        # without usage extension to check the extension compatibility.
+        alg_with_keys = self.gather_key_types_for_sign_alg()
+
+        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
+            for alg in sorted(alg_with_keys):
+                for key_type in sorted(alg_with_keys[alg]):
+                    # The key types must be filtered to fit the specific usage flag.
+                    kt = crypto_knowledge.KeyType(key_type)
+                    if kt.is_public() and '_SIGN_' in usage:
+                        # Can't sign with a public key
+                        continue
+                    yield self.keys_for_implicit_usage(usage, alg, kt)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        yield from super().generate_all_keys()
+        yield from self.all_keys_for_implicit_usage()
+
+
+class PSATestGenerator(test_data_generation.TestGenerator):
+    """Test generator subclass including PSA targets and info."""
+    # Note that targets whose names contain 'test_format' have their content
+    # validated by `abi_check.py`.
+    targets = {
+        'test_suite_psa_crypto_generate_key.generated':
+        lambda info: KeyGenerate(info).test_cases_for_key_generation(),
+        'test_suite_psa_crypto_not_supported.generated':
+        lambda info: KeyTypeNotSupported(info).test_cases_for_not_supported(),
+        'test_suite_psa_crypto_low_hash.generated':
+        lambda info: crypto_data_tests.HashPSALowLevel(info).all_test_cases(),
+        'test_suite_psa_crypto_op_fail.generated':
+        lambda info: OpFail(info).all_test_cases(),
+        'test_suite_psa_crypto_storage_format.current':
+        lambda info: StorageFormatForward(info, 0).all_test_cases(),
+        'test_suite_psa_crypto_storage_format.v0':
+        lambda info: StorageFormatV0(info).all_test_cases(),
+    } #type: Dict[str, Callable[[psa_information.Information], Iterable[test_case.TestCase]]]
+
+    def __init__(self, options):
+        super().__init__(options)
+        self.info = psa_information.Information()
+
+    def generate_target(self, name: str, *target_args) -> None:
+        super().generate_target(name, self.info)
+
+
+if __name__ == '__main__':
+    test_data_generation.main(sys.argv[1:], __doc__, PSATestGenerator)
diff --git a/framework/scripts/generate_psa_wrappers.py b/framework/scripts/generate_psa_wrappers.py
new file mode 100755
index 0000000..01baad1
--- /dev/null
+++ b/framework/scripts/generate_psa_wrappers.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+"""Generate wrapper functions for PSA function calls.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+from mbedtls_framework.code_wrapper.psa_test_wrapper import PSATestWrapper, PSALoggingTestWrapper
+from mbedtls_framework import build_tree
+
+def main() -> None:
+    default_c_output_file_name = 'tests/src/psa_test_wrappers.c'
+    default_h_output_file_name = 'tests/include/test/psa_test_wrappers.h'
+
+    project_root = build_tree.guess_project_root()
+    if build_tree.looks_like_mbedtls_root(project_root) and \
+       not build_tree.is_mbedtls_3_6():
+        default_c_output_file_name = 'tf-psa-crypto/' + default_c_output_file_name
+        default_h_output_file_name = 'tf-psa-crypto/' + default_h_output_file_name
+
+    parser = argparse.ArgumentParser(description=globals()['__doc__'])
+    parser.add_argument('--log',
+                        help='Stream to log to (default: no logging code)')
+    parser.add_argument('--output-c',
+                        metavar='FILENAME',
+                        default=default_c_output_file_name,
+                        help=('Output .c file path (default: {}; skip .c output if empty)'
+                              .format(default_c_output_file_name)))
+    parser.add_argument('--output-h',
+                        metavar='FILENAME',
+                        default=default_h_output_file_name,
+                        help=('Output .h file path (default: {}; skip .h output if empty)'
+                              .format(default_h_output_file_name)))
+    options = parser.parse_args()
+
+    if options.log:
+        generator = PSALoggingTestWrapper(default_h_output_file_name,
+                                          default_c_output_file_name,
+                                          options.log) #type: PSATestWrapper
+    else:
+        generator = PSATestWrapper(default_h_output_file_name,
+                                   default_c_output_file_name)
+
+    if options.output_h:
+        generator.write_h_file(options.output_h)
+    if options.output_c:
+        generator.write_c_file(options.output_c)
+
+if __name__ == '__main__':
+    main()
diff --git a/framework/scripts/generate_ssl_debug_helpers.py b/framework/scripts/generate_ssl_debug_helpers.py
new file mode 100755
index 0000000..39382f0
--- /dev/null
+++ b/framework/scripts/generate_ssl_debug_helpers.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python3
+
+"""Generate library/ssl_debug_helpers_generated.c
+
+The code generated by this module consists of debug helper functions that
+cannot be implemented as static code.
+
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+import sys
+import re
+import os
+import textwrap
+import argparse
+
+from mbedtls_framework import build_tree
+
+
+def remove_c_comments(string):
+    """
+        Remove C-style comments from the input string.
+    """
+    string_pattern = r"(?P<string>\".*?\"|\'.*?\')"
+    comment_pattern = r"(?P<comment>/\*.*?\*/|//[^\r\n]*$)"
+    pattern = re.compile(string_pattern + r'|' + comment_pattern,
+                         re.MULTILINE | re.DOTALL)
+
+    def replacer(match):
+        if match.lastgroup == 'comment':
+            return ""
+        return match.group()
+    return pattern.sub(replacer, string)
+
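+# A minimal illustration (hypothetical input, not exercised anywhere):
+#   remove_c_comments('int a; /* note */ s = "/*kept*/"; // tail')
+# returns 'int a;  s = "/*kept*/"; ' -- block and line comments are dropped,
+# while string literals that merely look like comments are preserved.
+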
+
+class CondDirectiveNotMatch(Exception):
+    pass
+
+
+def preprocess_c_source_code(source, *classes):
+    """
+        Simple preprocessor for C source code.
+
+        Only processes conditional directives, without expanding them.
+        Yields objects according to the given classes; the first match
+        wins.
+
+        If a directive pair does not match, raises CondDirectiveNotMatch.
+
+        Assumes the source code contains no comments and compiles cleanly.
+
+    """
+
+    pattern = re.compile(r"^[ \t]*#[ \t]*" +
+                         r"(?P<directive>(if[ \t]|ifndef[ \t]|ifdef[ \t]|else|endif))" +
+                         r"[ \t]*(?P<param>(.*\\\n)*.*$)",
+                         re.MULTILINE)
+    stack = []
+
+    def _yield_objects(s, d, p, st, end):
+        """
+            Output matched source piece
+        """
+        nonlocal stack
+        start_line, end_line = '', ''
+        if stack:
+            start_line = '#{} {}'.format(d, p)
+            if d == 'if':
+                end_line = '#endif /* {} */'.format(p)
+            elif d == 'ifdef':
+                end_line = '#endif /* defined({}) */'.format(p)
+            else:
+                end_line = '#endif /* !defined({}) */'.format(p)
+        has_instance = False
+        for cls in classes:
+            for instance in cls.extract(s, st, end):
+                if has_instance is False:
+                    has_instance = True
+                    yield pair_start, start_line
+                yield instance.span()[0], instance
+        if has_instance:
+            yield start, end_line
+
+    for match in pattern.finditer(source):
+
+        directive = match.groupdict()['directive'].strip()
+        param = match.groupdict()['param']
+        start, end = match.span()
+
+        if directive in ('if', 'ifndef', 'ifdef'):
+            stack.append((directive, param, start, end))
+            continue
+
+        if not stack:
+            raise CondDirectiveNotMatch()
+
+        pair_directive, pair_param, pair_start, pair_end = stack.pop()
+        yield from _yield_objects(source,
+                                  pair_directive,
+                                  pair_param,
+                                  pair_end,
+                                  start)
+
+        if directive == 'endif':
+            continue
+
+        if pair_directive == 'if':
+            directive = 'if'
+            param = "!( {} )".format(pair_param)
+        elif pair_directive == 'ifdef':
+            directive = 'ifndef'
+            param = pair_param
+        else:
+            directive = 'ifdef'
+            param = pair_param
+
+        stack.append((directive, param, start, end))
+    assert not stack, len(stack)
+
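+# Sketch of intended use (EnumDefinition is defined below):
+#   for position, obj in preprocess_c_source_code(code, EnumDefinition):
+#       ...  # obj is either a guarding '#if'/'#endif' line (a string) or a
+#            # matched instance of one of the given classes
+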
+
+class EnumDefinition:
+    """
+        Generate helper functions around enumeration.
+
+        Currently, it generates a translation function from an enum value
+        to a string. An enum definition looks like:
+        [typedef] enum [prefix name] { [body] } [suffix name];
+
+        Known limitation:
+        - the '}' and ';' SHOULD NOT be in different macro blocks, as in:
+        ```
+        enum test {
+            ....
+        #if defined(A)
+            ....
+        };
+        #else
+            ....
+        };
+        #endif
+        ```
+    """
+
+    @classmethod
+    def extract(cls, source_code, start=0, end=-1):
+        enum_pattern = re.compile(r'enum\s*(?P<prefix_name>\w*)\s*' +
+                                  r'{\s*(?P<body>[^}]*)}' +
+                                  r'\s*(?P<suffix_name>\w*)\s*;',
+                                  re.MULTILINE | re.DOTALL)
+
+        for match in enum_pattern.finditer(source_code, start, end):
+            yield EnumDefinition(source_code,
+                                 span=match.span(),
+                                 group=match.groupdict())
+
+    def __init__(self, source_code, span=None, group=None):
+        assert isinstance(group, dict)
+        prefix_name = group.get('prefix_name', None)
+        suffix_name = group.get('suffix_name', None)
+        body = group.get('body', None)
+        assert prefix_name or suffix_name
+        assert body
+        assert span
+        # If suffix_name exists, it is a typedef
+        self._prototype = suffix_name if suffix_name else 'enum ' + prefix_name
+        self._name = suffix_name if suffix_name else prefix_name
+        self._body = body
+        self._source = source_code
+        self._span = span
+
+    def __repr__(self):
+        return 'Enum({},{})'.format(self._name, self._span)
+
+    def __str__(self):
+        return repr(self)
+
+    def span(self):
+        return self._span
+
+    def generate_translation_function(self):
+        """
+            Generate function for translating value to string
+        """
+        translation_table = []
+
+        for line in self._body.splitlines():
+
+            if line.strip().startswith('#'):
+                # Preprocessor directive, keep it in the table
+                translation_table.append(line.strip())
+                continue
+
+            if not line.strip():
+                continue
+
+            for field in line.strip().split(','):
+                if not field.strip():
+                    continue
+                member = field.strip().split()[0]
+                translation_table.append(
+                    '{space}case {member}:\n{space}    return "{member}";'
+                    .format(member=member, space=' '*8)
+                )
+
+        body = textwrap.dedent('''\
+            const char *{name}_str( {prototype} in )
+            {{
+                switch (in) {{
+            {translation_table}
+                    default:
+                        return "UNKNOWN_VALUE";
+                }}
+            }}
+                    ''')
+        body = body.format(translation_table='\n'.join(translation_table),
+                           name=self._name,
+                           prototype=self._prototype)
+        return body
+
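+# For a hypothetical input 'typedef enum { DEMO_A, DEMO_B } demo_t;' the
+# generated translation function looks roughly like:
+#   const char *demo_t_str( demo_t in )
+#   {
+#       switch (in) {
+#           case DEMO_A:
+#               return "DEMO_A";
+#           case DEMO_B:
+#               return "DEMO_B";
+#           default:
+#               return "UNKNOWN_VALUE";
+#       }
+#   }
+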
+
+class SignatureAlgorithmDefinition:
+    """
+        Generate helper functions for signature algorithms.
+
+        It generates a translation function from a signature algorithm
+        define to a string. A signature algorithm definition looks like:
+        #define MBEDTLS_TLS1_3_SIG_[ upper case signature algorithm ] [ value(hex) ]
+
+        Known limitation:
+        - the definitions SHOULD exist in the same macro block.
+    """
+
+    @classmethod
+    def extract(cls, source_code, start=0, end=-1):
+        sig_alg_pattern = re.compile(r'#define\s+(?P<name>MBEDTLS_TLS1_3_SIG_\w+)\s+' +
+                                     r'(?P<value>0[xX][0-9a-fA-F]+)$',
+                                     re.MULTILINE | re.DOTALL)
+        matches = list(sig_alg_pattern.finditer(source_code, start, end))
+        if matches:
+            yield SignatureAlgorithmDefinition(source_code, definitions=matches)
+
+    def __init__(self, source_code, definitions=None):
+        if definitions is None:
+            definitions = []
+        assert isinstance(definitions, list) and definitions
+        self._definitions = definitions
+        self._source = source_code
+
+    def __repr__(self):
+        return 'SigAlgs({})'.format(self._definitions[0].span())
+
+    def span(self):
+        return self._definitions[0].span()
+
+    def __str__(self):
+        """
+            Generate function for translating value to string
+        """
+        translation_table = []
+        for m in self._definitions:
+            name = m.groupdict()['name']
+            return_val = name[len('MBEDTLS_TLS1_3_SIG_'):].lower()
+            translation_table.append(
+                '    case {}:\n        return "{}";'.format(name, return_val))
+
+        body = textwrap.dedent('''\
+            const char *mbedtls_ssl_sig_alg_to_str( uint16_t in )
+            {{
+                switch( in )
+                {{
+            {translation_table}
+                }};
+
+                return "UNKNOWN";
+            }}''')
+        body = body.format(translation_table='\n'.join(translation_table))
+        return body
+
+
+class NamedGroupDefinition:
+    """
+        Generate helper functions for named groups.
+
+        It generates a translation function from a named group define to
+        a string. A named group definition looks like:
+        #define MBEDTLS_SSL_IANA_TLS_GROUP_[ upper case named group ] [ value(hex) ]
+
+        Known limitation:
+        - the definitions SHOULD exist in the same macro block.
+    """
+
+    @classmethod
+    def extract(cls, source_code, start=0, end=-1):
+        named_group_pattern = re.compile(r'#define\s+(?P<name>MBEDTLS_SSL_IANA_TLS_GROUP_\w+)\s+' +
+                                         r'(?P<value>0[xX][0-9a-fA-F]+)$',
+                                         re.MULTILINE | re.DOTALL)
+        matches = list(named_group_pattern.finditer(source_code, start, end))
+        if matches:
+            yield NamedGroupDefinition(source_code, definitions=matches)
+
+    def __init__(self, source_code, definitions=None):
+        if definitions is None:
+            definitions = []
+        assert isinstance(definitions, list) and definitions
+        self._definitions = definitions
+        self._source = source_code
+
+    def __repr__(self):
+        return 'NamedGroup({})'.format(self._definitions[0].span())
+
+    def span(self):
+        return self._definitions[0].span()
+
+    def __str__(self):
+        """
+            Generate function for translating value to string
+        """
+        translation_table = []
+        for m in self._definitions:
+            name = m.groupdict()['name']
+            iana_name = name[len('MBEDTLS_SSL_IANA_TLS_GROUP_'):].lower()
+            translation_table.append('    case {}:\n        return "{}";'.format(name, iana_name))
+
+        body = textwrap.dedent('''\
+            const char *mbedtls_ssl_named_group_to_str( uint16_t in )
+            {{
+                switch( in )
+                {{
+            {translation_table}
+                }};
+
+                return "UNKNOWN";
+            }}''')
+        body = body.format(translation_table='\n'.join(translation_table))
+        return body
+
+
+OUTPUT_C_TEMPLATE = '''\
+/* Automatically generated by generate_ssl_debug_helpers.py. DO NOT EDIT. */
+
+/**
+ * \\file ssl_debug_helpers_generated.c
+ *
+ * \\brief Automatically generated helper functions for debugging
+ */
+/*
+ *  Copyright The Mbed TLS Contributors
+ *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+ *
+ */
+
+#include "ssl_misc.h"
+
+#if defined(MBEDTLS_DEBUG_C)
+
+#include "ssl_debug_helpers.h"
+
+{functions}
+
+#endif /* MBEDTLS_DEBUG_C */
+/* End of automatically generated file. */
+
+'''
+
+
+def generate_ssl_debug_helpers(output_directory, mbedtls_root):
+    """
+        Generate debug helper functions.
+    """
+    mbedtls_root = os.path.abspath(
+        mbedtls_root or build_tree.guess_mbedtls_root())
+    with open(os.path.join(mbedtls_root, 'include/mbedtls/ssl.h')) as f:
+        source_code = remove_c_comments(f.read())
+
+    definitions = dict()
+    for start, instance in preprocess_c_source_code(source_code,
+                                                    EnumDefinition,
+                                                    SignatureAlgorithmDefinition,
+                                                    NamedGroupDefinition):
+        if start in definitions:
+            continue
+        if isinstance(instance, EnumDefinition):
+            definition = instance.generate_translation_function()
+        else:
+            definition = instance
+        definitions[start] = definition
+
+    function_definitions = [str(v) for _, v in sorted(definitions.items())]
+    if output_directory == sys.stdout:
+        sys.stdout.write(OUTPUT_C_TEMPLATE.format(
+            functions='\n'.join(function_definitions)))
+    else:
+        with open(os.path.join(output_directory, 'ssl_debug_helpers_generated.c'), 'w') as f:
+            f.write(OUTPUT_C_TEMPLATE.format(
+                functions='\n'.join(function_definitions)))
+
+
+def main():
+    """
+    Command line entry
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--mbedtls-root', nargs='?', default=None,
+                        help='root directory of mbedtls source code')
+    parser.add_argument('output_directory', nargs='?',
+                        default='library', help='source/header files location')
+
+    args = parser.parse_args()
+
+    generate_ssl_debug_helpers(args.output_directory, args.mbedtls_root)
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/framework/scripts/generate_test_cert_macros.py b/framework/scripts/generate_test_cert_macros.py
new file mode 100755
index 0000000..b6d97fc
--- /dev/null
+++ b/framework/scripts/generate_test_cert_macros.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+
+"""
+Generate `tests/src/test_certs.h`, which includes certificates/keys/certificate lists for testing.
+"""
+
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+
+import os
+import sys
+import argparse
+import jinja2
+from mbedtls_framework.build_tree import guess_project_root
+
+TESTS_DIR = os.path.join(guess_project_root(), 'tests')
+FRAMEWORK_DIR = os.path.join(guess_project_root(), 'framework')
+DATA_FILES_PATH = os.path.join(FRAMEWORK_DIR, 'data_files')
+
+INPUT_ARGS = [
+    ("string", "TEST_CA_CRT_EC_PEM", DATA_FILES_PATH + "/test-ca2.crt"),
+    ("binary", "TEST_CA_CRT_EC_DER", DATA_FILES_PATH + "/test-ca2.crt.der"),
+    ("string", "TEST_CA_KEY_EC_PEM", DATA_FILES_PATH + "/test-ca2.key.enc"),
+    ("password", "TEST_CA_PWD_EC_PEM", "PolarSSLTest"),
+    ("binary", "TEST_CA_KEY_EC_DER", DATA_FILES_PATH + "/test-ca2.key.der"),
+    ("string", "TEST_CA_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/test-ca-sha256.crt"),
+    ("binary", "TEST_CA_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/test-ca-sha256.crt.der"),
+    ("string", "TEST_CA_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/test-ca-sha1.crt"),
+    ("binary", "TEST_CA_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/test-ca-sha1.crt.der"),
+    ("string", "TEST_CA_KEY_RSA_PEM", DATA_FILES_PATH + "/test-ca.key"),
+    ("password", "TEST_CA_PWD_RSA_PEM", "PolarSSLTest"),
+    ("binary", "TEST_CA_KEY_RSA_DER", DATA_FILES_PATH + "/test-ca.key.der"),
+    ("string", "TEST_SRV_CRT_EC_PEM", DATA_FILES_PATH + "/server5.crt"),
+    ("binary", "TEST_SRV_CRT_EC_DER", DATA_FILES_PATH + "/server5.crt.der"),
+    ("string", "TEST_SRV_KEY_EC_PEM", DATA_FILES_PATH + "/server5.key"),
+    ("binary", "TEST_SRV_KEY_EC_DER", DATA_FILES_PATH + "/server5.key.der"),
+    ("string", "TEST_SRV_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/server2-sha256.crt"),
+    ("binary", "TEST_SRV_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/server2-sha256.crt.der"),
+    ("string", "TEST_SRV_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/server2.crt"),
+    ("binary", "TEST_SRV_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/server2.crt.der"),
+    ("string", "TEST_SRV_KEY_RSA_PEM", DATA_FILES_PATH + "/server2.key"),
+    ("binary", "TEST_SRV_KEY_RSA_DER", DATA_FILES_PATH + "/server2.key.der"),
+    ("string", "TEST_CLI_CRT_EC_PEM", DATA_FILES_PATH + "/cli2.crt"),
+    ("binary", "TEST_CLI_CRT_EC_DER", DATA_FILES_PATH + "/cli2.crt.der"),
+    ("string", "TEST_CLI_KEY_EC_PEM", DATA_FILES_PATH + "/cli2.key"),
+    ("binary", "TEST_CLI_KEY_EC_DER", DATA_FILES_PATH + "/cli2.key.der"),
+    ("string", "TEST_CLI_CRT_RSA_PEM", DATA_FILES_PATH + "/cli-rsa-sha256.crt"),
+    ("binary", "TEST_CLI_CRT_RSA_DER", DATA_FILES_PATH + "/cli-rsa-sha256.crt.der"),
+    ("string", "TEST_CLI_KEY_RSA_PEM", DATA_FILES_PATH + "/cli-rsa.key"),
+    ("binary", "TEST_CLI_KEY_RSA_DER", DATA_FILES_PATH + "/cli-rsa.key.der"),
+]
+
+def main():
+    parser = argparse.ArgumentParser()
+    default_output_path = os.path.join(TESTS_DIR, 'src', 'test_certs.h')
+    parser.add_argument('--output', type=str, default=default_output_path)
+    parser.add_argument('--list-dependencies', action='store_true')
+    args = parser.parse_args()
+
+    if args.list_dependencies:
+        files_list = [arg[2] for arg in INPUT_ARGS]
+        print(" ".join(files_list))
+        return
+
+    generate(INPUT_ARGS, output=args.output)
+
+#pylint: disable=dangerous-default-value, unused-argument
+def generate(values=[], output=None):
+    """Generate C header file.
+    """
+    template_loader = jinja2.FileSystemLoader(DATA_FILES_PATH)
+    template_env = jinja2.Environment(
+        loader=template_loader, lstrip_blocks=True, trim_blocks=True,
+        keep_trailing_newline=True)
+
+    def read_as_c_array(filename):
+        with open(filename, 'rb') as f:
+            data = f.read(12)
+            while data:
+                yield ', '.join(['{:#04x}'.format(b) for b in data])
+                data = f.read(12)
+
+    def read_lines(filename):
+        with open(filename) as f:
+            try:
+                for line in f:
+                    yield line.strip()
+            except:
+                print(filename)
+                raise
+
+    def put_to_column(value, position=0):
+        return ' '*position + value
+
+    template_env.filters['read_as_c_array'] = read_as_c_array
+    template_env.filters['read_lines'] = read_lines
+    template_env.filters['put_to_column'] = put_to_column
+
+    template = template_env.get_template('test_certs.h.jinja2')
+
+    with open(output, 'w') as f:
+        f.write(template.render(macros=values))
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/framework/scripts/generate_test_code.py b/framework/scripts/generate_test_code.py
new file mode 100755
index 0000000..2ac00ad
--- /dev/null
+++ b/framework/scripts/generate_test_code.py
@@ -0,0 +1,1297 @@
+#!/usr/bin/env python3
+# Test suites code generator.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script is a key part of the Mbed TLS test suites framework. To
+understand the script it is important to understand the
+framework. This docstring contains a summary of the framework
+and explains the function of this script.
+
+Mbed TLS test suites:
+=====================
+Scope:
+------
+The test suites focus on unit testing the crypto primitives and also
+include x509 parser tests. Tests can be added to test any Mbed TLS
+module. However, the framework is not capable of testing the SSL
+protocol, since that requires full stack execution, which is best
+tested as part of the system tests.
+
+Test case definition:
+---------------------
+Tests are defined in a test_suite_<module>[.<optional sub module>].data
+file. A test definition contains:
+ test name
+ optional build macro dependencies
+ test function
+ test parameters
+
+Test dependencies are build macros that can be specified to indicate
+the build config in which the test is valid. For example, if a test
+depends on a feature that is only enabled by defining a macro, then
+that macro should be specified as a dependency of the test.
+
+The test function is the function that implements the test steps. The
+same function can be specified for different tests that perform the
+same steps with different parameters.
+
+Test parameters are specified in string form, separated by ':'.
+Parameters can be of type string, binary data specified as a hex
+string, or integer constants specified as an integer, macro or
+expression. The following is an example test definition:
+
+ AES 128 GCM Encrypt and decrypt 8 bytes
+ depends_on:MBEDTLS_AES_C:MBEDTLS_GCM_C
+ enc_dec_buf:MBEDTLS_CIPHER_AES_128_GCM:"AES-128-GCM":128:8:-1
+
+Test functions:
+---------------
+Test functions are coded in C in test_suite_<module>.function files.
+A functions file is not itself compilable; it contains special
+format patterns to specify test suite dependencies, the start and end
+of functions, and function dependencies. Check any existing functions
+file for an example.
+
+Execution:
+----------
+Tests are executed in 3 steps:
+- Generating test_suite_<module>[.<optional sub module>].c file
+  for each corresponding .data file.
+- Building each source file into executables.
+- Running each executable and printing a report.
+
+Generating C test source requires more than just the test functions.
+The following extras are required:
+- Process main()
+- Reading .data file and dispatching test cases.
+- Platform specific test case execution
+- Dependency checking
+- Integer expression evaluation
+- Test function dispatch
+
+Build dependencies and integer expressions (in the test parameters)
+are specified as strings in the .data file. Their run time value is
+not known at the generation stage. Hence, they need to be translated
+into run time evaluations. This script generates the run time checks
+for dependencies and integer expressions.
+
+Similarly, function names have to be translated into function calls.
+This script also generates code for function dispatch.
+
+The extra code mentioned here is either generated by this script
+or it comes from the input files: helpers file, platform file and
+the template file.
+
+Helper file:
+------------
+The helpers file contains common helper/utility functions and data.
+
+Platform file:
+--------------
+The platform file contains platform-specific setup code and test case
+dispatch code. For example, host_test.function reads the test data
+file from the host's file system and dispatches tests.
+
+Template file:
+--------------
+The template file, for example main_test.function, is a template C file
+into which generated code and code from the input files are substituted
+to generate a compilable C file. It also contains skeleton functions for
+dependency checks, expression evaluation and function dispatch. These
+functions are populated with checks and return codes by this script.
+
+Template file contains "replacement" fields that are formatted
+strings processed by Python string.Template.substitute() method.
+
+This script:
+============
+Core function of this script is to fill the template file with
+code that is generated or read from helpers and platform files.
+
+This script replaces following fields in the template and generates
+the test source file:
+
+__MBEDTLS_TEST_TEMPLATE__TEST_COMMON_HELPERS
+            All common code from helpers.function
+            is substituted here.
+__MBEDTLS_TEST_TEMPLATE__FUNCTIONS_CODE
+            Test functions are substituted here
+            from the input test_suite_xyz.function
+            file. C preprocessor checks are generated
+            for the build dependencies specified
+            in the input file. This script also
+            generates wrappers for the test
+            functions with code to expand the
+            string parameters read from the data
+            file.
+__MBEDTLS_TEST_TEMPLATE__EXPRESSION_CODE
+            This script enumerates the
+            expressions in the .data file and
+            generates code to handle enumerated
+            expression Ids and return their values.
+__MBEDTLS_TEST_TEMPLATE__DEP_CHECK_CODE
+            This script enumerates all
+            build dependencies and generates
+            code to handle each enumerated build
+            dependency Id, returning whether
+            the dependency is defined or not.
+__MBEDTLS_TEST_TEMPLATE__DISPATCH_CODE
+            This script enumerates the functions
+            specified in the input test data file
+            and generates the initializer for the
+            function table in the template
+            file.
+__MBEDTLS_TEST_TEMPLATE__PLATFORM_CODE
+            Platform specific setup and test
+            dispatch code.
+
+"""
+
+
+import os
+import re
+import sys
+import string
+import argparse
+
+
+# Types recognized as signed integer arguments in test functions.
+SIGNED_INTEGER_TYPES = frozenset([
+    'char',
+    'short',
+    'short int',
+    'int',
+    'int8_t',
+    'int16_t',
+    'int32_t',
+    'int64_t',
+    'intmax_t',
+    'long',
+    'long int',
+    'long long int',
+    'mbedtls_mpi_sint',
+    'psa_status_t',
+])
+# Types recognized as string arguments in test functions.
+STRING_TYPES = frozenset(['char*', 'const char*', 'char const*'])
+# Types recognized as hex data arguments in test functions.
+DATA_TYPES = frozenset(['data_t*', 'const data_t*', 'data_t const*'])
+
+BEGIN_HEADER_REGEX = r'/\*\s*BEGIN_HEADER\s*\*/'
+END_HEADER_REGEX = r'/\*\s*END_HEADER\s*\*/'
+
+BEGIN_SUITE_HELPERS_REGEX = r'/\*\s*BEGIN_SUITE_HELPERS\s*\*/'
+END_SUITE_HELPERS_REGEX = r'/\*\s*END_SUITE_HELPERS\s*\*/'
+
+BEGIN_DEP_REGEX = r'BEGIN_DEPENDENCIES'
+END_DEP_REGEX = r'END_DEPENDENCIES'
+
+BEGIN_CASE_REGEX = r'/\*\s*BEGIN_CASE\s*(?P<depends_on>.*?)\s*\*/'
+END_CASE_REGEX = r'/\*\s*END_CASE\s*\*/'
+
+DEPENDENCY_REGEX = r'depends_on:(?P<dependencies>.*)'
+# This can be something like [!]MBEDTLS_xxx
+C_IDENTIFIER_REGEX = r'!?[a-z_][a-z0-9_]*'
+# This is a generic relation operator: ==, !=, >[=], <[=]
+CONDITION_OPERATOR_REGEX = r'[!=]=|[<>]=?'
+# This can be (almost) anything as long as:
+# - it starts with a number or a letter or a "("
+# - it contains only
+#       - numbers
+#       - letters
+#       - spaces
+#       - math operators, i.e "+", "-", "*", "/"
+#       - bitwise operators, i.e. "^", "|", "&", "~", "<<", ">>"
+#       - parentheses, i.e. "()"
+CONDITION_VALUE_REGEX = r'[\w|\(][\s\w\(\)\+\-\*\/\^\|\&\~\<\>]*'
+CONDITION_REGEX = r'({})(?:\s*({})\s*({}))?$'.format(C_IDENTIFIER_REGEX,
+                                                     CONDITION_OPERATOR_REGEX,
+                                                     CONDITION_VALUE_REGEX)
+# Match numerical values that start with a 0 because they can be accidentally
+# octal or accidentally decimal. Hexadecimal values starting with '0x' are
+# valid of course.
+AMBIGUOUS_INTEGER_REGEX = r'\b0[0-9]+'
+TEST_FUNCTION_VALIDATION_REGEX = r'\s*void\s+(?P<func_name>\w+)\s*\('
+FUNCTION_ARG_LIST_END_REGEX = r'.*\)'
+EXIT_LABEL_REGEX = r'^exit:'
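+# Examples of dependency strings accepted by CONDITION_REGEX (illustrative):
+#   'MBEDTLS_AES_C', '!MBEDTLS_GCM_C', 'MBEDTLS_SSL_MAX_CONTENT_LEN >= 1024'.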
+
+
+class GeneratorInputError(Exception):
+    """
+    Exception to indicate error in the input files to this script.
+    This includes missing patterns, test function names and other
+    parsing errors.
+    """
+    pass
+
+
+class FileWrapper:
+    """
+    This class extends the file object with attribute line_no,
+    that indicates line number for the line that is read.
+    """
+
+    def __init__(self, file_name) -> None:
+        """
+        Instantiate the file object and initialize the line number to 0.
+
+        :param file_name: File path to open.
+        """
+        # private mix-in file object
+        self._f = open(file_name, 'rb')
+        self._line_no = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """
+        This method makes FileWrapper iterable.
+        It counts the line numbers as each line is read.
+
+        :return: Line read from file.
+        """
+        line = self._f.__next__()
+        self._line_no += 1
+        # Convert the byte array to a string with the default encoding and
+        # strip any trailing whitespace added in the decoding process.
+        return line.decode(sys.getdefaultencoding()).rstrip() + '\n'
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self._f.__exit__(exc_type, exc_val, exc_tb)
+
+    @property
+    def line_no(self):
+        """
+        Property that indicates line number for the line that is read.
+        """
+        return self._line_no
+
+    @property
+    def name(self):
+        """
+        Property that indicates name of the file that is read.
+        """
+        return self._f.name
+
+
+def split_dep(dep):
+    """
+    Split NOT character '!' from dependency. Used by gen_dependencies()
+
+    :param dep: Dependency string
+    :return: string tuple. Ex: ('!', MACRO) for !MACRO and ('', MACRO) for
+             MACRO.
+    """
+    return ('!', dep[1:]) if dep[0] == '!' else ('', dep)
+
+
+def gen_dependencies(dependencies):
+    """
+    Test suite data and functions specify compile-time dependencies.
+    This function generates C preprocessor code from the input
+    dependency list. The caller uses the generated preprocessor code to
+    wrap dependent code.
+    A dependency in the input list can have a leading '!' character
+    to negate a condition. '!' is separated from the dependency using
+    the function split_dep() and a proper preprocessor check is
+    generated accordingly.
+
+    :param dependencies: List of dependencies.
+    :return: #if defined(...) and #endif code with macro annotations
+             for readability.
+    """
+    dep_start = ''.join(['#if %sdefined(%s)\n' % (x, y) for x, y in
+                         map(split_dep, dependencies)])
+    dep_end = ''.join(['#endif /* %s */\n' %
+                       x for x in reversed(dependencies)])
+
+    return dep_start, dep_end
+
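+# For example (illustrative input):
+#   gen_dependencies(['MBEDTLS_AES_C', '!MBEDTLS_GCM_C'])
+# returns the pair
+#   ('#if defined(MBEDTLS_AES_C)\n#if !defined(MBEDTLS_GCM_C)\n',
+#    '#endif /* !MBEDTLS_GCM_C */\n#endif /* MBEDTLS_AES_C */\n')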
+
+def gen_dependencies_one_line(dependencies):
+    """
+    Similar to gen_dependencies() but generates dependency checks in one line.
+    Useful for generating code with #else block.
+
+    :param dependencies: List of dependencies.
+    :return: Preprocessor check code
+    """
+    defines = '#if ' if dependencies else ''
+    defines += ' && '.join(['%sdefined(%s)' % (x, y) for x, y in map(
+        split_dep, dependencies)])
+    return defines
+
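+# For example (illustrative input):
+#   gen_dependencies_one_line(['MBEDTLS_AES_C', '!MBEDTLS_GCM_C'])
+# returns '#if defined(MBEDTLS_AES_C) && !defined(MBEDTLS_GCM_C)'.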
+
+def gen_function_wrapper(name, local_vars, args_dispatch):
+    """
+    Creates test function wrapper code. A wrapper has the code to
+    unpack parameters from parameters[] array.
+
+    :param name: Test function name
+    :param local_vars: Local variables declaration code
+    :param args_dispatch: List of dispatch arguments.
+           Ex: ['(char *) params[0]', '*((int *) params[1])']
+    :return: Test function wrapper.
+    """
+    # Create the wrapper
+    wrapper = '''
+static void {name}_wrapper( void ** params )
+{{
+{unused_params}{locals}
+    {name}( {args} );
+}}
+'''.format(name=name,
+           unused_params='' if args_dispatch else '    (void)params;\n',
+           args=', '.join(args_dispatch),
+           locals=local_vars)
+    return wrapper
+
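+# With name='test_func' and args_dispatch=['(char *) params[0]'], the
+# generated wrapper looks roughly like:
+#   static void test_func_wrapper( void ** params )
+#   {
+#       test_func( (char *) params[0] );
+#   }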
+
+def gen_dispatch(name, dependencies):
+    """
+    Test suite code template main_test.function defines a C function
+    array to contain test case functions. This function generates an
+    initializer entry for a function in that array. The entry is
+    composed of a compile time check for the test function
+    dependencies. At compile time the test function is assigned when
+    dependencies are met, else NULL is assigned.
+
+    :param name: Test function name
+    :param dependencies: List of dependencies
+    :return: Dispatch code.
+    """
+    if dependencies:
+        preprocessor_check = gen_dependencies_one_line(dependencies)
+        dispatch_code = '''
+{preprocessor_check}
+    {name}_wrapper,
+#else
+    NULL,
+#endif
+'''.format(preprocessor_check=preprocessor_check, name=name)
+    else:
+        dispatch_code = '''
+    {name}_wrapper,
+'''.format(name=name)
+
+    return dispatch_code
+
+
+def parse_until_pattern(funcs_f, end_regex):
+    """
+    Matches pattern end_regex to the lines read from the file object.
+    Returns the lines read until end pattern is matched.
+
+    :param funcs_f: file object for .function file
+    :param end_regex: Pattern to stop parsing
+    :return: Lines read before the end pattern
+    """
+    headers = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
+    for line in funcs_f:
+        if re.search(end_regex, line):
+            break
+        headers += line
+    else:
+        raise GeneratorInputError("file: %s - end pattern [%s] not found!" %
+                                  (funcs_f.name, end_regex))
+
+    return headers
+
+
+def validate_dependency(dependency):
+    """
+    Validates a C macro and raises GeneratorInputError on invalid input.
+    :param dependency: Input macro dependency
+    :return: input dependency stripped of leading & trailing white spaces.
+    """
+    dependency = dependency.strip()
+    m = re.search(AMBIGUOUS_INTEGER_REGEX, dependency)
+    if m:
+        raise GeneratorInputError('Ambiguous integer literal: ' + m.group(0))
+    if not re.match(CONDITION_REGEX, dependency, re.I):
+        raise GeneratorInputError('Invalid dependency %s' % dependency)
+    return dependency
+
+
+def parse_dependencies(inp_str):
+    """
+    Parses dependencies out of inp_str, validates them and returns a
+    list of macros.
+
+    :param inp_str: Input string with macros delimited by ':'.
+    :return: list of dependencies
+    """
+    dependencies = list(map(validate_dependency, inp_str.split(':')))
+    return dependencies
+
+
+def parse_suite_dependencies(funcs_f):
+    """
+    Parses test suite dependencies specified at the top of a
+    .function file, in a block that starts with the pattern
+    BEGIN_DEPENDENCIES and ends with END_DEPENDENCIES. Dependencies
+    are specified after the pattern 'depends_on:' and are delimited
+    by ':'.
+
+    :param funcs_f: file object for .function file
+    :return: List of test suite dependencies.
+    """
+    dependencies = []
+    for line in funcs_f:
+        match = re.search(DEPENDENCY_REGEX, line.strip())
+        if match:
+            try:
+                dependencies = parse_dependencies(match.group('dependencies'))
+            except GeneratorInputError as error:
+                raise GeneratorInputError(
+                    str(error) + " - %s:%d" % (funcs_f.name, funcs_f.line_no))
+        if re.search(END_DEP_REGEX, line):
+            break
+    else:
+        raise GeneratorInputError("file: %s - end dependency pattern [%s]"
+                                  " not found!" % (funcs_f.name,
+                                                   END_DEP_REGEX))
+
+    return dependencies
+
+
+def parse_function_dependencies(line):
+    """
+    Parses function dependencies, which are on the same line as
+    the BEGIN_CASE comment. Dependencies are specified after the
+    pattern 'depends_on:' and are delimited by ':'.
+
+    :param line: Line from .function file that has dependencies.
+    :return: List of dependencies.
+    """
+    dependencies = []
+    match = re.search(BEGIN_CASE_REGEX, line)
+    dep_str = match.group('depends_on')
+    if dep_str:
+        match = re.search(DEPENDENCY_REGEX, dep_str)
+        if match:
+            dependencies += parse_dependencies(match.group('dependencies'))
+
+    return dependencies
+
+
+ARGUMENT_DECLARATION_REGEX = re.compile(r'(.+?) ?(?:\bconst\b)? ?(\w+)\Z', re.S)
+def parse_function_argument(arg, arg_idx, args, local_vars, args_dispatch):
+    """
+    Parses one test function's argument declaration.
+
+    :param arg: argument declaration.
+    :param arg_idx: current wrapper argument index.
+    :param args: accumulator of arguments' internal types.
+    :param local_vars: accumulator of internal variable declarations.
+    :param args_dispatch: accumulator of argument usage expressions.
+    :return: the number of new wrapper arguments,
+             or None if the argument declaration is invalid.
+    """
+    # Normalize whitespace
+    arg = arg.strip()
+    arg = re.sub(r'\s*\*\s*', r'*', arg)
+    arg = re.sub(r'\s+', r' ', arg)
+    # Extract name and type
+    m = ARGUMENT_DECLARATION_REGEX.search(arg)
+    if not m:
+        # E.g. "int x[42]"
+        return None
+    typ, _ = m.groups()
+    if typ in SIGNED_INTEGER_TYPES:
+        args.append('int')
+        args_dispatch.append('((mbedtls_test_argument_t *) params[%d])->sint' % arg_idx)
+        return 1
+    if typ in STRING_TYPES:
+        args.append('char*')
+        args_dispatch.append('(char *) params[%d]' % arg_idx)
+        return 1
+    if typ in DATA_TYPES:
+        args.append('hex')
+        # create a structure
+        pointer_initializer = '(uint8_t *) params[%d]' % arg_idx
+        len_initializer = '((mbedtls_test_argument_t *) params[%d])->len' % (arg_idx+1)
+        local_vars.append('    data_t data%d = {%s, %s};\n' %
+                          (arg_idx, pointer_initializer, len_initializer))
+        args_dispatch.append('&data%d' % arg_idx)
+        return 2
+    return None
+
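+# For example (illustrative): for arg='int bits' at arg_idx=0,
+# parse_function_argument appends 'int' to args and
+# '((mbedtls_test_argument_t *) params[0])->sint' to args_dispatch,
+# and returns 1 (one wrapper parameter consumed).
+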
+ARGUMENT_LIST_REGEX = re.compile(r'\((.*?)\)', re.S)
+def parse_function_arguments(line):
+    """
+    Parses test function signature for validation and generates
+    a dispatch wrapper function that translates input test vectors
+    read from the data file into test function arguments.
+
+    :param line: Line from .function file that has a function
+                 signature.
+    :return: argument list, local variables for
+             wrapper function and argument dispatch code.
+    """
+    # Process arguments, ex: <type> arg1, <type> arg2 )
+    # This script assumes that the argument list is terminated by ')'
+    # i.e. the test functions will not have a function pointer
+    # argument.
+    m = ARGUMENT_LIST_REGEX.search(line)
+    arg_list = m.group(1).strip()
+    if arg_list in ['', 'void']:
+        return [], '', []
+    args = []
+    local_vars = []
+    args_dispatch = []
+    arg_idx = 0
+    for arg in arg_list.split(','):
+        indexes = parse_function_argument(arg, arg_idx,
+                                          args, local_vars, args_dispatch)
+        if indexes is None:
+            raise ValueError("Test function arguments can only be 'int', "
+                             "'char *' or 'data_t'\n%s" % line)
+        arg_idx += indexes
+
+    return args, ''.join(local_vars), args_dispatch
+
+
+def generate_function_code(name, code, local_vars, args_dispatch,
+                           dependencies):
+    """
+    Generate function code with preprocessor checks and parameter dispatch
+    wrapper.
+
+    :param name: Function name
+    :param code: Function code
+    :param local_vars: Local variables for function wrapper
+    :param args_dispatch: Argument dispatch code
+    :param dependencies: Preprocessor dependencies list
+    :return: Final function code
+    """
+    # Add exit label if not present
+    if code.find('exit:') == -1:
+        split_code = code.rsplit('}', 1)
+        if len(split_code) == 2:
+            code = """exit:
+    ;
+}""".join(split_code)
+
+    code += gen_function_wrapper(name, local_vars, args_dispatch)
+    preprocessor_check_start, preprocessor_check_end = \
+        gen_dependencies(dependencies)
+    return preprocessor_check_start + code + preprocessor_check_end
+
+COMMENT_START_REGEX = re.compile(r'/[*/]')
+
+def skip_comments(line, stream):
+    """Remove comments in line.
+
+    If the line contains an unfinished comment, read more lines from stream
+    until the line that contains the comment.
+
+    :return: The original line with inner comments replaced by spaces.
+             Trailing comments and whitespace may be removed completely.
+    """
+    pos = 0
+    while True:
+        opening = COMMENT_START_REGEX.search(line, pos)
+        if not opening:
+            break
+        if line[opening.start(0) + 1] == '/': # //...
+            continuation = line
+            # Count the number of line breaks, to keep line numbers aligned
+            # in the output.
+            line_count = 1
+            while continuation.endswith('\\\n'):
+                # This errors out if the file ends with an unfinished line
+                # comment. That's acceptable to not complicate the code further.
+                continuation = next(stream)
+                line_count += 1
+            return line[:opening.start(0)].rstrip() + '\n' * line_count
+        # Parsing /*...*/, looking for the end
+        closing = line.find('*/', opening.end(0))
+        while closing == -1:
+            # This errors out if the file ends with an unfinished block
+            # comment. That's acceptable to not complicate the code further.
+            line += next(stream)
+            closing = line.find('*/', opening.end(0))
+        pos = closing + 2
+        # Replace inner comment by spaces. There needs to be at least one space
+        # for things like 'int/*ihatespaces*/foo'. Go further and preserve the
+        # width of the comment and line breaks, this way positions in error
+        # messages remain correct.
+        line = (line[:opening.start(0)] +
+                re.sub(r'.', r' ', line[opening.start(0):pos]) +
+                line[pos:])
+    # Strip whitespace at the end of lines (it's irrelevant to error messages).
+    return re.sub(r' +(\n|\Z)', r'\1', line)
+
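+# For example (hypothetical): skip_comments('int/*x*/a; // tail\n', stream)
+# returns 'int     a;\n' -- the block comment is blanked out to spaces of the
+# same width and the trailing // comment is stripped.
+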
+def parse_function_code(funcs_f, dependencies, suite_dependencies):
+    """
+    Parses out a function from function file object and generates
+    function and dispatch code.
+
+    :param funcs_f: file object of the functions file.
+    :param dependencies: List of dependencies
+    :param suite_dependencies: List of test suite dependencies
+    :return: Function name, arguments, function code and dispatch code.
+    """
+    line_directive = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
+    code = ''
+    has_exit_label = False
+    for line in funcs_f:
+        # Check function signature. Function signature may be split
+        # across multiple lines. Here we try to find the start of
+        # arguments list, then remove '\n's and apply the regex to
+        # detect function start.
+        line = skip_comments(line, funcs_f)
+        up_to_arg_list_start = code + line[:line.find('(') + 1]
+        match = re.match(TEST_FUNCTION_VALIDATION_REGEX,
+                         up_to_arg_list_start.replace('\n', ' '), re.I)
+        if match:
+            # check if we have full signature i.e. split in more lines
+            name = match.group('func_name')
+            if not re.match(FUNCTION_ARG_LIST_END_REGEX, line):
+                for lin in funcs_f:
+                    line += skip_comments(lin, funcs_f)
+                    if re.search(FUNCTION_ARG_LIST_END_REGEX, line):
+                        break
+            args, local_vars, args_dispatch = parse_function_arguments(
+                line)
+            code += line
+            break
+        code += line
+    else:
+        raise GeneratorInputError("file: %s - Test functions not found!" %
+                                  funcs_f.name)
+
+    # Make the test function static
+    code = code.replace('void', 'static void', 1)
+
+    # Prefix test function name with 'test_'
+    code = code.replace(name, 'test_' + name, 1)
+    name = 'test_' + name
+
+    # If a test function has no arguments then add 'void' argument to
+    # avoid "-Wstrict-prototypes" warnings from clang
+    if len(args) == 0:
+        code = code.replace('()', '(void)', 1)
+
+    for line in funcs_f:
+        if re.search(END_CASE_REGEX, line):
+            break
+        if not has_exit_label:
+            has_exit_label = \
+                re.search(EXIT_LABEL_REGEX, line.strip()) is not None
+        code += line
+    else:
+        raise GeneratorInputError("file: %s - end case pattern [%s] not "
+                                  "found!" % (funcs_f.name, END_CASE_REGEX))
+
+    code = line_directive + code
+    code = generate_function_code(name, code, local_vars, args_dispatch,
+                                  dependencies)
+    dispatch_code = gen_dispatch(name, suite_dependencies + dependencies)
+    return (name, args, code, dispatch_code)
+
+
+def parse_functions(funcs_f):
+    """
+    Parses a test_suite_xxx.function file and returns information
+    for generating a C source file for the test suite.
+
+    :param funcs_f: file object of the functions file.
+    :return: List of test suite dependencies, test function dispatch
+             code, function code and a dict with function identifiers
+             and arguments info.
+    """
+    suite_helpers = ''
+    suite_dependencies = []
+    suite_functions = ''
+    func_info = {}
+    function_idx = 0
+    dispatch_code = ''
+    for line in funcs_f:
+        if re.search(BEGIN_HEADER_REGEX, line):
+            suite_helpers += parse_until_pattern(funcs_f, END_HEADER_REGEX)
+        elif re.search(BEGIN_SUITE_HELPERS_REGEX, line):
+            suite_helpers += parse_until_pattern(funcs_f,
+                                                 END_SUITE_HELPERS_REGEX)
+        elif re.search(BEGIN_DEP_REGEX, line):
+            suite_dependencies += parse_suite_dependencies(funcs_f)
+        elif re.search(BEGIN_CASE_REGEX, line):
+            try:
+                dependencies = parse_function_dependencies(line)
+            except GeneratorInputError as error:
+                raise GeneratorInputError(
+                    "%s:%d: %s" % (funcs_f.name, funcs_f.line_no,
+                                   str(error)))
+            func_name, args, func_code, func_dispatch =\
+                parse_function_code(funcs_f, dependencies, suite_dependencies)
+            suite_functions += func_code
+            # Generate dispatch code and enumeration info
+            if func_name in func_info:
+                raise GeneratorInputError(
+                    "file: %s - function %s re-declared at line %d" %
+                    (funcs_f.name, func_name, funcs_f.line_no))
+            func_info[func_name] = (function_idx, args)
+            dispatch_code += '/* Function Id: %d */\n' % function_idx
+            dispatch_code += func_dispatch
+            function_idx += 1
+
+    func_code = (suite_helpers +
+                 suite_functions).join(gen_dependencies(suite_dependencies))
+    return suite_dependencies, dispatch_code, func_code, func_info
+
+
+def escaped_split(inp_str, split_char):
+    """
+    Split inp_str on character split_char, ignoring escaped occurrences.
+    Since the return value is used to write back to the intermediate
+    data file, any escape characters in the input are retained in the
+    output.
+
+    :param inp_str: String to split
+    :param split_char: Split character
+    :return: List of splits
+    """
+    if len(split_char) > 1:
+        raise ValueError('Expected split character. Found string!')
+    out = re.sub(r'(\\.)|' + split_char,
+                 lambda m: m.group(1) or '\n', inp_str,
+                 len(inp_str)).split('\n')
+    out = [x for x in out if x]
+    return out
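+# A usage sketch (hypothetical input): splitting a test vector line on ':'
+# while an escaped separator is kept verbatim, escape character included:
+#     escaped_split(r'foo:bar\:baz', ':')  ==  ['foo', r'bar\:baz']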
+
+
+def parse_test_data(data_f):
+    """
+    Parses .data file for each test case name, test function name,
+    test dependencies and test arguments. This information is
+    correlated with the test functions file for generating an
+    intermediate data file replacing the strings for test function
+    names, dependencies and integer constant expressions with
+    identifiers, mainly to optimise space for on-target
+    execution.
+
+    :param data_f: file object of the data file.
+    :return: Generator that yields line number, test name, function name,
+             dependency list and function argument list.
+    """
+    __state_read_name = 0
+    __state_read_args = 1
+    state = __state_read_name
+    dependencies = []
+    name = ''
+    for line in data_f:
+        line = line.strip()
+        # Skip comments
+        if line.startswith('#'):
+            continue
+
+        # Blank line indicates end of test
+        if not line:
+            if state == __state_read_args:
+                raise GeneratorInputError("[%s:%d] Newline before arguments. "
+                                          "Test function and arguments "
+                                          "missing for %s" %
+                                          (data_f.name, data_f.line_no, name))
+            continue
+
+        if state == __state_read_name:
+            # Read test name
+            name = line
+            state = __state_read_args
+        elif state == __state_read_args:
+            # Check dependencies
+            match = re.search(DEPENDENCY_REGEX, line)
+            if match:
+                try:
+                    dependencies = parse_dependencies(
+                        match.group('dependencies'))
+                except GeneratorInputError as error:
+                    raise GeneratorInputError(
+                        str(error) + " - %s:%d" %
+                        (data_f.name, data_f.line_no))
+            else:
+                # Read test vectors
+                parts = escaped_split(line, ':')
+                test_function = parts[0]
+                args = parts[1:]
+                yield data_f.line_no, name, test_function, dependencies, args
+                dependencies = []
+                state = __state_read_name
+    if state == __state_read_args:
+        raise GeneratorInputError("[%s:%d] Newline before arguments. "
+                                  "Test function and arguments missing for "
+                                  "%s" % (data_f.name, data_f.line_no, name))
+
+
+def gen_dep_check(dep_id, dep):
+    """
+    Generate code for checking dependency with the associated
+    identifier.
+
+    :param dep_id: Dependency identifier
+    :param dep: Dependency macro
+    :return: Dependency check code
+    """
+    if dep_id < 0:
+        raise GeneratorInputError("Dependency Id should be a positive "
+                                  "integer.")
+    _not, dep = ('!', dep[1:]) if dep[0] == '!' else ('', dep)
+    if not dep:
+        raise GeneratorInputError("Dependency should not be an empty string.")
+
+    dependency = re.match(CONDITION_REGEX, dep, re.I)
+    if not dependency:
+        raise GeneratorInputError('Invalid dependency %s' % dep)
+
+    _defined = '' if dependency.group(2) else 'defined'
+    _cond = dependency.group(2) if dependency.group(2) else ''
+    _value = dependency.group(3) if dependency.group(3) else ''
+
+    dep_check = '''
+        case {id}:
+            {{
+#if {_not}{_defined}({macro}{_cond}{_value})
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }}
+            break;'''.format(_not=_not, _defined=_defined,
+                             macro=dependency.group(1), id=dep_id,
+                             _cond=_cond, _value=_value)
+    return dep_check
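+# For instance, gen_dep_check(5, '!MBEDTLS_AES_C') generates:
+#         case 5:
+#             {
+# #if !defined(MBEDTLS_AES_C)
+#                 ret = DEPENDENCY_SUPPORTED;
+# #else
+#                 ret = DEPENDENCY_NOT_SUPPORTED;
+# #endif
+#             }
+#             break;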
+
+
+def gen_expression_check(exp_id, exp):
+    """
+    Generates code for evaluating an integer expression using
+    associated expression Id.
+
+    :param exp_id: Expression Identifier
+    :param exp: Expression/Macro
+    :return: Expression check code
+    """
+    if exp_id < 0:
+        raise GeneratorInputError("Expression Id should be a positive "
+                                  "integer.")
+    if not exp:
+        raise GeneratorInputError("Expression should not be an empty string.")
+    exp_code = '''
+        case {exp_id}:
+            {{
+                *out_value = {expression};
+            }}
+            break;'''.format(exp_id=exp_id, expression=exp)
+    return exp_code
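+# For instance, gen_expression_check(1, 'MBEDTLS_MD_MAX_SIZE') generates:
+#         case 1:
+#             {
+#                 *out_value = MBEDTLS_MD_MAX_SIZE;
+#             }
+#             break;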
+
+
+def write_dependencies(out_data_f, test_dependencies, unique_dependencies):
+    """
+    Write dependencies to intermediate test data file, replacing
+    the string form with identifiers. Also, generates dependency
+    check code.
+
+    :param out_data_f: Output intermediate data file
+    :param test_dependencies: Dependencies
+    :param unique_dependencies: Mutable list used to track unique
+           dependencies across successive calls to this function.
+    :return: returns dependency check code.
+    """
+    dep_check_code = ''
+    if test_dependencies:
+        out_data_f.write('depends_on')
+        for dep in test_dependencies:
+            if dep not in unique_dependencies:
+                unique_dependencies.append(dep)
+                dep_id = unique_dependencies.index(dep)
+                dep_check_code += gen_dep_check(dep_id, dep)
+            else:
+                dep_id = unique_dependencies.index(dep)
+            out_data_f.write(':' + str(dep_id))
+        out_data_f.write('\n')
+    return dep_check_code
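+# Intermediate-file output sketch: for test dependencies that were assigned
+# identifiers 0 and 3, the line written to the .datax file is
+#     depends_on:0:3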
+
+
+INT_VAL_REGEX = re.compile(r'-?(\d+|0x[0-9a-f]+)$', re.I)
+def val_is_int(val: str) -> bool:
+    """Whether val is suitable as an 'int' parameter in the .datax file."""
+    if not INT_VAL_REGEX.match(val):
+        return False
+    # Limit the range to what is guaranteed to get through strtol()
+    return abs(int(val, 0)) <= 0x7fffffff
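+# Examples: val_is_int('42') and val_is_int('-0x7fffffff') are True;
+# val_is_int('0x80000000') is False (outside the guaranteed strtol() range)
+# and val_is_int('MBEDTLS_MD_MAX_SIZE') is False (not a literal).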
+
+def write_parameters(out_data_f, test_args, func_args, unique_expressions):
+    """
+    Writes test parameters to the intermediate data file, replacing
+    the string form with identifiers. Also, generates expression
+    check code.
+
+    :param out_data_f: Output intermediate data file
+    :param test_args: Test parameters
+    :param func_args: Function arguments
+    :param unique_expressions: Mutable list used to track unique
+           expressions across successive calls to this function.
+    :return: Returns expression check code.
+    """
+    expression_code = ''
+    for i, _ in enumerate(test_args):
+        typ = func_args[i]
+        val = test_args[i]
+
+        # Pass small integer constants literally. This reduces the size of
+        # the C code. Register anything else as an expression.
+        if typ == 'int' and not val_is_int(val):
+            typ = 'exp'
+            if val not in unique_expressions:
+                unique_expressions.append(val)
+                # exp_id could be derived from len(unique_expressions) - 1,
+                # but for readability and consistency with the
+                # existing-expression case below, use index().
+                exp_id = unique_expressions.index(val)
+                expression_code += gen_expression_check(exp_id, val)
+                val = exp_id
+            else:
+                val = unique_expressions.index(val)
+        out_data_f.write(':' + typ + ':' + str(val))
+    out_data_f.write('\n')
+    return expression_code
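+# Parameter encoding sketch: an int literal is written as ':int:42', a
+# registered expression as ':exp:<id>', and other types (e.g. char*) keep
+# their string form, so a test line may continue as ':int:42:exp:0:char*:"x"'.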
+
+
+def gen_suite_dep_checks(suite_dependencies, dep_check_code, expression_code):
+    """
+    Generates preprocessor checks for test suite dependencies.
+
+    :param suite_dependencies: Test suite dependencies read from the
+            .function file.
+    :param dep_check_code: Dependency check code
+    :param expression_code: Expression check code
+    :return: Dependency and expression code guarded by test suite
+             dependencies.
+    """
+    if suite_dependencies:
+        preprocessor_check = gen_dependencies_one_line(suite_dependencies)
+        dep_check_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=dep_check_code)
+        expression_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=expression_code)
+    return dep_check_code, expression_code
+
+
+def get_function_info(func_info, function_name, line_no):
+    """Look up information about a test function by name.
+
+    Raise an informative exception if function_name is not found.
+
+    :param func_info: dictionary mapping function names to their information.
+    :param function_name: the function name as written in the .function and
+                          .data files.
+    :param line_no: line number for error messages.
+    :return: Function information (id, args).
+    """
+    test_function_name = 'test_' + function_name
+    if test_function_name not in func_info:
+        raise GeneratorInputError("%d: Function %s not found!" %
+                                  (line_no, test_function_name))
+    return func_info[test_function_name]
+
+
+def gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies):
+    """
+    This function reads test case name, dependencies and test vectors
+    from the .data file. This information is correlated with the test
+    functions file for generating an intermediate data file replacing
+    the strings for test function names, dependencies and integer
+    constant expressions with identifiers, mainly to optimise
+    space for on-target execution.
+    It also generates test case dependency check code and expression
+    evaluation code.
+
+    :param data_f: Data file object
+    :param out_data_f: Output intermediate data file
+    :param func_info: Dict keyed by function and with function id
+           and arguments info
+    :param suite_dependencies: Test suite dependencies
+    :return: Returns dependency and expression check code
+    """
+    unique_dependencies = []
+    unique_expressions = []
+    dep_check_code = ''
+    expression_code = ''
+    for line_no, test_name, function_name, test_dependencies, test_args in \
+            parse_test_data(data_f):
+        out_data_f.write(test_name + '\n')
+
+        # Write dependencies
+        dep_check_code += write_dependencies(out_data_f, test_dependencies,
+                                             unique_dependencies)
+
+        # Write test function name
+        func_id, func_args = \
+            get_function_info(func_info, function_name, line_no)
+        out_data_f.write(str(func_id))
+
+        # Write parameters
+        if len(test_args) != len(func_args):
+            raise GeneratorInputError("%d: Invalid number of arguments in test "
+                                      "%s. See function %s signature." %
+                                      (line_no, test_name, function_name))
+        expression_code += write_parameters(out_data_f, test_args, func_args,
+                                            unique_expressions)
+
+        # Write a newline as test case separator
+        out_data_f.write('\n')
+
+    dep_check_code, expression_code = gen_suite_dep_checks(
+        suite_dependencies, dep_check_code, expression_code)
+    return dep_check_code, expression_code
+
+
+def add_input_info(funcs_file, data_file, template_file,
+                   c_file, snippets):
+    """
+    Add generator input info in snippets.
+
+    :param funcs_file: Functions file object
+    :param data_file: Data file object
+    :param template_file: Template file object
+    :param c_file: Output C file object
+    :param snippets: Dictionary to contain code pieces to be
+                     substituted in the template.
+    :return:
+    """
+    snippets['test_file'] = c_file
+    snippets['test_main_file'] = template_file
+    snippets['test_case_file'] = funcs_file
+    snippets['test_case_data_file'] = data_file
+
+
+def read_code_from_input_files(platform_file, helpers_file,
+                               out_data_file, snippets):
+    """
+    Read code from input files and create substitutions for replacement
+    strings in the template file.
+
+    :param platform_file: Platform file object
+    :param helpers_file: Helper functions file object
+    :param out_data_file: Output intermediate data file object
+    :param snippets: Dictionary to contain code pieces to be
+                     substituted in the template.
+    :return:
+    """
+    # Read helpers
+    with open(helpers_file, 'r') as help_f, open(platform_file, 'r') as \
+            platform_f:
+        snippets['test_common_helper_file'] = helpers_file
+        snippets['test_common_helpers'] = help_f.read()
+        snippets['test_platform_file'] = platform_file
+        snippets['platform_code'] = platform_f.read().replace(
+            'DATA_FILE', out_data_file.replace('\\', '\\\\'))  # escape '\'
+
+
+def write_test_source_file(template_file, c_file, snippets):
+    """
+    Write output source file with generated source code.
+
+    :param template_file: Template file name
+    :param c_file: Output source file
+    :param snippets: Generated code snippets to substitute in the template
+    :return:
+    """
+
+    # Create a placeholder pattern with the correct named capture groups
+    # to override the default provided with Template.
+    # Match nothing (no way of escaping placeholders).
+    escaped = "(?P<escaped>(?!))"
+    # Match the "__MBEDTLS_TEST_TEMPLATE__PLACEHOLDER_NAME" pattern.
+    named = "__MBEDTLS_TEST_TEMPLATE__(?P<named>[A-Z][_A-Z0-9]*)"
+    # Match nothing (no braced placeholder syntax).
+    braced = "(?P<braced>(?!))"
+    # If not already matched, a "__MBEDTLS_TEST_TEMPLATE__" prefix is invalid.
+    invalid = "(?P<invalid>__MBEDTLS_TEST_TEMPLATE__)"
+    placeholder_pattern = re.compile("|".join([escaped, named, braced, invalid]))
+
+    with open(template_file, 'r') as template_f, open(c_file, 'w') as c_f:
+        for line_no, line in enumerate(template_f.readlines(), 1):
+            # Update line number. +1 as #line directive sets next line number
+            snippets['line_no'] = line_no + 1
+            template = string.Template(line)
+            template.pattern = placeholder_pattern
+            snippets = {k.upper():v for (k, v) in snippets.items()}
+            code = template.substitute(**snippets)
+            c_f.write(code)
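+# Substitution sketch: a template line containing
+#     __MBEDTLS_TEST_TEMPLATE__DISPATCH_CODE
+# is replaced with snippets['dispatch_code'] (keys are upper-cased above),
+# while any other __MBEDTLS_TEST_TEMPLATE__ prefix raises an error.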
+
+
+def parse_function_file(funcs_file, snippets):
+    """
+    Parse function file and generate function dispatch code.
+
+    :param funcs_file: Functions file name
+    :param snippets: Dictionary to contain code pieces to be
+                     substituted in the template.
+    :return:
+    """
+    with FileWrapper(funcs_file) as funcs_f:
+        suite_dependencies, dispatch_code, func_code, func_info = \
+            parse_functions(funcs_f)
+        snippets['functions_code'] = func_code
+        snippets['dispatch_code'] = dispatch_code
+        return suite_dependencies, func_info
+
+
+def generate_intermediate_data_file(data_file, out_data_file,
+                                    suite_dependencies, func_info, snippets):
+    """
+    Generates intermediate data file from input data file and
+    information read from functions file.
+
+    :param data_file: Data file name
+    :param out_data_file: Output/Intermediate data file
+    :param suite_dependencies: List of suite dependencies.
+    :param func_info: Function info parsed from functions file.
+    :param snippets: Dictionary to contain code pieces to be
+                     substituted in the template.
+    :return:
+    """
+    with FileWrapper(data_file) as data_f, \
+            open(out_data_file, 'w') as out_data_f:
+        dep_check_code, expression_code = gen_from_test_data(
+            data_f, out_data_f, func_info, suite_dependencies)
+        snippets['dep_check_code'] = dep_check_code
+        snippets['expression_code'] = expression_code
+
+
+def generate_code(**input_info):
+    """
+    Generates C source code from test suite file, data file, common
+    helpers file and platform file.
+
+    input_info expands to the following parameters:
+    funcs_file: Functions file object
+    data_file: Data file object
+    template_file: Template file object
+    platform_file: Platform file object
+    helpers_file: Helper functions file object
+    suites_dir: Test suites dir
+    c_file: Output C file object
+    out_data_file: Output intermediate data file object
+    :return:
+    """
+    funcs_file = input_info['funcs_file']
+    data_file = input_info['data_file']
+    template_file = input_info['template_file']
+    platform_file = input_info['platform_file']
+    helpers_file = input_info['helpers_file']
+    suites_dir = input_info['suites_dir']
+    c_file = input_info['c_file']
+    out_data_file = input_info['out_data_file']
+    for name, path in [('Functions file', funcs_file),
+                       ('Data file', data_file),
+                       ('Template file', template_file),
+                       ('Platform file', platform_file),
+                       ('Helpers code file', helpers_file),
+                       ('Suites dir', suites_dir)]:
+        if not os.path.exists(path):
+            raise IOError("ERROR: %s [%s] not found!" % (name, path))
+
+    snippets = {'generator_script': os.path.basename(__file__)}
+    read_code_from_input_files(platform_file, helpers_file,
+                               out_data_file, snippets)
+    add_input_info(funcs_file, data_file, template_file,
+                   c_file, snippets)
+    suite_dependencies, func_info = parse_function_file(funcs_file, snippets)
+    generate_intermediate_data_file(data_file, out_data_file,
+                                    suite_dependencies, func_info, snippets)
+    write_test_source_file(template_file, c_file, snippets)
+
+
+def main():
+    """
+    Command line parser.
+
+    :return:
+    """
+    parser = argparse.ArgumentParser(
+        description='Dynamically generate test suite code.')
+
+    parser.add_argument("-f", "--functions-file",
+                        dest="funcs_file",
+                        help="Functions file",
+                        metavar="FUNCTIONS_FILE",
+                        required=True)
+
+    parser.add_argument("-d", "--data-file",
+                        dest="data_file",
+                        help="Data file",
+                        metavar="DATA_FILE",
+                        required=True)
+
+    parser.add_argument("-t", "--template-file",
+                        dest="template_file",
+                        help="Template file",
+                        metavar="TEMPLATE_FILE",
+                        required=True)
+
+    parser.add_argument("-s", "--suites-dir",
+                        dest="suites_dir",
+                        help="Suites dir",
+                        metavar="SUITES_DIR",
+                        required=True)
+
+    parser.add_argument("--helpers-file",
+                        dest="helpers_file",
+                        help="Helpers file",
+                        metavar="HELPERS_FILE",
+                        required=True)
+
+    parser.add_argument("-p", "--platform-file",
+                        dest="platform_file",
+                        help="Platform code file",
+                        metavar="PLATFORM_FILE",
+                        required=True)
+
+    parser.add_argument("-o", "--out-dir",
+                        dest="out_dir",
+                        help="Dir where generated code and scripts are copied",
+                        metavar="OUT_DIR",
+                        required=True)
+
+    args = parser.parse_args()
+
+    data_file_name = os.path.basename(args.data_file)
+    data_name = os.path.splitext(data_file_name)[0]
+
+    out_c_file = os.path.join(args.out_dir, data_name + '.c')
+    out_data_file = os.path.join(args.out_dir, data_name + '.datax')
+
+    out_c_file_dir = os.path.dirname(out_c_file)
+    out_data_file_dir = os.path.dirname(out_data_file)
+    for directory in [out_c_file_dir, out_data_file_dir]:
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
+    generate_code(funcs_file=args.funcs_file, data_file=args.data_file,
+                  template_file=args.template_file,
+                  platform_file=args.platform_file,
+                  helpers_file=args.helpers_file, suites_dir=args.suites_dir,
+                  c_file=out_c_file, out_data_file=out_data_file)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except GeneratorInputError as err:
+        sys.exit("%s: input error: %s" %
+                 (os.path.basename(sys.argv[0]), str(err)))
diff --git a/framework/scripts/generate_test_keys.py b/framework/scripts/generate_test_keys.py
new file mode 100755
index 0000000..f5d6901
--- /dev/null
+++ b/framework/scripts/generate_test_keys.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""Module generating EC and RSA keys to be used in test_suite_pk instead of
+generating the required key at run time. This helps speeding up testing."""
+
+from typing import Iterator, List, Tuple
+import re
+import argparse
+from mbedtls_framework.asymmetric_key_data import ASYMMETRIC_KEY_DATA
+from mbedtls_framework.build_tree import guess_project_root
+
+BYTES_PER_LINE = 16
+
+def c_byte_array_literal_content(array_name: str, key_data: bytes) -> Iterator[str]:
+    yield 'const unsigned char '
+    yield array_name
+    yield '[] = {'
+    for index in range(0, len(key_data), BYTES_PER_LINE):
+        yield '\n   '
+        for b in key_data[index:index + BYTES_PER_LINE]:
+            yield ' {:#04x},'.format(b)
+    yield '\n};'
+
+def convert_der_to_c(array_name: str, key_data: bytes) -> str:
+    return ''.join(c_byte_array_literal_content(array_name, key_data))
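+# For instance, convert_der_to_c('k', b'\x01\x02') returns:
+#     const unsigned char k[] = {
+#         0x01, 0x02,
+#     };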
+
+def get_key_type(key: str) -> str:
+    if re.match('PSA_KEY_TYPE_RSA_.*', key):
+        return "rsa"
+    elif re.match('PSA_KEY_TYPE_ECC_.*', key):
+        return "ec"
+    else:
+        print("Unhandled key type {}".format(key))
+        return "unknown"
+
+def get_ec_key_family(key: str) -> str:
+    match = re.search(r'.*\((.*)\)', key)
+    if match is None:
+        raise Exception("Unable to get EC family from {}".format(key))
+    return match.group(1)
+
+# Legacy EC group IDs do not support all the key types that PSA does, so the
+# following dictionary is used to:
+# - get the prefix/suffix for legacy curve names
+# - determine whether the curve is supported by legacy symbols (MBEDTLS_ECP_DP_...)
+EC_NAME_CONVERSION = {
+    'PSA_ECC_FAMILY_SECP_K1': {
+        192: ('secp', 'k1'),
+        224: ('secp', 'k1'),
+        256: ('secp', 'k1')
+    },
+    'PSA_ECC_FAMILY_SECP_R1': {
+        192: ('secp', 'r1'),
+        224: ('secp', 'r1'),
+        256: ('secp', 'r1'),
+        384: ('secp', 'r1'),
+        521: ('secp', 'r1')
+    },
+    'PSA_ECC_FAMILY_BRAINPOOL_P_R1': {
+        256: ('bp', 'r1'),
+        384: ('bp', 'r1'),
+        512: ('bp', 'r1')
+    },
+    'PSA_ECC_FAMILY_MONTGOMERY': {
+        255: ('curve', '19'),
+        448: ('curve', '')
+    }
+}
+
+def get_ec_curve_name(priv_key: str, bits: int) -> str:
+    ec_family = get_ec_key_family(priv_key)
+    try:
+        prefix = EC_NAME_CONVERSION[ec_family][bits][0]
+        suffix = EC_NAME_CONVERSION[ec_family][bits][1]
+    except KeyError:
+        return ""
+    return prefix + str(bits) + suffix
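+# Examples derived from the table above:
+#     get_ec_curve_name('PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_R1)', 256)
+#         == 'secp256r1'
+#     get_ec_curve_name('PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_MONTGOMERY)', 255)
+#         == 'curve25519'
+# An unsupported (family, bits) pair yields ''.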
+
+def get_look_up_table_entry(key_type: str, group_id_or_keybits: str,
+                            priv_array_name: str, pub_array_name: str) -> Iterator[str]:
+    if key_type == "ec":
+        yield "    {{ {}, 0,\n".format(group_id_or_keybits)
+    else:
+        yield "    {{ 0, {},\n".format(group_id_or_keybits)
+    yield "      {0}, sizeof({0}),\n".format(priv_array_name)
+    yield "      {0}, sizeof({0}) }},".format(pub_array_name)
+
+
+def write_output_file(output_file_name: str, arrays: str, look_up_table: str):
+    with open(output_file_name, 'wt') as output:
+        output.write("""\
+/*********************************************************************************
+ * This file was automatically generated from framework/scripts/generate_test_keys.py.
+ * Please do not edit it manually.
+ *********************************************************************************/
+""")
+        output.write(arrays)
+        output.write("""
+struct predefined_key_element {{
+    int group_id;  // EC group ID; 0 for RSA keys
+    int keybits;  // bits size of RSA key; 0 for EC keys
+    const unsigned char *priv_key;
+    size_t priv_key_len;
+    const unsigned char *pub_key;
+    size_t pub_key_len;
+}};
+
+struct predefined_key_element predefined_keys[] = {{
+{}
+}};
+
+/* End of generated file */
+""".format(look_up_table))
+
+def collect_keys() -> Tuple[str, str]:
+    """"
+    This function reads key data from ASYMMETRIC_KEY_DATA and, only for the
+    keys supported in legacy ECP/RSA modules, it returns 2 strings:
+    - the 1st contains C arrays declaration of these keys and
+    - the 2nd contains the final look-up table for all these arrays.
+    """
+    arrays = []
+    look_up_table = []
+
+    # Get a list of private keys only in order to get a single item for every
+    # (key type, key bits) pair. We know that ASYMMETRIC_KEY_DATA
+    # also contains the public counterpart.
+    priv_keys = [key for key in ASYMMETRIC_KEY_DATA if '_KEY_PAIR' in key]
+    priv_keys = sorted(priv_keys)
+
+    for priv_key in priv_keys:
+        key_type = get_key_type(priv_key)
+        # Ignore keys which are not EC or RSA
+        if key_type == "unknown":
+            continue
+
+        pub_key = re.sub('_KEY_PAIR', '_PUBLIC_KEY', priv_key)
+
+        for bits in ASYMMETRIC_KEY_DATA[priv_key]:
+            if key_type == "ec":
+                curve = get_ec_curve_name(priv_key, bits)
+                # Ignore EC curves unsupported in legacy symbols
+                if curve == "":
+                    continue
+            # Create output array name
+            if key_type == "rsa":
+                array_name_base = "_".join(["test", key_type, str(bits)])
+            else:
+                array_name_base = "_".join(["test", key_type, curve])
+            array_name_priv = array_name_base + "_priv"
+            array_name_pub = array_name_base + "_pub"
+            # Convert bytearray to C array
+            c_array_priv = convert_der_to_c(array_name_priv, ASYMMETRIC_KEY_DATA[priv_key][bits])
+            c_array_pub = convert_der_to_c(array_name_pub, ASYMMETRIC_KEY_DATA[pub_key][bits])
+            # Write the C array to the output file
+            arrays.append(''.join(["\n", c_array_priv, "\n", c_array_pub, "\n"]))
+            # Update the lookup table
+            if key_type == "ec":
+                group_id_or_keybits = "MBEDTLS_ECP_DP_" + curve.upper()
+            else:
+                group_id_or_keybits = str(bits)
+            look_up_table.append(''.join(get_look_up_table_entry(key_type, group_id_or_keybits,
+                                                                 array_name_priv, array_name_pub)))
+
+    return ''.join(arrays), '\n'.join(look_up_table)
+
+def main() -> None:
+    default_output_path = guess_project_root() + "/framework/tests/include/test/test_keys.h"
+
+    argparser = argparse.ArgumentParser()
+    argparser.add_argument("--output", help="Output file", default=default_output_path)
+    args = argparser.parse_args()
+
+    output_file = args.output
+
+    arrays, look_up_table = collect_keys()
+
+    write_output_file(output_file, arrays, look_up_table)
+
+if __name__ == '__main__':
+    main()
diff --git a/framework/scripts/generate_tls13_compat_tests.py b/framework/scripts/generate_tls13_compat_tests.py
new file mode 100755
index 0000000..b9dcff4
--- /dev/null
+++ b/framework/scripts/generate_tls13_compat_tests.py
@@ -0,0 +1,649 @@
+#!/usr/bin/env python3
+
+# generate_tls13_compat_tests.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Generate TLSv1.3 Compat test cases
+
+"""
+
+import sys
+import os
+import argparse
+import itertools
+from collections import namedtuple
+
+# define certificates configuration entry
+Certificate = namedtuple("Certificate", ['cafile', 'certfile', 'keyfile'])
+# define the certificate parameters for signature algorithms
+CERTIFICATES = {
+    'ecdsa_secp256r1_sha256': Certificate('$DATA_FILES_PATH/test-ca2.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp256r1.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp256r1.key'),
+    'ecdsa_secp384r1_sha384': Certificate('$DATA_FILES_PATH/test-ca2.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp384r1.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp384r1.key'),
+    'ecdsa_secp521r1_sha512': Certificate('$DATA_FILES_PATH/test-ca2.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp521r1.crt',
+                                          '$DATA_FILES_PATH/ecdsa_secp521r1.key'),
+    'rsa_pss_rsae_sha256': Certificate('$DATA_FILES_PATH/test-ca_cat12.crt',
+                                       '$DATA_FILES_PATH/server2-sha256.crt',
+                                       '$DATA_FILES_PATH/server2.key')
+}
+
+CIPHER_SUITE_IANA_VALUE = {
+    "TLS_AES_128_GCM_SHA256": 0x1301,
+    "TLS_AES_256_GCM_SHA384": 0x1302,
+    "TLS_CHACHA20_POLY1305_SHA256": 0x1303,
+    "TLS_AES_128_CCM_SHA256": 0x1304,
+    "TLS_AES_128_CCM_8_SHA256": 0x1305
+}
+
+SIG_ALG_IANA_VALUE = {
+    "ecdsa_secp256r1_sha256": 0x0403,
+    "ecdsa_secp384r1_sha384": 0x0503,
+    "ecdsa_secp521r1_sha512": 0x0603,
+    'rsa_pss_rsae_sha256': 0x0804,
+}
+
+NAMED_GROUP_IANA_VALUE = {
+    'secp256r1': 0x17,
+    'secp384r1': 0x18,
+    'secp521r1': 0x19,
+    'x25519': 0x1d,
+    'x448': 0x1e,
+    # Only one finite field group to keep testing time within reasonable bounds.
+    'ffdhe2048': 0x100,
+}
+
+class TLSProgram:
+    """
+    Base class for generating server/client commands.
+    """
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, ciphersuite=None, signature_algorithm=None, named_group=None,
+                 cert_sig_alg=None):
+        self._ciphers = []
+        self._sig_algs = []
+        self._named_groups = []
+        self._cert_sig_algs = []
+        if ciphersuite:
+            self.add_ciphersuites(ciphersuite)
+        if named_group:
+            self.add_named_groups(named_group)
+        if signature_algorithm:
+            self.add_signature_algorithms(signature_algorithm)
+        if cert_sig_alg:
+            self.add_cert_signature_algorithms(cert_sig_alg)
+
+    # add_ciphersuites should not be overridden by subclasses
+    def add_ciphersuites(self, *ciphersuites):
+        self._ciphers.extend(
+            [cipher for cipher in ciphersuites if cipher not in self._ciphers])
+
+    # add_signature_algorithms should not be overridden by subclasses
+    def add_signature_algorithms(self, *signature_algorithms):
+        self._sig_algs.extend(
+            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._sig_algs])
+
+    # add_named_groups should not be overridden by subclasses
+    def add_named_groups(self, *named_groups):
+        self._named_groups.extend(
+            [named_group for named_group in named_groups if named_group not in self._named_groups])
+
+    # add_cert_signature_algorithms should not be overridden by subclasses
+    def add_cert_signature_algorithms(self, *signature_algorithms):
+        self._cert_sig_algs.extend(
+            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._cert_sig_algs])
+
+    # pylint: disable=no-self-use
+    def pre_checks(self):
+        return []
+
+    # pylint: disable=no-self-use
+    def cmd(self):
+        if not self._cert_sig_algs:
+            self._cert_sig_algs = list(CERTIFICATES.keys())
+        return self.pre_cmd()
+
+    # pylint: disable=no-self-use
+    def post_checks(self):
+        return []
+
+    # pylint: disable=no-self-use
+    def pre_cmd(self):
+        return ['false']
+
+    # pylint: disable=unused-argument,no-self-use
+    def hrr_post_checks(self, named_group):
+        return []
+
+
+class OpenSSLBase(TLSProgram):
+    """
+    Generate base test commands for OpenSSL.
+    """
+
+    NAMED_GROUP = {
+        'secp256r1': 'P-256',
+        'secp384r1': 'P-384',
+        'secp521r1': 'P-521',
+        'x25519': 'X25519',
+        'x448': 'X448',
+        'ffdhe2048': 'ffdhe2048',
+    }
+
+    def cmd(self):
+        ret = super().cmd()
+
+        if self._ciphers:
+            ciphersuites = ':'.join(self._ciphers)
+            ret += ["-ciphersuites {ciphersuites}".format(ciphersuites=ciphersuites)]
+
+        if self._sig_algs:
+            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
+            signature_algorithms = ':'.join(signature_algorithms)
+            ret += ["-sigalgs {signature_algorithms}".format(
+                signature_algorithms=signature_algorithms)]
+
+        if self._named_groups:
+            named_groups = ':'.join(
+                map(lambda named_group: self.NAMED_GROUP[named_group], self._named_groups))
+            ret += ["-groups {named_groups}".format(named_groups=named_groups)]
+
+        ret += ['-msg -tls1_3']
+
+        return ret
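+    # Command sketch: for ciphersuite TLS_AES_128_GCM_SHA256 and named group
+    # secp256r1 (no sig_algs), cmd() yields flags such as
+    #     -ciphersuites TLS_AES_128_GCM_SHA256 -groups P-256 -msg -tls1_3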
+
+    def pre_checks(self):
+        ret = ["requires_openssl_tls1_3"]
+
+        # FFDH groups require at least OpenSSL 3.0
+        ffdh_groups = ['ffdhe2048']
+
+        if any(x in ffdh_groups for x in self._named_groups):
+            ret = ["requires_openssl_tls1_3_with_ffdh"]
+
+        return ret
+
+
+class OpenSSLServ(OpenSSLBase):
+    """
+    Generate test commands for OpenSSL server.
+    """
+
+    def cmd(self):
+        ret = super().cmd()
+        ret += ['-num_tickets 0 -no_resume_ephemeral -no_cache']
+        return ret
+
+    def post_checks(self):
+        return ['-c "HTTP/1.0 200 ok"']
+
+    def pre_cmd(self):
+        ret = ['$O_NEXT_SRV_NO_CERT']
+        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
+            ret += ['-cert {cert} -key {key}'.format(cert=cert, key=key)]
+        return ret
+
+
+class OpenSSLCli(OpenSSLBase):
+    """
+    Generate test commands for OpenSSL client.
+    """
+
+    def pre_cmd(self):
+        return ['$O_NEXT_CLI_NO_CERT',
+                '-CAfile {cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]
+
+
+class GnuTLSBase(TLSProgram):
+    """
+    Generate base test commands for GnuTLS.
+    """
+
+    CIPHER_SUITE = {
+        'TLS_AES_256_GCM_SHA384': [
+            'AES-256-GCM',
+            'SHA384',
+            'AEAD'],
+        'TLS_AES_128_GCM_SHA256': [
+            'AES-128-GCM',
+            'SHA256',
+            'AEAD'],
+        'TLS_CHACHA20_POLY1305_SHA256': [
+            'CHACHA20-POLY1305',
+            'SHA256',
+            'AEAD'],
+        'TLS_AES_128_CCM_SHA256': [
+            'AES-128-CCM',
+            'SHA256',
+            'AEAD'],
+        'TLS_AES_128_CCM_8_SHA256': [
+            'AES-128-CCM-8',
+            'SHA256',
+            'AEAD']}
+
+    SIGNATURE_ALGORITHM = {
+        'ecdsa_secp256r1_sha256': ['SIGN-ECDSA-SECP256R1-SHA256'],
+        'ecdsa_secp521r1_sha512': ['SIGN-ECDSA-SECP521R1-SHA512'],
+        'ecdsa_secp384r1_sha384': ['SIGN-ECDSA-SECP384R1-SHA384'],
+        'rsa_pss_rsae_sha256': ['SIGN-RSA-PSS-RSAE-SHA256']}
+
+    NAMED_GROUP = {
+        'secp256r1': ['GROUP-SECP256R1'],
+        'secp384r1': ['GROUP-SECP384R1'],
+        'secp521r1': ['GROUP-SECP521R1'],
+        'x25519': ['GROUP-X25519'],
+        'x448': ['GROUP-X448'],
+        'ffdhe2048': ['GROUP-FFDHE2048'],
+    }
+
+    def pre_checks(self):
+        return ["requires_gnutls_tls1_3",
+                "requires_gnutls_next_no_ticket"]
+
+    def cmd(self):
+        ret = super().cmd()
+
+        priority_string_list = []
+
+        def update_priority_string_list(items, map_table):
+            for item in items:
+                for i in map_table[item]:
+                    if i not in priority_string_list:
+                        yield i
+
+        if self._ciphers:
+            priority_string_list.extend(update_priority_string_list(
+                self._ciphers, self.CIPHER_SUITE))
+        else:
+            priority_string_list.extend(['CIPHER-ALL', 'MAC-ALL'])
+
+        if self._sig_algs:
+            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
+            priority_string_list.extend(update_priority_string_list(
+                signature_algorithms, self.SIGNATURE_ALGORITHM))
+        else:
+            priority_string_list.append('SIGN-ALL')
+
+        if self._named_groups:
+            priority_string_list.extend(update_priority_string_list(
+                self._named_groups, self.NAMED_GROUP))
+        else:
+            priority_string_list.append('GROUP-ALL')
+
+        priority_string_list = ['NONE'] + \
+            priority_string_list + ['VERS-TLS1.3']
+
+        priority_string = ':+'.join(priority_string_list)
+        priority_string += ':%NO_TICKETS'
+
+        ret += ['--priority={priority_string}'.format(
+            priority_string=priority_string)]
+        return ret
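+    # Priority string sketch: for ciphersuite TLS_AES_128_GCM_SHA256 and
+    # named group secp256r1 (no sig_algs), the resulting option is
+    #     --priority=NONE:+AES-128-GCM:+SHA256:+AEAD:+SIGN-ALL:+GROUP-SECP256R1:+VERS-TLS1.3:%NO_TICKETS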
+
+class GnuTLSServ(GnuTLSBase):
+    """
+    Generate test commands for GnuTLS server.
+    """
+
+    def pre_cmd(self):
+        ret = ['$G_NEXT_SRV_NO_CERT', '--http', '--disable-client-cert', '--debug=4']
+
+        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
+            ret += ['--x509certfile {cert} --x509keyfile {key}'.format(
+                cert=cert, key=key)]
+        return ret
+
+    def post_checks(self):
+        return ['-c "HTTP/1.0 200 OK"']
+
+
+class GnuTLSCli(GnuTLSBase):
+    """
+    Generate test commands for GnuTLS client.
+    """
+
+    def pre_cmd(self):
+        return ['$G_NEXT_CLI_NO_CERT', '--debug=4', '--single-key-share',
+                '--x509cafile {cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]
+
+
+class MbedTLSBase(TLSProgram):
+    """
+    Generate base test commands for mbedTLS.
+    """
+
+    CIPHER_SUITE = {
+        'TLS_AES_256_GCM_SHA384': 'TLS1-3-AES-256-GCM-SHA384',
+        'TLS_AES_128_GCM_SHA256': 'TLS1-3-AES-128-GCM-SHA256',
+        'TLS_CHACHA20_POLY1305_SHA256': 'TLS1-3-CHACHA20-POLY1305-SHA256',
+        'TLS_AES_128_CCM_SHA256': 'TLS1-3-AES-128-CCM-SHA256',
+        'TLS_AES_128_CCM_8_SHA256': 'TLS1-3-AES-128-CCM-8-SHA256'}
+
+    def cmd(self):
+        ret = super().cmd()
+        ret += ['debug_level=4']
+
+        if self._ciphers:
+            ciphers = ','.join(
+                map(lambda cipher: self.CIPHER_SUITE[cipher], self._ciphers))
+            ret += ["force_ciphersuite={ciphers}".format(ciphers=ciphers)]
+
+        if self._sig_algs + self._cert_sig_algs:
+            ret += ['sig_algs={sig_algs}'.format(
+                sig_algs=','.join(set(self._sig_algs + self._cert_sig_algs)))]
+
+        if self._named_groups:
+            named_groups = ','.join(self._named_groups)
+            ret += ["groups={named_groups}".format(named_groups=named_groups)]
+        return ret
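+    # Command sketch: for TLS_AES_128_GCM_SHA256 and named group x25519,
+    # cmd() includes options such as
+    #     debug_level=4 force_ciphersuite=TLS1-3-AES-128-GCM-SHA256 groups=x25519
+    # (a sig_algs=... option is also emitted once cert_sig_algs is populated).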
+
+    #pylint: disable=missing-function-docstring
+    def add_ffdh_group_requirements(self, requirement_list):
+        if 'ffdhe2048' in self._named_groups:
+            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_2048')
+        if 'ffdhe3072' in self._named_groups:
+            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_3072')
+        if 'ffdhe4096' in self._named_groups:
+            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_4096')
+        if 'ffdhe6144' in self._named_groups:
+            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_6144')
+        if 'ffdhe8192' in self._named_groups:
+            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_8192')
+
+    def pre_checks(self):
+        ret = ['requires_config_enabled MBEDTLS_DEBUG_C',
+               'requires_config_enabled MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED']
+
+        if 'rsa_pss_rsae_sha256' in self._sig_algs + self._cert_sig_algs:
+            ret.append(
+                'requires_config_enabled MBEDTLS_X509_RSASSA_PSS_SUPPORT')
+
+        ec_groups = ['secp256r1', 'secp384r1', 'secp521r1', 'x25519', 'x448']
+        ffdh_groups = ['ffdhe2048', 'ffdhe3072', 'ffdhe4096', 'ffdhe6144', 'ffdhe8192']
+
+        if any(x in ec_groups for x in self._named_groups):
+            ret.append('requires_config_enabled PSA_WANT_ALG_ECDH')
+
+        if any(x in ffdh_groups for x in self._named_groups):
+            ret.append('requires_config_enabled PSA_WANT_ALG_FFDH')
+            self.add_ffdh_group_requirements(ret)
+
+        return ret
+
+
+class MbedTLSServ(MbedTLSBase):
+    """
+    Generate test commands for mbedTLS server.
+    """
+
+    def cmd(self):
+        ret = super().cmd()
+        ret += ['tls13_kex_modes=ephemeral cookies=0 tickets=0']
+        return ret
+
+    def pre_checks(self):
+        return ['requires_config_enabled MBEDTLS_SSL_SRV_C'] + super().pre_checks()
+
+    def post_checks(self):
+        check_strings = ["Protocol is TLSv1.3"]
+        if self._ciphers:
+            check_strings.append(
+                "server hello, chosen ciphersuite: {} ( id={:04d} )".format(
+                    self.CIPHER_SUITE[self._ciphers[0]],
+                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]]))
+        if self._sig_algs:
+            check_strings.append(
+                "received signature algorithm: 0x{:x}".format(
+                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))
+
+        for named_group in self._named_groups:
+            check_strings += ['got named group: {named_group}({iana_value:04x})'.format(
+                                named_group=named_group,
+                                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]
+
+        check_strings.append("Certificate verification was skipped")
+        return ['-s "{}"'.format(i) for i in check_strings]
+
+    def pre_cmd(self):
+        ret = ['$P_SRV']
+        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
+            ret += ['crt_file={cert} key_file={key}'.format(cert=cert, key=key)]
+        return ret
+
+    def hrr_post_checks(self, named_group):
+        return ['-s "HRR selected_group: {:s}"'.format(named_group)]
+
+
+class MbedTLSCli(MbedTLSBase):
+    """
+    Generate test commands for mbedTLS client.
+    """
+
+    def pre_cmd(self):
+        return ['$P_CLI',
+                'ca_file={cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]
+
+    def pre_checks(self):
+        return ['requires_config_enabled MBEDTLS_SSL_CLI_C'] + super().pre_checks()
+
+    def hrr_post_checks(self, named_group):
+        ret = ['-c "received HelloRetryRequest message"']
+        ret += ['-c "selected_group ( {:d} )"'.format(NAMED_GROUP_IANA_VALUE[named_group])]
+        return ret
+
+    def post_checks(self):
+        check_strings = ["Protocol is TLSv1.3"]
+        if self._ciphers:
+            check_strings.append(
+                "server hello, chosen ciphersuite: ( {:04x} ) - {}".format(
+                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]],
+                    self.CIPHER_SUITE[self._ciphers[0]]))
+        if self._sig_algs:
+            check_strings.append(
+                "Certificate Verify: Signature algorithm ( {:04x} )".format(
+                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))
+
+        for named_group in self._named_groups:
+            check_strings += ['NamedGroup: {named_group} ( {iana_value:x} )'.format(
+                                named_group=named_group,
+                                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]
+
+        check_strings.append("Verifying peer X.509 certificate... ok")
+        return ['-c "{}"'.format(i) for i in check_strings]
+
+
+SERVER_CLASSES = {'OpenSSL': OpenSSLServ, 'GnuTLS': GnuTLSServ, 'mbedTLS': MbedTLSServ}
+CLIENT_CLASSES = {'OpenSSL': OpenSSLCli, 'GnuTLS': GnuTLSCli, 'mbedTLS': MbedTLSCli}
+
+
+def generate_compat_test(client=None, server=None, cipher=None, named_group=None, sig_alg=None):
+    """
+    Generate test case with `ssl-opt.sh` format.
+    """
+    name = 'TLS 1.3 {client[0]}->{server[0]}: {cipher},{named_group},{sig_alg}'.format(
+        client=client, server=server, cipher=cipher[4:], sig_alg=sig_alg, named_group=named_group)
+
+    server_object = SERVER_CLASSES[server](ciphersuite=cipher,
+                                           named_group=named_group,
+                                           signature_algorithm=sig_alg,
+                                           cert_sig_alg=sig_alg)
+    client_object = CLIENT_CLASSES[client](ciphersuite=cipher,
+                                           named_group=named_group,
+                                           signature_algorithm=sig_alg,
+                                           cert_sig_alg=sig_alg)
+
+    cmd = ['run_test "{}"'.format(name),
+           '"{}"'.format(' '.join(server_object.cmd())),
+           '"{}"'.format(' '.join(client_object.cmd())),
+           '0']
+    cmd += server_object.post_checks()
+    cmd += client_object.post_checks()
+    cmd += ['-C "received HelloRetryRequest message"']
+    prefix = ' \\\n' + (' '*9)
+    cmd = prefix.join(cmd)
+    return '\n'.join(server_object.pre_checks() + client_object.pre_checks() + [cmd])
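+# Test name sketch: with client='OpenSSL', server='mbedTLS',
+# cipher='TLS_AES_128_GCM_SHA256', named_group='x25519' and
+# sig_alg='ecdsa_secp256r1_sha256', the generated case is titled
+#     TLS 1.3 O->m: AES_128_GCM_SHA256,x25519,ecdsa_secp256r1_sha256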
+
+
+def generate_hrr_compat_test(client=None, server=None,
+                             client_named_group=None, server_named_group=None,
+                             cert_sig_alg=None):
+    """
+    Generate Hello Retry Request test case with `ssl-opt.sh` format.
+    """
+    name = 'TLS 1.3 {client[0]}->{server[0]}: HRR {c_named_group} -> {s_named_group}'.format(
+        client=client, server=server, c_named_group=client_named_group,
+        s_named_group=server_named_group)
+    server_object = SERVER_CLASSES[server](named_group=server_named_group,
+                                           cert_sig_alg=cert_sig_alg)
+
+    client_object = CLIENT_CLASSES[client](named_group=client_named_group,
+                                           cert_sig_alg=cert_sig_alg)
+    client_object.add_named_groups(server_named_group)
+
+    cmd = ['run_test "{}"'.format(name),
+           '"{}"'.format(' '.join(server_object.cmd())),
+           '"{}"'.format(' '.join(client_object.cmd())),
+           '0']
+    cmd += server_object.post_checks()
+    cmd += client_object.post_checks()
+    cmd += server_object.hrr_post_checks(server_named_group)
+    cmd += client_object.hrr_post_checks(server_named_group)
+    prefix = ' \\\n' + (' '*9)
+    cmd = prefix.join(cmd)
+    return '\n'.join(server_object.pre_checks() +
+                     client_object.pre_checks() +
+                     [cmd])
+
+SSL_OUTPUT_HEADER = '''\
+# TLS 1.3 interoperability test cases (equivalent of compat.sh for TLS 1.3).
+#
+# Automatically generated by {cmd}. Do not edit!
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+'''
+DATA_FILES_PATH_VAR = '''
+DATA_FILES_PATH=../framework/data_files
+'''
+
+def main():
+    """
+    Main function of this program
+    """
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-o', '--output',
+                        default='tests/opt-testcases/tls13-compat.sh',
+                        help='Output file path (not used with -1)')
+
+    parser.add_argument('-1', '--single', action='store_true',
+                        help='Print a single test case')
+    # Single mode used to be the default.
+    parser.add_argument('-a', '--generate-all-tls13-compat-tests',
+                        action='store_false', dest='single',
+                        help='Generate all test cases (negates -1) (default)')
+
+    parser.add_argument('--list-ciphers', action='store_true',
+                        default=False, help='List supported ciphersuites')
+
+    parser.add_argument('--list-sig-algs', action='store_true',
+                        default=False, help='List supported signature algorithms')
+
+    parser.add_argument('--list-named-groups', action='store_true',
+                        default=False, help='List supported named groups')
+
+    parser.add_argument('--list-servers', action='store_true',
+                        default=False, help='List supported TLS servers')
+
+    parser.add_argument('--list-clients', action='store_true',
+                        default=False, help='List supported TLS clients')
+
+    parser.add_argument('server', choices=SERVER_CLASSES.keys(), nargs='?',
+                        default=list(SERVER_CLASSES.keys())[0],
+                        help='Choose TLS server program for test')
+    parser.add_argument('client', choices=CLIENT_CLASSES.keys(), nargs='?',
+                        default=list(CLIENT_CLASSES.keys())[0],
+                        help='Choose TLS client program for test')
+    parser.add_argument('cipher', choices=CIPHER_SUITE_IANA_VALUE.keys(), nargs='?',
+                        default=list(CIPHER_SUITE_IANA_VALUE.keys())[0],
+                        help='Choose cipher suite for test')
+    parser.add_argument('sig_alg', choices=SIG_ALG_IANA_VALUE.keys(), nargs='?',
+                        default=list(SIG_ALG_IANA_VALUE.keys())[0],
+                        help='Choose signature algorithm for test')
+    parser.add_argument('named_group', choices=NAMED_GROUP_IANA_VALUE.keys(), nargs='?',
+                        default=list(NAMED_GROUP_IANA_VALUE.keys())[0],
+                        help='Choose named group for test')
+
+    args = parser.parse_args()
+
+    def get_all_test_cases():
+        # Generate normal compat test cases
+        for client, server, cipher, named_group, sig_alg in \
+            itertools.product(CLIENT_CLASSES.keys(),
+                              SERVER_CLASSES.keys(),
+                              CIPHER_SUITE_IANA_VALUE.keys(),
+                              NAMED_GROUP_IANA_VALUE.keys(),
+                              SIG_ALG_IANA_VALUE.keys()):
+            if server == 'mbedTLS' or client == 'mbedTLS':
+                yield generate_compat_test(client=client, server=server,
+                                           cipher=cipher, named_group=named_group,
+                                           sig_alg=sig_alg)
+
+
+        # Generate Hello Retry Request compat test cases
+        for client, server, client_named_group, server_named_group in \
+            itertools.product(CLIENT_CLASSES.keys(),
+                              SERVER_CLASSES.keys(),
+                              NAMED_GROUP_IANA_VALUE.keys(),
+                              NAMED_GROUP_IANA_VALUE.keys()):
+
+            if (client == 'mbedTLS' or server == 'mbedTLS') and \
+                client_named_group != server_named_group:
+                yield generate_hrr_compat_test(client=client, server=server,
+                                               client_named_group=client_named_group,
+                                               server_named_group=server_named_group,
+                                               cert_sig_alg="ecdsa_secp256r1_sha256")
+
+    if not args.single:
+        if args.output:
+            with open(args.output, 'w', encoding="utf-8") as f:
+                f.write(SSL_OUTPUT_HEADER.format(
+                    filename=os.path.basename(args.output),
+                    cmd=os.path.basename(sys.argv[0])))
+                f.write(DATA_FILES_PATH_VAR)
+                f.write('\n\n'.join(get_all_test_cases()))
+                f.write('\n')
+        else:
+            print('\n\n'.join(get_all_test_cases()))
+        return 0
+
+    if args.list_ciphers or args.list_sig_algs or args.list_named_groups \
+            or args.list_servers or args.list_clients:
+        if args.list_ciphers:
+            print(*CIPHER_SUITE_IANA_VALUE.keys())
+        if args.list_sig_algs:
+            print(*SIG_ALG_IANA_VALUE.keys())
+        if args.list_named_groups:
+            print(*NAMED_GROUP_IANA_VALUE.keys())
+        if args.list_servers:
+            print(*SERVER_CLASSES.keys())
+        if args.list_clients:
+            print(*CLIENT_CLASSES.keys())
+        return 0
+
+    print(generate_compat_test(server=args.server, client=args.client, sig_alg=args.sig_alg,
+                               cipher=args.cipher, named_group=args.named_group))
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/framework/scripts/generate_tls_handshake_tests.py b/framework/scripts/generate_tls_handshake_tests.py
new file mode 100755
index 0000000..1e9dbb9
--- /dev/null
+++ b/framework/scripts/generate_tls_handshake_tests.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+
+"""
+Generate miscellaneous TLS test cases relating to the handshake.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import sys
+from typing import Optional
+
+from mbedtls_framework import tls_test_case
+from mbedtls_framework import typing_util
+from mbedtls_framework.tls_test_case import Side, Version
+import translate_ciphers
+
+
+# Assume that a TLS 1.2 ClientHello used in these tests will be at most
+# this many bytes long.
+TLS12_CLIENT_HELLO_ASSUMED_MAX_LENGTH = 255
+
+# Minimum handshake fragment length that Mbed TLS supports.
+TLS_HANDSHAKE_FRAGMENT_MIN_LENGTH = 4
+
+def write_tls_handshake_defragmentation_test(
+        #pylint: disable=too-many-arguments
+        out: typing_util.Writable,
+        side: Side,
+        length: Optional[int],
+        version: Optional[Version] = None,
+        cipher: Optional[str] = None,
+        etm: Optional[bool] = None, #encrypt-then-mac (only relevant for CBC)
+        variant: str = ''
+) -> None:
+    """Generate one TLS handshake defragmentation test.
+
+    :param out: file to write to.
+    :param side: which side is Mbed TLS.
+    :param length: fragment length, or None to not fragment.
+    :param version: protocol version, if forced.
+    """
+    #pylint: disable=chained-comparison,too-many-branches,too-many-statements
+
+    our_args = ''
+    their_args = ''
+
+    if length is None:
+        description = 'no fragmentation, for reference'
+    else:
+        description = 'len=' + str(length)
+    if version is not None:
+        description += ', TLS 1.' + str(version.value)
+    description = f'Handshake defragmentation on {side.name.lower()}: {description}'
+    tc = tls_test_case.TestCase(description)
+
+    if version is not None:
+        their_args += ' ' + version.openssl_option()
+        # Emit a version requirement, because we're forcing the version via
+        # OpenSSL, not via Mbed TLS, and the automatic dependencies in
+        # ssl-opt.sh only handle forcing the version via Mbed TLS.
+        tc.requirements.append(version.requires_command())
+        if side == Side.SERVER and version == Version.TLS12 and \
+           length is not None and \
+           length <= TLS12_CLIENT_HELLO_ASSUMED_MAX_LENGTH:
+            # Server-side ClientHello defragmentation is only supported in
+            # the TLS 1.3 message parser. When that parser sees a TLS-1.2-only
+            # ClientHello, it forwards the reassembled record to the
+            # TLS 1.2 ClientHello parser so the ClientHello can be fragmented.
+            # When TLS 1.3 support is disabled in the server (at compile-time
+            # or at runtime), the TLS 1.2 ClientHello parser only sees
+            # the first fragment of the ClientHello.
+            tc.requirements.append('requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3')
+            tc.description += ' with 1.3 support'
+
+    # To guarantee that the handshake messages are large enough and need to be
+    # split into fragments, the tests require certificate authentication.
+    # The party in control of the fragmentation operations is OpenSSL, and it
+    # will always use server5.crt (548 bytes).
+    if length is not None and \
+       length >= TLS_HANDSHAKE_FRAGMENT_MIN_LENGTH:
+        tc.requirements.append('requires_certificate_authentication')
+        if version == Version.TLS12 and side == Side.CLIENT:
+            #The server uses an ECDSA cert, so make sure we have a compatible key exchange
+            tc.requirements.append(
+                'requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED')
+    else:
+        # This test case may run in a pure-PSK configuration. OpenSSL doesn't
+        # allow this by default with TLS 1.3.
+        their_args += ' -allow_no_dhe_kex'
+
+    if length is None:
+        forbidden_patterns = [
+            'waiting for more fragments',
+        ]
+        wanted_patterns = []
+    elif length < TLS_HANDSHAKE_FRAGMENT_MIN_LENGTH:
+        their_args += ' -split_send_frag ' + str(length)
+        tc.exit_code = 1
+        forbidden_patterns = []
+        wanted_patterns = [
+            'handshake message too short: ' + str(length),
+            'SSL - An invalid SSL record was received',
+        ]
+        if side == Side.SERVER:
+            wanted_patterns[0:0] = ['<= parse client hello']
+        elif version == Version.TLS13:
+            wanted_patterns[0:0] = ['=> ssl_tls13_process_server_hello']
+    else:
+        their_args += ' -split_send_frag ' + str(length)
+        forbidden_patterns = []
+        wanted_patterns = [
+            'reassembled record',
+            fr'initial handshake fragment: {length}, 0\.\.{length} of [0-9]\+',
+            fr'subsequent handshake fragment: [0-9]\+, {length}\.\.',
+            fr'Prepare: waiting for more handshake fragments {length}/',
+            fr'Consume: waiting for more handshake fragments {length}/',
+        ]
+
+    if cipher is not None:
+        mbedtls_cipher = translate_ciphers.translate_mbedtls(cipher)
+        if side == Side.CLIENT:
+            our_args += ' force_ciphersuite=' + mbedtls_cipher
+            if 'NULL' in cipher:
+                their_args += ' -cipher ALL@SECLEVEL=0:COMPLEMENTOFALL@SECLEVEL=0'
+        else:
+            # For TLS 1.2, when Mbed TLS is the server, we must force the
+            # cipher suite on the client side, because passing
+            # force_ciphersuite to ssl_server2 would force a TLS-1.2-only
+            # server, which does not support a fragmented ClientHello.
+            tc.requirements.append('requires_ciphersuite_enabled ' + mbedtls_cipher)
+            their_args += ' -cipher ' + translate_ciphers.translate_ossl(cipher)
+            if 'NULL' in cipher:
+                their_args += '@SECLEVEL=0'
+
+    if etm is not None:
+        if etm:
+            tc.requirements.append('requires_config_enabled MBEDTLS_SSL_ENCRYPT_THEN_MAC')
+        our_args += ' etm=' + str(int(etm))
+        (wanted_patterns if etm else forbidden_patterns)[0:0] = [
+            'using encrypt then mac',
+        ]
+
+    tc.description += variant
+
+    if side == Side.CLIENT:
+        tc.client = '$P_CLI debug_level=4' + our_args
+        tc.server = '$O_NEXT_SRV' + their_args
+        tc.wanted_client_patterns = wanted_patterns
+        tc.forbidden_client_patterns = forbidden_patterns
+    else:
+        their_args += ' -cert $DATA_FILES_PATH/server5.crt -key $DATA_FILES_PATH/server5.key'
+        our_args += ' auth_mode=required'
+        tc.client = '$O_NEXT_CLI' + their_args
+        tc.server = '$P_SRV debug_level=4' + our_args
+        tc.wanted_server_patterns = wanted_patterns
+        tc.forbidden_server_patterns = forbidden_patterns
+    tc.write(out)
+
+
+CIPHERS_FOR_TLS12_HANDSHAKE_DEFRAGMENTATION = [
+    (None, 'default', None),
+    ('TLS_ECDHE_ECDSA_WITH_NULL_SHA', 'null', None),
+    ('TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256', 'ChachaPoly', None),
+    ('TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'GCM', None),
+    ('TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', 'CBC, etm=n', False),
+    ('TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', 'CBC, etm=y', True),
+]
+
+def write_tls_handshake_defragmentation_tests(out: typing_util.Writable) -> None:
+    """Generate TLS handshake defragmentation tests."""
+    for side in Side.CLIENT, Side.SERVER:
+        write_tls_handshake_defragmentation_test(out, side, None)
+        for length in [512, 513, 256, 128, 64, 36, 32, 16, 13, 5, 4, 3]:
+            write_tls_handshake_defragmentation_test(out, side, length,
+                                                     Version.TLS13)
+            if length == 4:
+                for (cipher_suite, nickname, etm) in \
+                        CIPHERS_FOR_TLS12_HANDSHAKE_DEFRAGMENTATION:
+                    write_tls_handshake_defragmentation_test(
+                        out, side, length, Version.TLS12,
+                        cipher=cipher_suite, etm=etm,
+                        variant=', '+nickname)
+            else:
+                write_tls_handshake_defragmentation_test(out, side, length,
+                                                         Version.TLS12)
+
+
+def write_handshake_tests(out: typing_util.Writable) -> None:
+    """Generate handshake tests."""
+    out.write(f"""\
+# Miscellaneous tests related to the TLS handshake layer.
+#
+# Automatically generated by {os.path.basename(sys.argv[0])}. Do not edit!
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+""")
+    write_tls_handshake_defragmentation_tests(out)
+    out.write("""\
+# End of automatically generated file.
+""")
+
+def main() -> None:
+    """Command line entry point."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('-o', '--output',
+                        default='tests/opt-testcases/handshake-generated.sh',
+                        help='Output file (default: tests/opt-testcases/handshake-generated.sh)')
+    args = parser.parse_args()
+    with open(args.output, 'w', encoding='utf-8') as out:
+        write_handshake_tests(out)
+
+if __name__ == '__main__':
+    main()
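+
+# Illustrative invocation (run from the toplevel of an Mbed TLS working copy,
+# so that the default output path resolves correctly):
+#   framework/scripts/generate_tls_handshake_tests.py
+# or with an explicit output file:
+#   framework/scripts/generate_tls_handshake_tests.py -o /tmp/handshake.sh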
diff --git a/framework/scripts/mbedtls_framework/__init__.py b/framework/scripts/mbedtls_framework/__init__.py
new file mode 100644
index 0000000..50147ce
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/__init__.py
@@ -0,0 +1,3 @@
+# This file needs to exist to make mbedtls_framework a package.
+# Among other things, this allows modules in this directory to make
+# relative imports.
diff --git a/framework/scripts/mbedtls_framework/asymmetric_key_data.py b/framework/scripts/mbedtls_framework/asymmetric_key_data.py
new file mode 100644
index 0000000..175bc9f
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/asymmetric_key_data.py
@@ -0,0 +1,237 @@
+"""Sample key material for asymmetric key types.
+
+Meant for use in crypto_knowledge.py.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import binascii
+import re
+from typing import Dict
+
+STR_TRANS_REMOVE_BLANKS = str.maketrans('', '', ' \t\n\r')
+
+def unhexlify(text: str) -> bytes:
+    return binascii.unhexlify(text.translate(STR_TRANS_REMOVE_BLANKS))
+
+def construct_asymmetric_key_data(src) -> Dict[str, Dict[int, bytes]]:
+    """Split key pairs into separate table entries and convert hex to bytes.
+
+    Input format: src[abbreviated_type][size] = (private_key_hex, public_key_hex)
+    Output format: dst['PSA_KEY_TYPE_xxx'][size] = key_bytes
+    """
+    dst = {} #type: Dict[str, Dict[int, bytes]]
+    for typ in src:
+        private = 'PSA_KEY_TYPE_' + re.sub(r'(\(|\Z)', r'_KEY_PAIR\1', typ, 1)
+        public = 'PSA_KEY_TYPE_' + re.sub(r'(\(|\Z)', r'_PUBLIC_KEY\1', typ, 1)
+        dst[private] = {}
+        dst[public] = {}
+        for size in src[typ]:
+            dst[private][size] = unhexlify(src[typ][size][0])
+            dst[public][size] = unhexlify(src[typ][size][1])
+    return dst
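+
+# Illustrative example of the key-type name expansion performed above:
+# an input key 'ECC(PSA_ECC_FAMILY_SECP_K1)' produces the output keys
+# 'PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_K1)' and
+# 'PSA_KEY_TYPE_ECC_PUBLIC_KEY(PSA_ECC_FAMILY_SECP_K1)', while a plain
+# 'RSA' produces 'PSA_KEY_TYPE_RSA_KEY_PAIR' and 'PSA_KEY_TYPE_RSA_PUBLIC_KEY'.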
+
+## These are valid keys that don't try to exercise any edge cases. They're
+## either test vectors from some specification, or randomly generated. All
+## pairs consist of a private key and its public key.
+#pylint: disable=line-too-long
+ASYMMETRIC_KEY_DATA = construct_asymmetric_key_data({
+    'ECC(PSA_ECC_FAMILY_SECP_K1)': {
+        192: ("297ac1722ccac7589ecb240dc719842538ca974beb79f228",
+              "0426b7bb38da649ac2138fc050c6548b32553dab68afebc36105d325b75538c12323cb0764789ecb992671beb2b6bef2f5"),
+        225: ("0024122bf020fa113f6c0ac978dfbd41f749257a9468febdbe0dc9f7e8",
+              "042cc7335f4b76042bed44ef45959a62aa215f7a5ff0c8111b8c44ed654ee71c1918326ad485b2d599fe2a6eab096ee26d977334d2bac6d61d"),
+        256: ("7fa06fa02d0e911b9a47fdc17d2d962ca01e2f31d60c6212d0ed7e3bba23a7b9",
+              "045c39154579efd667adc73a81015a797d2c8682cdfbd3c3553c4a185d481cdc50e42a0e1cbc3ca29a32a645e927f54beaed14c9dbbf8279d725f5495ca924b24d"),
+    },
+    'ECC(PSA_ECC_FAMILY_SECP_R1)': {
+        192: ("d83b57a59c51358d9c8bbb898aff507f44dd14cf16917190",
+              "04e35fcbee11cec3154f80a1a61df7d7612de4f2fd70c5608d0ee3a4a1a5719471adb33966dd9b035fdb774feeba94b04c"),
+        224: ("872f203b3ad35b7f2ecc803c3a0e1e0b1ed61cc1afe71b189cd4c995",
+              "046f00eadaa949fee3e9e1c7fa1247eecec86a0dce46418b9bd3117b981d4bd0ae7a990de912f9d060d6cb531a42d22e394ac29e81804bf160"),
+        256: ("49c9a8c18c4b885638c431cf1df1c994131609b580d4fd43a0cab17db2f13eee",
+              "047772656f814b399279d5e1f1781fac6f099a3c5ca1b0e35351834b08b65e0b572590cdaf8f769361bcf34acfc11e5e074e8426bdde04be6e653945449617de45"),
+        384: ("3f5d8d9be280b5696cc5cc9f94cf8af7e6b61dd6592b2ab2b3a4c607450417ec327dcdcaed7c10053d719a0574f0a76a",
+              "04d9c662b50ba29ca47990450e043aeaf4f0c69b15676d112f622a71c93059af999691c5680d2b44d111579db12f4a413a2ed5c45fcfb67b5b63e00b91ebe59d09a6b1ac2c0c4282aa12317ed5914f999bc488bb132e8342cc36f2ca5e3379c747"),
+        521: ("01b1b6ad07bb79e7320da59860ea28e055284f6058f279de666e06d435d2af7bda28d99fa47b7dd0963e16b0073078ee8b8a38d966a582f46d19ff95df3ad9685aae",
+              "04001de142d54f69eb038ee4b7af9d3ca07736fd9cf719eb354d69879ee7f3c136fb0fbf9f08f86be5fa128ec1a051d3e6c643e85ada8ffacf3663c260bd2c844b6f5600cee8e48a9e65d09cadd89f235dee05f3b8a646be715f1f67d5b434e0ff23a1fc07ef7740193e40eeff6f3bcdfd765aa9155033524fe4f205f5444e292c4c2f6ac1"),
+    },
+    'ECC(PSA_ECC_FAMILY_SECP_R2)': {
+        160: ("00bf539a1cdda0d7f71a50a3f98aec0a2e8e4ced1e",
+              "049570d541398665adb5cfa16f5af73b3196926bbd4b876bdb80f8eab20d0f540c22f4de9c140f6d7b"),
+    },
+    'ECC(PSA_ECC_FAMILY_SECT_K1)': {
+        163: ("03ebc8fcded2d6ab72ec0f75bdb4fd080481273e71",
+              "0406f88f90b4b65950f06ce433afdb097e320f433dc2062b8a65db8fafd3c110f46bc45663fbf021ee7eb9"),
+        233: ("41f08485ce587b06061c087e76e247c359de2ba9927ee013b2f1ed9ca8",
+              "0401e9d7189189f773bd8f71be2c10774ba18842434dfa9312595ea545104400f45a9d5675647513ba75b079fe66a29daac2ec86a6a5d4e75c5f290c1f"),
+        239: ("1a8069ce2c2c8bdd7087f2a6ab49588797e6294e979495602ab9650b9c61",
+              "04068d76b9f4508762c2379db9ee8b87ad8d86d9535132ffba3b5680440cfa28eb133d4232faf1c9aba96af11aefe634a551440800d5f8185105d3072d"),
+        283: ("006d627885dd48b9ec6facb5b3865377d755b75a5d51440e45211c1f600e15eff8a881a0",
+              "0405f48374debceaadb46ba385fd92048fcc5b9af1a1c90408bf94a68b9378df1cbfdfb6fb026a96bea06d8f181bf10c020adbcc88b6ecff96bdc564a9649c247cede601c4be63afc3"),
+        409: ("3ff5e74d932fa77db139b7c948c81e4069c72c24845574064beea8976b70267f1c6f9a503e3892ea1dcbb71fcea423faa370a8",
+              "04012c587f69f68b308ba6dcb238797f4e22290ca939ae806604e2b5ab4d9caef5a74a98fd87c4f88d292dd39d92e556e16c6ecc3c019a105826eef507cd9a04119f54d5d850b3720b3792d5d03410e9105610f7e4b420166ed45604a7a1f229d80975ba6be2060e8b"),
+        571: ("005008c97b4a161c0db1bac6452c72846d57337aa92d8ecb4a66eb01d2f29555ffb61a5317225dcc8ca6917d91789e227efc0bfe9eeda7ee21998cd11c3c9885056b0e55b4f75d51",
+              "04050172a7fd7adf98e4e2ed2742faa5cd12731a15fb0dbbdf75b1c3cc771a4369af6f2fa00e802735650881735759ea9c79961ded18e0daa0ac59afb1d513b5bbda9962e435f454fc020b4afe1445c2302ada07d295ec2580f8849b2dfa7f956b09b4cbe4c88d3b1c217049f75d3900d36df0fa12689256b58dd2ef784ebbeb0564600cf47a841485f8cf897a68accd5a"),
+    },
+    'ECC(PSA_ECC_FAMILY_SECT_R1)': {
+        163: ("009b05dc82d46d64a04a22e6e5ca70ca1231e68c50",
+              "0400465eeb9e7258b11e33c02266bfe834b20bcb118700772796ee4704ec67651bd447e3011959a79a04cb"),
+        233: ("00e5e42834e3c78758088b905deea975f28dc20ef6173e481f96e88afe7f",
+              "0400cd68c8af4430c92ec7a7048becfdf00a6bae8d1b4c37286f2d336f2a0e017eca3748f4ad6d435c85867aa014eea1bd6d9d005bbd8319cab629001d"),
+        283: ("004cecad915f6f3c9bbbd92d1eb101eda23f16c7dad60a57c87c7e1fd2b29b22f6d666ad",
+              "04052f9ff887254c2d1440ba9e30f13e2185ba53c373b2c410dae21cf8c167f796c08134f601cbc4c570bffbc2433082cf4d9eb5ba173ecb8caec15d66a02673f60807b2daa729b765"),
+        409: ("00c22422d265721a3ae2b3b2baeb77bee50416e19877af97b5fc1c700a0a88916ecb9050135883accb5e64edc77a3703f4f67a64",
+              "0401aa25466b1d291846db365957b25431591e50d9c109fe2106e93bb369775896925b15a7bfec397406ab4fe6f6b1a13bf8fdcb9300fa5500a813228676b0a6c572ed96b0f4aec7e87832e7e20f17ca98ecdfd36f59c82bddb8665f1f357a73900e827885ec9e1f22"),
+        571: ("026ac1cdf92a13a1b8d282da9725847908745138f5c6706b52d164e3675fcfbf86fc3e6ab2de732193267db029dd35a0599a94a118f480231cfc6ccca2ebfc1d8f54176e0f5656a1",
+              "040708f3403ee9948114855c17572152a08f8054d486defef5f29cbffcfb7cfd9280746a1ac5f751a6ad902ec1e0525120e9be56f03437af196fbe60ee7856e3542ab2cf87880632d80290e39b1a2bd03c6bbf6225511c567bd2ff41d2325dc58346f2b60b1feee4dc8b2af2296c2dc52b153e0556b5d24152b07f690c3fa24e4d1d19efbdeb1037833a733654d2366c74"),
+    },
+    'ECC(PSA_ECC_FAMILY_SECT_R2)': {
+        163: ("0210b482a458b4822d0cb21daa96819a67c8062d34",
+              "0403692601144c32a6cfa369ae20ae5d43c1c764678c037bafe80c6fd2e42b7ced96171d9c5367fd3dca6f"),
+    },
+    'ECC(PSA_ECC_FAMILY_BRAINPOOL_P_R1)': {
+        160: ("69502c4fdaf48d4fa617bdd24498b0406d0eeaac",
+              "04d4b9186816358e2f9c59cf70748cb70641b22fbab65473db4b4e22a361ed7e3de7e8a8ddc4130c5c"),
+        192: ("1688a2c5fbf4a3c851d76a98c3ec88f445a97996283db59f",
+              "043fdd168c179ff5363dd71dcd58de9617caad791ae0c37328be9ca0bfc79cebabf6a95d1c52df5b5f3c8b1a2441cf6c88"),
+        224: ("a69835dafeb5da5ab89c59860dddebcfd80b529a99f59b880882923c",
+              "045fbea378fc8583b3837e3f21a457c31eaf20a54e18eb11d104b3adc47f9d1c97eb9ea4ac21740d70d88514b98bf0bc31addac1d19c4ab3cc"),
+        256: ("2161d6f2db76526fa62c16f356a80f01f32f776784b36aa99799a8b7662080ff",
+              "04768c8cae4abca6306db0ed81b0c4a6215c378066ec6d616c146e13f1c7df809b96ab6911c27d8a02339f0926840e55236d3d1efbe2669d090e4c4c660fada91d"),
+        320: ("61b8daa7a6e5aa9fccf1ef504220b2e5a5b8c6dc7475d16d3172d7db0b2778414e4f6e8fa2032ead",
+              "049caed8fb4742956cc2ad12a9a1c995e21759ef26a07bc2054136d3d2f28bb331a70e26c4c687275ab1f434be7871e115d2350c0c5f61d4d06d2bcdb67f5cb63fdb794e5947c87dc6849a58694e37e6cd"),
+        384: ("3dd92e750d90d7d39fc1885cd8ad12ea9441f22b9334b4d965202adb1448ce24c5808a85dd9afc229af0a3124f755bcb",
+              "04719f9d093a627e0d350385c661cebf00c61923566fe9006a3107af1d871bc6bb68985fd722ea32be316f8e783b7cd1957785f66cfc0cb195dd5c99a8e7abaa848553a584dfd2b48e76d445fe00dd8be59096d877d4696d23b4bc8db14724e66a"),
+        512: ("372c9778f69f726cbca3f4a268f16b4d617d10280d79a6a029cd51879fe1012934dfe5395455337df6906dc7d6d2eea4dbb2065c0228f73b3ed716480e7d71d2",
+              "0438b7ec92b61c5c6c7fbc28a4ec759d48fcd4e2e374defd5c4968a54dbef7510e517886fbfc38ea39aa529359d70a7156c35d3cbac7ce776bdb251dd64bce71234424ee7049eed072f0dbc4d79996e175d557e263763ae97095c081e73e7db2e38adc3d4c9a0487b1ede876dc1fca61c902e9a1d8722b8612928f18a24845591a"),
+    },
+    'ECC(PSA_ECC_FAMILY_MONTGOMERY)': {
+        255: ("70076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c6a",
+              "8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a"),
+        448: ("e4e49f52686f9ee3b638528f721f1596196ffd0a1cddb64c3f216f06541805cfeb1a286dc78018095cdfec050e8007b5f4908962ba20d6c1",
+              "c0d3a5a2b416a573dc9909f92f134ac01323ab8f8e36804e578588ba2d09fe7c3e737f771ca112825b548a0ffded6d6a2fd09a3e77dec30e"),
+    },
+    'ECC(PSA_ECC_FAMILY_TWISTED_EDWARDS)': {
+        255: ("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60",
+              "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"),
+        448: ("6c82a562cb808d10d632be89c8513ebf6c929f34ddfa8c9f63c9960ef6e348a3528c8a3fcc2f044e39a3fc5b94492f8f032e7549a20098f95b",
+              "5fd7449b59b461fd2ce787ec616ad46a1da1342485a70e1f8a0ea75d80e96778edf124769b46c7061bd6783df1e50f6cd1fa1abeafe8256180"),
+    },
+    'RSA': {
+        1024: ("""
+3082025e
+ 020100
+ 02818100af057d396ee84fb75fdbb5c2b13c7fe5a654aa8aa2470b541ee1feb0b12d25c79711531249e1129628042dbbb6c120d1443524ef4c0e6e1d8956eeb2077af12349ddeee54483bc06c2c61948cd02b202e796aebd94d3a7cbf859c2c1819c324cb82b9cd34ede263a2abffe4733f077869e8660f7d6834da53d690ef7985f6bc3
+ 0203010001
+ 02818100874bf0ffc2f2a71d14671ddd0171c954d7fdbf50281e4f6d99ea0e1ebcf82faa58e7b595ffb293d1abe17f110b37c48cc0f36c37e84d876621d327f64bbe08457d3ec4098ba2fa0a319fba411c2841ed7be83196a8cdf9daa5d00694bc335fc4c32217fe0488bce9cb7202e59468b1ead119000477db2ca797fac19eda3f58c1
+ 024100e2ab760841bb9d30a81d222de1eb7381d82214407f1b975cbbfe4e1a9467fd98adbd78f607836ca5be1928b9d160d97fd45c12d6b52e2c9871a174c66b488113
+ 024100c5ab27602159ae7d6f20c3c2ee851e46dc112e689e28d5fcbbf990a99ef8a90b8bb44fd36467e7fc1789ceb663abda338652c3c73f111774902e840565927091
+ 024100b6cdbd354f7df579a63b48b3643e353b84898777b48b15f94e0bfc0567a6ae5911d57ad6409cf7647bf96264e9bd87eb95e263b7110b9a1f9f94acced0fafa4d
+ 024071195eec37e8d257decfc672b07ae639f10cbb9b0c739d0c809968d644a94e3fd6ed9287077a14583f379058f76a8aecd43c62dc8c0f41766650d725275ac4a1
+ 024100bb32d133edc2e048d463388b7be9cb4be29f4b6250be603e70e3647501c97ddde20a4e71be95fd5e71784e25aca4baf25be5738aae59bbfe1c997781447a2b24
+""", """
+ 308189
+  02818100af057d396ee84fb75fdbb5c2b13c7fe5a654aa8aa2470b541ee1feb0b12d25c79711531249e1129628042dbbb6c120d1443524ef4c0e6e1d8956eeb2077af12349ddeee54483bc06c2c61948cd02b202e796aebd94d3a7cbf859c2c1819c324cb82b9cd34ede263a2abffe4733f077869e8660f7d6834da53d690ef7985f6bc3
+ 0203010001
+"""),
+        1026: ("""
+3082025e
+ 020100
+ 02818102d09661fc74224ba7be7907abef4f5e8bcc264a802c978f7eaa5855ada05436d75db768d20f68595dbcc3d725b138e80b247e44a4163a0542fab612acbbde45f2e93894aa253bddef6a7becdc9cc29a99bacf48dc6e38db7a33e9ac924c520fc6be7d6e5646c1d67fb8b2b97ac60beecc3bb8e75bed8315aa3fe46f748a66d6ef
+ 0203010001
+ 0281806a4a346beba97f655fe834647d2944f5f40815e7302caf02ed179893c2d989395d5e877cacbf24a77a079d3db71580ccdbf63023d00f80e52f5c1a0716b323b7bfcbdc8a1781c44c4153e3da228d17b2dc78eb1f44cff60fe1150808a6e38ba2470aee2e948a6898ddadea56d9470927aca8d94a0338c11a8e95715b5f94e011
+ 024101f5418534c36236fc9fd38934d7c06dfed3829151ccab56b6330c641f7796a71924cf8119ca26e186ecd3068d6607a05260db4857651980436891adde9eb92ab7
+ 02410170042fbdbaba1e102b7f7f1dc9d940cfdcd85dd0ea65f543c6432e9c5480724bb49b1e5f80ca2b9f84cd6644bfb2e3d0968090b89f534dc2951e606db909dd89
+ 0241014b6c1aeb1c14a04ec04e5975fb015cb914984c054dd22bef24299939c514733f88bb3a9d16b04685b3a883b8923190ab672715d9d31add57b4983de1e8087e59
+ 02410117bf76f308b0560e00a2c864427dcd50b5161c2aa523a00f46f4e6c79b4c90958fd2a282028aac227477169888085a38c34f33b3c41934f1071db23b75ff53d1
+ 02410120a428b4e0c4a6f202920fd49cc9886e6b6719d40a3ad0604f5d5efd5ef6973a573ab324f38ecb8e669a69341597081e240b6ae4e2714887dd78dadaeb0b9216
+""", """
+308189
+ 02818102d09661fc74224ba7be7907abef4f5e8bcc264a802c978f7eaa5855ada05436d75db768d20f68595dbcc3d725b138e80b247e44a4163a0542fab612acbbde45f2e93894aa253bddef6a7becdc9cc29a99bacf48dc6e38db7a33e9ac924c520fc6be7d6e5646c1d67fb8b2b97ac60beecc3bb8e75bed8315aa3fe46f748a66d6ef
+ 0203010001
+"""),
+        1028: ("""
+3082025e
+ 020100
+ 0281810e62a76f0e0b59683a7ebf7cbfd37b1d1781d8f1b900604b507f0f04c72a3d340d067bcd53bea3caff4e4ae694f0b6d8f591a4167fbf7f372ab57e83a69a3f26f447bcf582bc9621a30a3b44d6b43e986d1a867b07489e4f9bfcadaa82a2782dc2729a631fb1fb9ffb794b4e53c76239e04d4a8f80352588db29462dde18237cf5
+ 0203010001
+ 02818101cfa0422e3bb60c15ef2e96db4499e789f5d634ea64567b2cdd6e2bdd121f85edccdee9b4ed178c5f33816101a7c371518b3e23f9fdc71b90242cd310b6b31428b0b64eb9596be0cc044cc85048982f90b706e66ccdd39ad5a1a7b64cf034eac0c35d7ace93f2bcd3ce243bd8f83b46f509ca2f805063002af2bb2d88b6ee36a9
+ 024103f0886d2977526f3f3f6a075600232ce3008517276dd3721dee08fd6c999fc976b9e8dd2bc143385fa4b48735ce81c66b501d7129ee7860cfbef23b5da91e6c2d
+ 024103a6c8734aace59d5f386f97de450f8a12d63ae6ac15d336e010c9fcf03a32f0611881ac6cd8b3f989925c0f025af26cf26aebd7d9b04eb503048dca2f503c28e9
+ 0241019b300451c3b47866f113e9a9c6a490c87c8dc6c2eca42902caea1f6907b97e0a4a02072aafc1185ae66c34345bddcd683361cda1aaf8a98009f9f8fa56d97081
+ 02401bcca849173d38e1e50ec48872ab54a2dcc621a80a7a1e8ea951287988718d5e85d90d64ab4926e9a575a168a385c421ad765813fc3f4af8cd00de7b6bba6e49
+ 0241036dcf69f6e548c8acfb536fb6cd186f8b8f20d313361d0447c1b5e380f4113e578b31e867dda47d44ad3761e793f725031b8d379f389de277a9a0137651df548a
+""", """
+308189
+ 0281810e62a76f0e0b59683a7ebf7cbfd37b1d1781d8f1b900604b507f0f04c72a3d340d067bcd53bea3caff4e4ae694f0b6d8f591a4167fbf7f372ab57e83a69a3f26f447bcf582bc9621a30a3b44d6b43e986d1a867b07489e4f9bfcadaa82a2782dc2729a631fb1fb9ffb794b4e53c76239e04d4a8f80352588db29462dde18237cf5
+ 0203010001
+"""),
+        1030: ("""
+3082025f
+ 020100
+ 0281812b7cd197f5796d1f8e576b2b37723fd9210814ef1c1995f9899d50058f379d239c66878e922f34c6ae3672c8598fcd5d47b764d2ec156e134d03cf6a94d38d2ea8bc76dbbc60c4b974219090eaf287497d7dcf7f119cfa867496f7e91c12b5d552e1d1461a80dbe9a59db3b016c6c0141c3b2a0e226089b855cb88ef656408bd89
+ 0203010001
+ 0281810210d5ff531cacb22f8cf7dd1fd9fb0376f3647f2e9ab3df9c89b9ad3c98e68b89adeb29901dd2f2cf2ac1f817726278830ec8a8d0fdd19d496ec6bc683671174786b7d6a8e822fa71d65ad35abbdf0e6e55ff2c1821b62bc630192160e5c9b3dcafc65ae6b2a088fbc5591da58a45dd7a30960f7d3def75b80cdf73247360e8fb
+ 0241072e371a3ba861e78e3eb9313065faab0a97216e9544bfc2d5b403844b43273705755a85aa0baf7114770cfeca20bca17ac19bc4cbba106a33b3dddca0fb535f33
+ 0241060e6af37ab4ea11f52b9344e7160eb2a53f1075e1229a7f10a301de3359f53e981ea0e17df0fb380f089e5c37dd40daa29eefd205f5c87b38f8fef636b57ba053
+ 0241023a5dd09ef83540b30b554d24f64f9c28d212068cfc62ffe26d53b605e05557a632ee9e90cfc56531f36aadd82be63bb8aa405a04d8bbe5281bc45883fed7b4af
+ 0241041de6dbad4caf5417a9504965201c4b99827de8f369f7456a84b3ef5c4ec9238c7a3d782a8915ebec643a698b5bee0af0c243592bce0042aadeaf49a4b4c6dd9b
+ 024105d32dee952b503b536fcecf19ec08236a9cd945c49551bf99f15b674fc21aa199f4c4211f0f0007c417c1fb4155326a2142fca454bbd38d6dbc6caa7ac335a17c
+""", """
+308189
+ 0281812b7cd197f5796d1f8e576b2b37723fd9210814ef1c1995f9899d50058f379d239c66878e922f34c6ae3672c8598fcd5d47b764d2ec156e134d03cf6a94d38d2ea8bc76dbbc60c4b974219090eaf287497d7dcf7f119cfa867496f7e91c12b5d552e1d1461a80dbe9a59db3b016c6c0141c3b2a0e226089b855cb88ef656408bd89
+ 0203010001
+"""),
+        1536: ("""
+3082037b
+ 020100
+ 0281c100c870feb6ca6b1d2bd9f2dd99e20f1fe2d7e5192de662229dbe162bd1ba66336a7182903ca0b72796cd441c83d24bcdc3e9a2f5e4399c8a043f1c3ddf04754a66d4cfe7b3671a37dd31a9b4c13bfe06ee90f9d94ddaa06de67a52ac863e68f756736ceb014405a6160579640f831dddccc34ad0b05070e3f9954a58d1815813e1b83bcadba814789c87f1ef2ba5d738b793ec456a67360eea1b5faf1c7cc7bf24f3b2a9d0f8958b1096e0f0c335f8888d0c63a51c3c0337214fa3f5efdf6dcc35
+ 0203010001
+ 0281c06d2d670047973a87752a9d5bc14f3dae00acb01f593aa0e24cf4a49f932931de4bbfb332e2d38083da80bc0b6d538edba479f7f77d0deffb4a28e6e67ff6273585bb4cd862535c946605ab0809d65f0e38f76e4ec2c3d9b8cd6e14bcf667943892cd4b34cc6420a439abbf3d7d35ef73976dd6f9cbde35a51fa5213f0107f83e3425835d16d3c9146fc9e36ce75a09bb66cdff21dd5a776899f1cb07e282cca27be46510e9c799f0d8db275a6be085d9f3f803218ee3384265bfb1a3640e8ca1
+ 026100e6848c31d466fffefc547e3a3b0d3785de6f78b0dd12610843512e495611a0675509b1650b27415009838dd8e68eec6e7530553b637d602424643b33e8bc5b762e1799bc79d56b13251d36d4f201da2182416ce13574e88278ff04467ad602d9
+ 026100de994fdf181f02be2bf9e5f5e4e517a94993b827d1eaf609033e3a6a6f2396ae7c44e9eb594cf1044cb3ad32ea258f0c82963b27bb650ed200cde82cb993374be34be5b1c7ead5446a2b82a4486e8c1810a0b01551609fb0841d474bada802bd
+ 026076ddae751b73a959d0bfb8ff49e7fcd378e9be30652ecefe35c82cb8003bc29cc60ae3809909baf20c95db9516fe680865417111d8b193dbcf30281f1249de57c858bf1ba32f5bb1599800e8398a9ef25c7a642c95261da6f9c17670e97265b1
+ 0260732482b837d5f2a9443e23c1aa0106d83e82f6c3424673b5fdc3769c0f992d1c5c93991c7038e882fcda04414df4d7a5f4f698ead87851ce37344b60b72d7b70f9c60cae8566e7a257f8e1bef0e89df6e4c2f9d24d21d9f8889e4c7eccf91751
+ 026009050d94493da8f00a4ddbe9c800afe3d44b43f78a48941a79b2814a1f0b81a18a8b2347642a03b27998f5a18de9abc9ae0e54ab8294feac66dc87e854cce6f7278ac2710cb5878b592ffeb1f4f0a1853e4e8d1d0561b6efcc831a296cf7eeaf
+""", """
+3081c9
+ 0281c100c870feb6ca6b1d2bd9f2dd99e20f1fe2d7e5192de662229dbe162bd1ba66336a7182903ca0b72796cd441c83d24bcdc3e9a2f5e4399c8a043f1c3ddf04754a66d4cfe7b3671a37dd31a9b4c13bfe06ee90f9d94ddaa06de67a52ac863e68f756736ceb014405a6160579640f831dddccc34ad0b05070e3f9954a58d1815813e1b83bcadba814789c87f1ef2ba5d738b793ec456a67360eea1b5faf1c7cc7bf24f3b2a9d0f8958b1096e0f0c335f8888d0c63a51c3c0337214fa3f5efdf6dcc35
+ 0203010001
+"""),
+        2048: ("""
+308204a3
+ 020100
+ 0282010100f7bb6b8eab40491cd64455ec04d4ed8db5051a9738fc7af73ff3b097511cce40aaf76537b1353504427986b7b2b53a964a6937b558ec0d1dea274af2b8fff2f094c243fa577266a79db0c26ffe30416d23ef05dd5fecab413ebbb4f8526ae720a94584226b37d92ef463fc736cb38e530e7488d9162f5726807bc543138a2d258adb4d680221c2532381ccfa81bc89bc3d7b84039c2df41ce3ec8db91c2380e781ba3aa9e23b74ed9973d4908efca47aa8d9b7b0a4423297a404427c3f3cd6e0782e4553880f06ba39a64f4a7b0eef921a6050a207cefadcf07394a3e18ea915dc8497e7ae61fc3162f62f5065a692af077266f7360c2076cebeaf14cb22c1ed
+ 0203010001
+ 0282010000b8962dce604bc62e7678f48ca80cfff456ad36e2f6d329cc911a42ba7cf5b9b8f5aae1005e4a06f6e591279038d8508f2b62badfa5223da3cc94fa8360d5556f6d6852be75ea08135cac1834da719a4e7837e166d1d2c6c816b64661c10766b02f705cc4489f947428255835a909214341c21335ae12181dd81e611d59b1db70667bebd7e92b71e1d388318d3ec14d616f72c231f6727a183e6818285bd65f6572cadc9012248821b2d0ae6cedd30ca440d4d34cd77e2cf6b40ed2c7d856b30d474733fce0fb695c3e6530c079aed955e4073055f2655d4b671e291fde400f2f06d0b33f87d261e0ad3dae48a913841b34cfed03790fcaee00de2e90fb9621
+ 02818100fcbe89cd1aa319e49ef4f72149bf06da57dcc64d3de605e9ff3e76fc66f4b1e2878245ffd71990511b17e97f33818889a8c21b5527fd181327affe88f9bba670c4e6f1e6309bd0323074e4cbcf23dce3c19b8d5495f56a93059ba7414f28ed1ec906ad18c63de1148abcfe9be7986000f425e580b70e43e48e24fa9d51aaae4d
+ 02818100faec5a7bed2e53cfca1e167db4641db5a00fe2c328125423d594789f3ec072c623e7afbdee0089fd26307651f6d3611a88af28c34585d5cb713a650c35933f58944db9bd15ba9fc28b07e6705b7b3ef1ccb48d21a53569c8b84c444b61ea5c6e67b54f0afd852ffb8c92a111fab8677263eeb80cf1a3403b4a9a209776947221
+ 0281802ff99afeabc7b9ea83a1cc272d706d4494d8fb6b3e0ca3a2bf28843d74ed8db68a3258472ff5524792f4ff057e296059810717591ab61813cabcc57c0aab6bf48bebaa8f1f3af45212909dbd721c449996ee87ed3e69cf49090f7ab812e699dbf61ca64ec592895ef4d6db1d8ce08798a6bf6ac8fbf6613cc91e8bd3c0e4bd21
+ 02818100b29b34590bddb308afecb4c3ab78abf1114add755e7b956aa0677b6896a933c937db7dabaad2b565fd1df7caa5ef9629e5eb100fd6d7c9f372d846fee6cfb6025e25e934df57a4ca3c5e5637d9d6235ac80428852f6c92acae0a937e38e731fde0521d3e4c70d653ae9edc89c8b623e4379fbf606f4b6db8068528f7c70f2921
+ 0281800ed47ae05b275a23a7dfe3ffb727e3a268e626a59d401d2d846de26954ff54fc9ed93a9af33fac2c967a18e0f86145083e39923454bc10da5f4937e836b99851956bffb301ce9e06789786693213fcde6d5f2933d52bb29dc340ea011257788d3c5775eb6569230aafbf08752d40a8419de71b01d4927e27c1079caada0568b1
+               """, """
+3082010a
+ 0282010100f7bb6b8eab40491cd64455ec04d4ed8db5051a9738fc7af73ff3b097511cce40aaf76537b1353504427986b7b2b53a964a6937b558ec0d1dea274af2b8fff2f094c243fa577266a79db0c26ffe30416d23ef05dd5fecab413ebbb4f8526ae720a94584226b37d92ef463fc736cb38e530e7488d9162f5726807bc543138a2d258adb4d680221c2532381ccfa81bc89bc3d7b84039c2df41ce3ec8db91c2380e781ba3aa9e23b74ed9973d4908efca47aa8d9b7b0a4423297a404427c3f3cd6e0782e4553880f06ba39a64f4a7b0eef921a6050a207cefadcf07394a3e18ea915dc8497e7ae61fc3162f62f5065a692af077266f7360c2076cebeaf14cb22c1ed
+ 0203010001
+"""),
+        4096: ("""
+30820929
+ 020100
+ 0282020100cc8725f6b38d5d01aeeb07d36e03de4d31a0261ce74fe11a895ecfd13d168aee932af135ffbb849877273897081f3f7593c14ae82bc266c10544f726ae1ccf133d8a4018d380dfa25251c011107b7513a943346aa0e0dec11d8d7fa25644653c118daabce6d41f066f6621768801478055780e91b68ea3c95856d172a89032b39c824e8b7dc1a3f8aee4f6b368baa3cd68f50d52680117e9b913d7f8c852a0d1008e8b87a5c97e37afc11a080550557b8b4dcbd8e192ed3366d83a09d27c77e150f66855b5dcfdb2df151bd7f444250eaf6fe3f236826c81fa848101bfaad535ffb522d6ff97c9dd1e43b82cce2921d153c15450c4724ffd3efdca578e013650a03a5cf501fc58600fb5c860c0ef0cfe0ac0712d441313dca41a4d7d411e6c83b2151749d28be4692f62373db07e4a79051c5682ec20d491c4cfc7bc140f35fa15e5a1fa756d65b8ef93addf4c47c4a35b184f22a1ef089948f946f6faeb6470f26746e658cf9b4177417842e6d373558089aff721b930e9ec61b4f6a02c052c6924d39a5bbb15ed1106c4010f4dd69c79d042c8b31661b1ee486bc69db5f2f07a50d85b20699d601315625bb869629c7f4c5d48b211d097f438acec95973a38d421090af0f13484e4e94b8cb5efc18507f4b931df39987ffb2830293e4da381aaf70b3292952ef934e2b40fdebba3d9701b76e1be548274b2602d888537482d
+ 0203010001
+ 028202001a943e9c0089f0aa0116048a96abb486321a86916f82fb352460789fcfb1400550853e5afedc9ad6e877259cc4feb093c24b968534f89abb5f48aed8ad3c4bb1cba7cd7c1c724d3dae36770010b5068a334f2b3ee720c9f9ed320001f3f587f5662f939e605df519343d60c0635ccd32b188bc55f5d434173c9e6db2199341af833990e50246f99cddf79dd2c35babe14c103a76b8d2d98d73528f98c249b0a1f09155b31f599fc833542422a2342623bbbef4ac7ee605e2cdecf01fea25683bd4f66ca924ccef00418adff730c4714f66ffa2af0da3e5df7f539c634289fc12bc24093ec8f0ec180af0907cec1ebec911fa180fb5f3c80ed852896ad6e6b3eccb44de62193d52118cab2b171071d5fdaa7c4288fc7766d57774f4be46151bb90ace7c10c215f62ed26e52e6122436f532bd54fc08272adb216a2db433d5699c40ad58faa2660898ffccfc98002f8bb0361b4cf9ed6e93c1ca96d34a1ef40460f85918cfde4a8193b51ecea4b3903cae924a8fad5f8308954c9f19a7597bf0a75126a557e49f8bbd31fc4e8556f230640bf36204c6cf3d56dca5a41d860307ba6705a698681100a327f91739c486c470ba71d03d285314b0d7d04008e03f2a2b85e7c243d6fd9b97a02168c069ec572d3f0ca15ebcb1739f3a0b3c147a88e0b74f45a007ae927d6f822bf50b87b1e93fe7d9180bc6bc12bde6c8070d10c97331
+ 0282010100f50ebceac9d3c64482a8c265d6365461aa4a31a6a7633a24c8e34794ecdfcab1d6b52fb6a5f38055cc32d6a61b889550de27b3d0bd68b6d4fda041598ab98887143988576806b1c48720794902952ebe1bf0def65a0e6f94067056e6864fa2882e3a16f246282093d037639078182dd0a6eb21d3bad0637901a268b14c632c9d0b1690ed88abdde03f528247aa2e41557d0865ad34e53ff53ae0e5dea195d93fe65c25871f6f23adf34b6e960c2978f2b7475dafce6cbb26a53934d26c193d67f32de91035eeb89022beb7d5df784ac20ca6ab91bf6b775b6c9416f605b4841736cbfbd22ad98ab2e8428457e0793f5af40e550b48765d59e6e1b4a4a1f571f1
+ 0282010100d5a91d4d44bb9b73c1fe0248925e2c0ec1de51390bd8a73b453da51ae29325ae7657089fd4ee4a2fd96e345b57f672d7d484fde99189ab0a6365bf2b38680d6bb947f4b217be660323c26b86d643ae686d82e36ec00cfd038942443caa04a0f91e68ec717935b45e790311be56440d7176949594688ed1dd5c9103c57c158d05e4c37b98d81898030744a64f6ebdbf750aab79757e34dac422163ea7c0f42b97710c861978b24100385aad727e5f3836a74ea4bf1d36ef2a5edf9c9e8f996ef3191348450ea9f1d4a63db29cb06f63e5badb18e4d40f5112b658d1cc23cb65388aca03d141a6bc5fbd9429fe33d340d3e85bfa848908d60b562f894e8a337dfd
+ 0282010100c4950f0d95dc51d791ad094d223b3113abc49af1e2a361f83242c8a07a28c8744315d3f1c44c82edd0c21398eacb75648ae1f48885f92379d6ffa08cd11126a99d9acd79b8946e3486659185f511718ec5e1432b02714426cdc77e9eacade36735161a643dcd60dcd2922c47af5f4e196c5d8124555f67fca148048dfe062cbaca334f0d8daeb96d73be9f8e17c1c55d6bd0b9a7e99fe1dfba5cc16a07dbaa8c6d220c64c9dda114a0f029052b3a75b0d73fe3b2ed7821e5cd7307a1a95fd1f7ba8760c8454b7c38fbf65c88b01cd273ba2c55c3b477e426ae025a2cffc4a095f2ba4e0779a24b765b85489f2a0e79b95fc0c38e2a91f12ef65ca749ce369431
+ 028201002aa48e0c95e33bab66d4637048863314deec9819629be30499552c56a951e4fb64f309ed9c79d2a4aa28ac9a6e7be97fda1290fac4e94d11cdb4c8eabf5f450e72f4418a29e2fe493221e3840dcf8447a353b440ae63e93b83718e5ced31ef4ec91af7d5cdf3420478f27be019278be7515b665f305f10d3b55ddbfad64116dc4e4415aef3b234e4a5d6b5bab4c77a26c9f25f536bd4f0b4a478fc184f126c80d53742ac62c270e6b258a6b56b3365ecc28797a9ed12c1b91b265603ef751807bcc1747313f22729e1e3fe79f75cc3fb5dc7ccb81efacf9b847945a6109ecf9cf156505cbb55a3d317eb325661d18fe6bb416046837318053b365199334c03a1
+ 0282010100ee63706030a4ece9fe3bddcfc49f5a83f37f63ebcb29dbdc999f6ff54b596f115cf1eca09990108a439518e996f689fdde89b2c67edc04bf8e366734c2ae3017ec14e042050e7c656840146ca048394dcebe90dd2195349bbad306569031b2ef6e9171d2ae7797c8844e548394ca3b768d8496e99ef63abb59b0ff7fc70eb53153dd0f59018a275acba701f2c76a15c894f53461fedf65bc25c2c5cec396e556a1a919bc7a056393d50644126dcdef9256642e65a6043cbce9497e192cf2cb33648e117f41dbf01900acb93b0c78ddf31f381f4db3f9ccbbb69093dabf2e89dbbc0cb72f20c005a2519e3a874146495d7aacf3416a422e560986f22f39456e7f
+               """, """
+3082020a
+ 0282020100cc8725f6b38d5d01aeeb07d36e03de4d31a0261ce74fe11a895ecfd13d168aee932af135ffbb849877273897081f3f7593c14ae82bc266c10544f726ae1ccf133d8a4018d380dfa25251c011107b7513a943346aa0e0dec11d8d7fa25644653c118daabce6d41f066f6621768801478055780e91b68ea3c95856d172a89032b39c824e8b7dc1a3f8aee4f6b368baa3cd68f50d52680117e9b913d7f8c852a0d1008e8b87a5c97e37afc11a080550557b8b4dcbd8e192ed3366d83a09d27c77e150f66855b5dcfdb2df151bd7f444250eaf6fe3f236826c81fa848101bfaad535ffb522d6ff97c9dd1e43b82cce2921d153c15450c4724ffd3efdca578e013650a03a5cf501fc58600fb5c860c0ef0cfe0ac0712d441313dca41a4d7d411e6c83b2151749d28be4692f62373db07e4a79051c5682ec20d491c4cfc7bc140f35fa15e5a1fa756d65b8ef93addf4c47c4a35b184f22a1ef089948f946f6faeb6470f26746e658cf9b4177417842e6d373558089aff721b930e9ec61b4f6a02c052c6924d39a5bbb15ed1106c4010f4dd69c79d042c8b31661b1ee486bc69db5f2f07a50d85b20699d601315625bb869629c7f4c5d48b211d097f438acec95973a38d421090af0f13484e4e94b8cb5efc18507f4b931df39987ffb2830293e4da381aaf70b3292952ef934e2b40fdebba3d9701b76e1be548274b2602d888537482d
+ 0203010001
+"""),
+    },
+})
diff --git a/framework/scripts/mbedtls_framework/bignum_common.py b/framework/scripts/mbedtls_framework/bignum_common.py
new file mode 100644
index 0000000..eebc858
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/bignum_common.py
@@ -0,0 +1,406 @@
+"""Common features for bignum in test generation framework."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+from abc import abstractmethod
+import enum
+from typing import Iterator, List, Tuple, TypeVar, Any
+from copy import deepcopy
+from itertools import chain
+from math import ceil
+
+from . import test_case
+from . import test_data_generation
+from .bignum_data import INPUTS_DEFAULT, MODULI_DEFAULT
+
+T = TypeVar('T') #pylint: disable=invalid-name
+
+def invmod(a: int, n: int) -> int:
+    """Return inverse of a to modulo n.
+
+    Equivalent to pow(a, -1, n) in Python 3.8+. Implementation is equivalent
+    to long_invmod() in CPython.
+    """
+    b, c = 1, 0
+    while n:
+        q, r = divmod(a, n)
+        a, b, c, n = n, c, b - q*c, r
+    # at this point a is the gcd of the original inputs
+    if a == 1:
+        return b
+    raise ValueError("Not invertible")
+
+def invmod_positive(a: int, n: int) -> int:
+    """Return a non-negative inverse of a to modulo n."""
+    inv = invmod(a, n)
+    return inv if inv >= 0 else inv + n
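+
+# Worked example (illustrative): invmod(3, 7) returns -2, since
+# 3 * -2 == -6 == 1 (mod 7); invmod_positive(3, 7) returns the
+# equivalent non-negative value 5.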
+
+def hex_to_int(val: str) -> int:
+    """Implement the syntax accepted by mbedtls_test_read_mpi().
+
+    This is a superset of what is accepted by mbedtls_test_read_mpi_core().
+    """
+    if val in ['', '-']:
+        return 0
+    return int(val, 16)
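+
+# For example, hex_to_int('') and hex_to_int('-') both return 0,
+# while hex_to_int('-a') returns -10.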
+
+def quote_str(val: str) -> str:
+    return "\"{}\"".format(val)
+
+def bound_mpi(val: int, bits_in_limb: int) -> int:
+    """First number exceeding number of limbs needed for given input value."""
+    return bound_mpi_limbs(limbs_mpi(val, bits_in_limb), bits_in_limb)
+
+def bound_mpi_limbs(limbs: int, bits_in_limb: int) -> int:
+    """First number exceeding maximum of given number of limbs."""
+    bits = bits_in_limb * limbs
+    return 1 << bits
+
+def limbs_mpi(val: int, bits_in_limb: int) -> int:
+    """Return the number of limbs required to store value."""
+    bit_length = max(val.bit_length(), 1)
+    return (bit_length + bits_in_limb - 1) // bits_in_limb
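+
+# Illustrative values with 32-bit limbs: limbs_mpi(0xffffffff, 32) == 1
+# and bound_mpi(0xffffffff, 32) == 2**32, while 0x100000000 has a bit
+# length of 33, so limbs_mpi(0x100000000, 32) == 2 and
+# bound_mpi(0x100000000, 32) == 2**64.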
+
+def combination_pairs(values: List[T]) -> List[Tuple[T, T]]:
+    """Return all pair combinations from input values."""
+    return [(x, y) for x in values for y in values]
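+
+# For instance, combination_pairs([1, 2]) returns the full Cartesian
+# product [(1, 1), (1, 2), (2, 1), (2, 2)].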
+
+def bits_to_limbs(bits: int, bits_in_limb: int) -> int:
+    """ Return the appropriate ammount of limbs needed to store
+        a number contained in input bits"""
+    return ceil(bits / bits_in_limb)
+
+def hex_digits_for_limb(limbs: int, bits_in_limb: int) -> int:
+    """ Return the hex digits need for a number of limbs. """
+    return 2 * ((limbs * bits_in_limb) // 8)
+
+def hex_digits_max_int(val: str, bits_in_limb: int) -> int:
+    """ Return the first number exceeding maximum  the limb space
+    required to store the input hex-string value. This method
+    weights on the input str_len rather than numerical value
+    and works with zero-padded inputs"""
+    n = ((1 << (len(val) * 4)) - 1)
+    l = limbs_mpi(n, bits_in_limb)
+    return bound_mpi_limbs(l, bits_in_limb)
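+
+# For example, with 32-bit limbs, hex_digits_max_int('0000000001', 32)
+# treats the input as 10 hex digits (40 bits), which needs 2 limbs, so it
+# returns 2**64; the numerical value 1 alone would only need 1 limb.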
+
+def zfill_match(reference: str, target: str) -> str:
+    """ Zero pad target hex-string to match the limb size of
+    the reference input """
+    lt = len(target)
+    lr = len(reference)
+    target_len = lr if lt < lr else lt
+    return "{:x}".format(int(target, 16)).zfill(target_len)
+
+class OperationCommon(test_data_generation.BaseTest):
+    """Common features for bignum binary operations.
+
+    This adds functionality common in binary operation tests.
+
+    Attributes:
+        symbol: Symbol to use for the operation in case description.
+        input_values: List of values to use as test case inputs. These are
+            combined to produce pairs of values.
+        input_cases: List of tuples containing pairs of test case inputs. This
+            can be used to implement specific pairs of inputs.
+        unique_combinations_only: Boolean to select if test case combinations
+            must be unique. If True, only A,B or B,A would be included as a test
+            case. If False, both A,B and B,A would be included.
+        input_style: Controls how test data is passed to the functions
+            in the generated test cases. "variable" passes them as they are
+            defined in the python source. "arch_split" pads the values with
+            zeroes depending on the architecture/limb size. If this is set,
+            test cases are generated for all architectures.
+        arity: the number of operands for the operation. Currently supported
+            values are 1 and 2.
+    """
+    symbol = ""
+    input_values = INPUTS_DEFAULT # type: List[str]
+    input_cases = [] # type: List[Any]
+    dependencies = [] # type: List[Any]
+    unique_combinations_only = False
+    input_styles = ["variable", "fixed", "arch_split"] # type: List[str]
+    input_style = "variable" # type: str
+    limb_sizes = [32, 64] # type: List[int]
+    arities = [1, 2]
+    arity = 2
+    suffix = False   # for arity = 1, symbol can be prefix (default) or suffix
+
+    def __init__(self, val_a: str, val_b: str = "0", bits_in_limb: int = 32) -> None:
+        self.val_a = val_a
+        self.val_b = val_b
+        # Setting the int versions here as opposed to making them @properties
+        # provides earlier/more robust input validation.
+        self.int_a = hex_to_int(val_a)
+        self.int_b = hex_to_int(val_b)
+        self.dependencies = deepcopy(self.dependencies)
+        if bits_in_limb not in self.limb_sizes:
+            raise ValueError("Invalid number of bits in limb!")
+        if self.input_style == "arch_split":
+            self.dependencies.append("MBEDTLS_HAVE_INT{:d}".format(bits_in_limb))
+        self.bits_in_limb = bits_in_limb
+
+    @property
+    def boundary(self) -> int:
+        if self.arity == 1:
+            return self.int_a
+        elif self.arity == 2:
+            return max(self.int_a, self.int_b)
+        raise ValueError("Unsupported number of operands!")
+
+    @property
+    def limb_boundary(self) -> int:
+        return bound_mpi(self.boundary, self.bits_in_limb)
+
+    @property
+    def limbs(self) -> int:
+        return limbs_mpi(self.boundary, self.bits_in_limb)
+
+    @property
+    def hex_digits(self) -> int:
+        return hex_digits_for_limb(self.limbs, self.bits_in_limb)
+
+    def format_arg(self, val: str) -> str:
+        if self.input_style not in self.input_styles:
+            raise ValueError("Unknown input style!")
+        if self.input_style == "variable":
+            return val
+        else:
+            return val.zfill(self.hex_digits)
+
+    def format_result(self, res: int) -> str:
+        res_str = '{:x}'.format(res)
+        return quote_str(self.format_arg(res_str))
+
+    @property
+    def arg_a(self) -> str:
+        return self.format_arg(self.val_a)
+
+    @property
+    def arg_b(self) -> str:
+        if self.arity == 1:
+            raise AttributeError("Operation is unary and doesn't have arg_b!")
+        return self.format_arg(self.val_b)
+
+    def arguments(self) -> List[str]:
+        args = [quote_str(self.arg_a)]
+        if self.arity == 2:
+            args.append(quote_str(self.arg_b))
+        return args + self.result()
+
+    def description(self) -> str:
+        """Generate a description for the test case.
+
+        If case_description is not set, it defaults to the form A `symbol` B,
+        where symbol represents the operation. Descriptions of each value
+        are generated to provide some context for the test case.
+        """
+        if not self.case_description:
+            if self.arity == 1:
+                format_string = "{1:x} {0}" if self.suffix else "{0} {1:x}"
+                self.case_description = format_string.format(
+                    self.symbol, self.int_a
+                )
+            elif self.arity == 2:
+                self.case_description = "{:x} {} {:x}".format(
+                    self.int_a, self.symbol, self.int_b
+                )
+        return super().description()
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    @abstractmethod
+    def result(self) -> List[str]:
+        """Get the result of the operation.
+
+        This could be calculated during initialization and stored as `_result`
+        and then returned, or calculated when the method is called.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def get_value_pairs(cls) -> Iterator[Tuple[str, str]]:
+        """Generator to yield pairs of inputs.
+
+        Combinations are first generated from all input values, and then
+        specific cases provided.
+        """
+        if cls.arity == 1:
+            yield from ((a, "0") for a in cls.input_values)
+        elif cls.arity == 2:
+            if cls.unique_combinations_only:
+                yield from combination_pairs(cls.input_values)
+            else:
+                yield from (
+                    (a, b)
+                    for a in cls.input_values
+                    for b in cls.input_values
+                )
+        else:
+            raise ValueError("Unsupported number of operands!")
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        if cls.input_style not in cls.input_styles:
+            raise ValueError("Unknown input style!")
+        if cls.arity not in cls.arities:
+            raise ValueError("Unsupported number of operands!")
+        if cls.input_style == "arch_split":
+            test_objects = (cls(a, b, bits_in_limb=bil)
+                            for a, b in cls.get_value_pairs()
+                            for bil in cls.limb_sizes)
+            special_cases = (cls(*args, bits_in_limb=bil) # type: ignore
+                             for args in cls.input_cases
+                             for bil in cls.limb_sizes)
+        else:
+            test_objects = (cls(a, b)
+                            for a, b in cls.get_value_pairs())
+            special_cases = (cls(*args) for args in cls.input_cases)
+        yield from (valid_test_object.create_test_case()
+                    for valid_test_object in filter(
+                        lambda test_object: test_object.is_valid,
+                        chain(test_objects, special_cases)
+                        )
+                    )
+
+
+class ModulusRepresentation(enum.Enum):
+    """Representation selector of a modulus."""
+    # Numerical values aligned with the type mbedtls_mpi_mod_rep_selector
+    INVALID = 0
+    MONTGOMERY = 2
+    OPT_RED = 3
+
+    def symbol(self) -> str:
+        """The C symbol for this representation selector."""
+        return 'MBEDTLS_MPI_MOD_REP_' + self.name
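+    # For example, ModulusRepresentation.MONTGOMERY.symbol() evaluates to
+    # 'MBEDTLS_MPI_MOD_REP_MONTGOMERY'.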
+
+    @classmethod
+    def supported_representations(cls) -> List['ModulusRepresentation']:
+        """Return all representations that are supported in positive test cases."""
+        return [cls.MONTGOMERY, cls.OPT_RED]
+
+
+class ModOperationCommon(OperationCommon):
+    #pylint: disable=abstract-method
+    """Target for bignum mod_raw test case generation."""
+    moduli = MODULI_DEFAULT # type: List[str]
+    montgomery_form_a = False
+    disallow_zero_a = False
+
+    def __init__(self, val_n: str, val_a: str, val_b: str = "0",
+                 bits_in_limb: int = 64) -> None:
+        super().__init__(val_a=val_a, val_b=val_b, bits_in_limb=bits_in_limb)
+        self.val_n = val_n
+        # Setting the int versions here as opposed to making them @properties
+        # provides earlier/more robust input validation.
+        self.int_n = hex_to_int(val_n)
+
+    def to_montgomery(self, val: int) -> int:
+        return (val * self.r) % self.int_n
+
+    def from_montgomery(self, val: int) -> int:
+        return (val * self.r_inv) % self.int_n
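+
+    # Note that these two conversions are mutually inverse modulo int_n:
+    # from_montgomery(to_montgomery(x)) == x % int_n, because r * r_inv
+    # is congruent to 1 mod int_n (see the r and r_inv properties below).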
+
+    def convert_from_canonical(self, canonical: int,
+                               rep: ModulusRepresentation) -> int:
+        """Convert values from canonical representation to the given representation."""
+        if rep is ModulusRepresentation.MONTGOMERY:
+            return self.to_montgomery(canonical)
+        elif rep is ModulusRepresentation.OPT_RED:
+            return canonical
+        else:
+            raise ValueError('Modulus representation not supported: {}'
+                             .format(rep.name))
+
+    @property
+    def boundary(self) -> int:
+        return self.int_n
+
+    @property
+    def arg_a(self) -> str:
+        if self.montgomery_form_a:
+            value_a = self.to_montgomery(self.int_a)
+        else:
+            value_a = self.int_a
+        return self.format_arg('{:x}'.format(value_a))
+
+    @property
+    def arg_n(self) -> str:
+        return self.format_arg(self.val_n)
+
+    def format_arg(self, val: str) -> str:
+        return super().format_arg(val).zfill(self.hex_digits)
+
+    def arguments(self) -> List[str]:
+        return [quote_str(self.arg_n)] + super().arguments()
+
+    @property
+    def r(self) -> int: # pylint: disable=invalid-name
+        l = limbs_mpi(self.int_n, self.bits_in_limb)
+        return bound_mpi_limbs(l, self.bits_in_limb)
+
+    @property
+    def r_inv(self) -> int:
+        return invmod(self.r, self.int_n)
+
+    @property
+    def r2(self) -> int: # pylint: disable=invalid-name
+        return pow(self.r, 2)
+
+    @property
+    def is_valid(self) -> bool:
+        if self.int_a >= self.int_n:
+            return False
+        if self.disallow_zero_a and self.int_a == 0:
+            return False
+        if self.arity == 2 and self.int_b >= self.int_n:
+            return False
+        return True
+
+    def description(self) -> str:
+        """Generate a description for the test case.
+
+        It uses the form A `symbol` B mod N, where symbol is used to represent
+        the operation.
+        """
+
+        if not self.case_description:
+            return super().description() + " mod {:x}".format(self.int_n)
+        return super().description()
+
+    @classmethod
+    def input_cases_args(cls) -> Iterator[Tuple[Any, Any, Any]]:
+        if cls.arity == 1:
+            yield from ((n, a, "0") for a, n in cls.input_cases)
+        elif cls.arity == 2:
+            yield from ((n, a, b) for a, b, n in cls.input_cases)
+        else:
+            raise ValueError("Unsupported number of operands!")
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        if cls.input_style not in cls.input_styles:
+            raise ValueError("Unknown input style!")
+        if cls.arity not in cls.arities:
+            raise ValueError("Unsupported number of operands!")
+        if cls.input_style == "arch_split":
+            test_objects = (cls(n, a, b, bits_in_limb=bil)
+                            for n in cls.moduli
+                            for a, b in cls.get_value_pairs()
+                            for bil in cls.limb_sizes)
+            special_cases = (cls(*args, bits_in_limb=bil)
+                             for args in cls.input_cases_args()
+                             for bil in cls.limb_sizes)
+        else:
+            test_objects = (cls(n, a, b)
+                            for n in cls.moduli
+                            for a, b in cls.get_value_pairs())
+            special_cases = (cls(*args) for args in cls.input_cases_args())
+        yield from (valid_test_object.create_test_case()
+                    for valid_test_object in filter(
+                        lambda test_object: test_object.is_valid,
+                        chain(test_objects, special_cases)
+                        ))
diff --git a/framework/scripts/mbedtls_framework/bignum_core.py b/framework/scripts/mbedtls_framework/bignum_core.py
new file mode 100644
index 0000000..909f6a3
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/bignum_core.py
@@ -0,0 +1,896 @@
+"""Framework classes for generation of bignum core test cases."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import random
+
+from typing import Dict, Iterator, List, Tuple
+
+from . import test_case
+from . import test_data_generation
+from . import bignum_common
+from .bignum_data import ADD_SUB_DATA
+
+class BignumCoreTarget(test_data_generation.BaseTarget):
+    #pylint: disable=abstract-method, too-few-public-methods
+    """Target for bignum core test case generation."""
+    target_basename = 'test_suite_bignum_core.generated'
+
+
+class BignumCoreShiftR(BignumCoreTarget, test_data_generation.BaseTest):
+    """Test cases for mbedtls_bignum_core_shift_r()."""
+    count = 0
+    test_function = "mpi_core_shift_r"
+    test_name = "Core shift right"
+
+    DATA = [
+        ('00', '0', [0, 1, 8]),
+        ('01', '1', [0, 1, 2, 8, 64]),
+        ('dee5ca1a7ef10a75', '64-bit',
+         list(range(11)) + [31, 32, 33, 63, 64, 65, 71, 72]),
+        ('002e7ab0070ad57001', '[leading 0 limb]',
+         [0, 1, 8, 63, 64]),
+        ('a1055eb0bb1efa1150ff', '80-bit',
+         [0, 1, 8, 63, 64, 65, 72, 79, 80, 81, 88, 128, 129, 136]),
+        ('020100000000000000001011121314151617', '138-bit',
+         [0, 1, 8, 9, 16, 72, 73, 136, 137, 138, 144]),
+    ]
+
+    def __init__(self, input_hex: str, descr: str, count: int) -> None:
+        self.input_hex = input_hex
+        self.number_description = descr
+        self.shift_count = count
+        self.result = bignum_common.hex_to_int(input_hex) >> count
+
+    def arguments(self) -> List[str]:
+        return ['"{}"'.format(self.input_hex),
+                str(self.shift_count),
+                '"{:0{}x}"'.format(self.result, len(self.input_hex))]
+
+    def description(self) -> str:
+        return 'Core shift {} >> {}'.format(self.number_description,
+                                            self.shift_count)
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        for input_hex, descr, counts in cls.DATA:
+            for count in counts:
+                yield cls(input_hex, descr, count).create_test_case()
+
+
+class BignumCoreShiftL(BignumCoreTarget, bignum_common.ModOperationCommon):
+    """Test cases for mbedtls_bignum_core_shift_l()."""
+
+    BIT_SHIFT_VALUES = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',
+                        '1f', '20', '21', '3f', '40', '41', '47', '48', '4f',
+                        '50', '51', '58', '80', '81', '88']
+    DATA = ["0", "1", "40", "dee5ca1a7ef10a75", "a1055eb0bb1efa1150ff",
+            "002e7ab0070ad57001", "020100000000000000001011121314151617",
+            "1946e2958a85d8863ae21f4904fcc49478412534ed53eaf321f63f2a222"
+            "7a3c63acbf50b6305595f90cfa8327f6db80d986fe96080bcbb5df1bdbe"
+            "9b74fb8dedf2bddb3f8215b54dffd66409323bcc473e45a8fe9d08e77a51"
+            "1698b5dad0416305db7fcf"]
+    arity = 1
+    test_function = "mpi_core_shift_l"
+    test_name = "Core shift(L)"
+    input_style = "arch_split"
+    symbol = "<<"
+    input_values = BIT_SHIFT_VALUES
+    moduli = DATA
+
+    @property
+    def val_n_max_limbs(self) -> int:
+        """ Return the limb count required to store the maximum number that can
+        fit in a the number of digits used by val_n """
+        m = bignum_common.hex_digits_max_int(self.val_n, self.bits_in_limb) - 1
+        return bignum_common.limbs_mpi(m, self.bits_in_limb)
+
+    def arguments(self) -> List[str]:
+        return [bignum_common.quote_str(self.val_n),
+                str(self.int_a)
+                ] + self.result()
+
+    def description(self) -> str:
+        """ Format the output as:
+        #{count} {hex input} ({input bits} {limbs capacity}) << {bit shift} """
+        bits = "({} bits in {} limbs)".format(self.int_n.bit_length(), self.val_n_max_limbs)
+        return "{} #{} {} {} {} {}".format(self.test_name,
+                                           self.count,
+                                           self.val_n,
+                                           bits,
+                                           self.symbol,
+                                           self.int_a)
+
+    def format_result(self, res: int) -> str:
+        # Override to match the zero-padding of leading digits between the output and input.
+        res_str = bignum_common.zfill_match(self.val_n, "{:x}".format(res))
+        return bignum_common.quote_str(res_str)
+
+    def result(self) -> List[str]:
+        result = (self.int_n << self.int_a)
+        # Calculate whether there is space for shifting to the left (leading zero limbs).
+        mx = bignum_common.hex_digits_max_int(self.val_n, self.bits_in_limb)
+        # If there are empty limbs ahead, adjust the bitmask accordingly
+        result = result & (mx - 1)
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+
+class BignumCoreCTLookup(BignumCoreTarget, test_data_generation.BaseTest):
+    """Test cases for mbedtls_mpi_core_ct_uint_table_lookup()."""
+    test_function = "mpi_core_ct_uint_table_lookup"
+    test_name = "Constant time MPI table lookup"
+
+    bitsizes = [
+        (32, "One limb"),
+        (192, "Smallest curve sized"),
+        (512, "Largest curve sized"),
+        (2048, "Small FF/RSA sized"),
+        (4096, "Large FF/RSA sized"),
+        ]
+
+    window_sizes = [0, 1, 2, 3, 4, 5, 6]
+
+    def __init__(self,
+                 bitsize: int, descr: str, window_size: int) -> None:
+        self.bitsize = bitsize
+        self.bitsize_description = descr
+        self.window_size = window_size
+
+    def arguments(self) -> List[str]:
+        return [str(self.bitsize), str(self.window_size)]
+
+    def description(self) -> str:
+        return '{} - {} MPI with {} bit window'.format(
+            BignumCoreCTLookup.test_name,
+            self.bitsize_description,
+            self.window_size
+            )
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        for bitsize, bitsize_description in cls.bitsizes:
+            for window_size in cls.window_sizes:
+                yield (cls(bitsize, bitsize_description, window_size)
+                       .create_test_case())
+
+
+class BignumCoreAddAndAddIf(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for bignum core add and add-if."""
+    count = 0
+    symbol = "+"
+    test_function = "mpi_core_add_and_add_if"
+    test_name = "mpi_core_add_and_add_if"
+    input_style = "arch_split"
+    input_values = ADD_SUB_DATA
+    unique_combinations_only = True
+
+    def result(self) -> List[str]:
+        result = self.int_a + self.int_b
+
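+        # divmod() by the limb boundary splits the sum into the carry out and
+        # the truncated result.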
+        carry, result = divmod(result, self.limb_boundary)
+
+        return [
+            self.format_result(result),
+            str(carry)
+        ]
+
+
+class BignumCoreSub(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for bignum core sub."""
+    count = 0
+    input_style = "arch_split"
+    symbol = "-"
+    test_function = "mpi_core_sub"
+    test_name = "mbedtls_mpi_core_sub"
+    input_values = ADD_SUB_DATA
+
+    def result(self) -> List[str]:
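+        # Model the borrow: when a < b the result wraps modulo the limb boundary.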
+        if self.int_a >= self.int_b:
+            result = self.int_a - self.int_b
+            carry = 0
+        else:
+            result = self.limb_boundary + self.int_a - self.int_b
+            carry = 1
+        return [
+            self.format_result(result),
+            str(carry)
+        ]
+
+
+class BignumCoreMLA(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for fixed-size multiply accumulate."""
+    count = 0
+    test_function = "mpi_core_mla"
+    test_name = "mbedtls_mpi_core_mla"
+
+    input_values = [
+        "0", "1", "fffe", "ffffffff", "100000000", "20000000000000",
+        "ffffffffffffffff", "10000000000000000", "1234567890abcdef0",
+        "fffffffffffffffffefefefefefefefe",
+        "100000000000000000000000000000000",
+        "1234567890abcdef01234567890abcdef0",
+        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+        "1234567890abcdef01234567890abcdef01234567890abcdef01234567890abcdef0",
+        (
+            "4df72d07b4b71c8dacb6cffa954f8d88254b6277099308baf003fab73227f"
+            "34029643b5a263f66e0d3c3fa297ef71755efd53b8fb6cb812c6bbf7bcf17"
+            "9298bd9947c4c8b14324140a2c0f5fad7958a69050a987a6096e9f055fb38"
+            "edf0c5889eca4a0cfa99b45fbdeee4c696b328ddceae4723945901ec02507"
+            "6b12b"
+        )
+    ] # type: List[str]
+    input_scalars = [
+        "0", "3", "fe", "ff", "ffff", "10000", "ffffffff", "100000000",
+        "7f7f7f7f7f7f7f7f", "8000000000000000", "fffffffffffffffe"
+    ] # type: List[str]
+
+    def __init__(self, val_a: str, val_b: str, val_s: str) -> None:
+        super().__init__(val_a, val_b)
+        self.arg_scalar = val_s
+        self.int_scalar = bignum_common.hex_to_int(val_s)
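+        # A scalar wider than one 32-bit limb can only be passed on builds with
+        # 64-bit limbs.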
+        if bignum_common.limbs_mpi(self.int_scalar, 32) > 1:
+            self.dependencies = ["MBEDTLS_HAVE_INT64"]
+
+    def arguments(self) -> List[str]:
+        return [
+            bignum_common.quote_str(self.arg_a),
+            bignum_common.quote_str(self.arg_b),
+            bignum_common.quote_str(self.arg_scalar)
+        ] + self.result()
+
+    def description(self) -> str:
+        """Override and add the additional scalar."""
+        if not self.case_description:
+            self.case_description = "0x{} + 0x{} * 0x{}".format(
+                self.arg_a, self.arg_b, self.arg_scalar
+            )
+        return super().description()
+
+    def result(self) -> List[str]:
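+        # The carry out depends on the limb size, so compute the expected
+        # (result, carry) pair for both 4-byte and 8-byte limbs.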
+        result = self.int_a + (self.int_b * self.int_scalar)
+        bound_val = max(self.int_a, self.int_b)
+        bound_4 = bignum_common.bound_mpi(bound_val, 32)
+        bound_8 = bignum_common.bound_mpi(bound_val, 64)
+        carry_4, remainder_4 = divmod(result, bound_4)
+        carry_8, remainder_8 = divmod(result, bound_8)
+        return [
+            "\"{:x}\"".format(remainder_4),
+            "\"{:x}\"".format(carry_4),
+            "\"{:x}\"".format(remainder_8),
+            "\"{:x}\"".format(carry_8)
+        ]
+
+    @classmethod
+    def get_value_pairs(cls) -> Iterator[Tuple[str, str]]:
+        """Generator to yield pairs of inputs.
+
+        Combinations are first generated from all input values, and then
+        specific cases provided.
+        """
+        yield from super().get_value_pairs()
+        yield from cls.input_cases
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        """Override for additional scalar input."""
+        for a_value, b_value in cls.get_value_pairs():
+            for s_value in cls.input_scalars:
+                cur_op = cls(a_value, b_value, s_value)
+                yield cur_op.create_test_case()
+
+
+class BignumCoreMul(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for bignum core multiplication."""
+    count = 0
+    input_style = "arch_split"
+    symbol = "*"
+    test_function = "mpi_core_mul"
+    test_name = "mbedtls_mpi_core_mul"
+    arity = 2
+    unique_combinations_only = True
+
+    def format_arg(self, val: str) -> str:
+        return val
+
+    def format_result(self, res: int) -> str:
+        res_str = '{:x}'.format(res)
+        a_limbs = bignum_common.limbs_mpi(self.int_a, self.bits_in_limb)
+        b_limbs = bignum_common.limbs_mpi(self.int_b, self.bits_in_limb)
+        hex_digits = bignum_common.hex_digits_for_limb(a_limbs + b_limbs, self.bits_in_limb)
+        return bignum_common.quote_str(self.format_arg(res_str).zfill(hex_digits))
+
+    def result(self) -> List[str]:
+        result = self.int_a * self.int_b
+        return [self.format_result(result)]
+
+
+class BignumCoreMontmul(BignumCoreTarget, test_data_generation.BaseTest):
+    """Test cases for Montgomery multiplication."""
+    count = 0
+    test_function = "mpi_core_montmul"
+    test_name = "mbedtls_mpi_core_montmul"
+
+    start_2_mpi4 = False
+    start_2_mpi8 = False
+
+    replay_test_cases = [
+        (2, 1, 1, 1, "19", "1", "1D"), (2, 1, 1, 1, "7", "1", "9"),
+        (2, 1, 1, 1, "4", "1", "9"),
+        (
+            12, 1, 6, 1, (
+                "3C246D0E059A93A266288A7718419EC741661B474C58C032C5EDAF92709402"
+                "B07CC8C7CE0B781C641A1EA8DB2F4343"
+            ), "1", (
+                "66A198186C18C10B2F5ED9B522752A9830B69916E535C8F047518A889A43A5"
+                "94B6BED27A168D31D4A52F88925AA8F5"
+            )
+        ), (
+            8, 1, 4, 1,
+            "1E442976B0E63D64FCCE74B999E470CA9888165CB75BFA1F340E918CE03C6211",
+            "1", "B3A119602EE213CDE28581ECD892E0F592A338655DCE4CA88054B3D124D0E561"
+        ), (
+            22, 1, 11, 1, (
+                "7CF5AC97304E0B63C65413F57249F59994B0FED1D2A8D3D83ED5FA38560FFB"
+                "82392870D6D08F87D711917FD7537E13B7E125BE407E74157776839B0AC9DB"
+                "23CBDFC696104353E4D2780B2B4968F8D8542306BCA7A2366E"
+            ), "1", (
+                "284139EA19C139EBE09A8111926AAA39A2C2BE12ED487A809D3CB5BC558547"
+                "25B4CDCB5734C58F90B2F60D99CC1950CDBC8D651793E93C9C6F0EAD752500"
+                "A32C56C62082912B66132B2A6AA42ADA923E1AD22CEB7BA0123"
+            )
+        )
+    ] # type: List[Tuple[int, int, int, int, str, str, str]]
+
+    random_test_cases = [
+        ("2", "2", "3", ""), ("1", "2", "3", ""), ("2", "1", "3", ""),
+        ("6", "5", "7", ""), ("3", "4", "7", ""), ("1", "6", "7", ""), ("5", "6", "7", ""),
+        ("3", "4", "B", ""), ("7", "4", "B", ""), ("9", "7", "B", ""), ("2", "a", "B", ""),
+        ("25", "16", "29", "(0x29 is prime)"), ("8", "28", "29", ""),
+        ("18", "21", "29", ""), ("15", "f", "29", ""),
+        ("e2", "ea", "FF", ""), ("43", "72", "FF", ""),
+        ("d8", "70", "FF", ""), ("3c", "7c", "FF", ""),
+        ("99", "b9", "101", "(0x101 is prime)"), ("65", "b2", "101", ""),
+        ("81", "32", "101", ""), ("51", "dd", "101", ""),
+        ("d5", "143", "38B", "(0x38B is prime)"), ("3d", "387", "38B", ""),
+        ("160", "2e5", "38B", ""), ("10f", "137", "38B", ""),
+        ("7dac", "25a", "8003", "(0x8003 is prime)"), ("6f1c", "3286", "8003", ""),
+        ("59ed", "2f3f", "8003", ""), ("6893", "736d", "8003", ""),
+        ("d199", "2832", "10001", "(0x10001 is prime)"), ("c3b2", "3e5b", "10001", ""),
+        ("abe4", "214e", "10001", ""), ("4360", "a05d", "10001", ""),
+        ("3f5a1", "165b2", "7F7F7", ""), ("3bd29", "37863", "7F7F7", ""),
+        ("60c47", "64819", "7F7F7", ""), ("16584", "12c49", "7F7F7", ""),
+        ("1ff03f", "610347", "800009", "(0x800009 is prime)"), ("340fd5", "19812e", "800009", ""),
+        ("3fe2e8", "4d0dc7", "800009", ""), ("40356", "e6392", "800009", ""),
+        ("dd8a1d", "266c0e", "100002B", "(0x100002B is prime)"),
+        ("3fa1cb", "847fd6", "100002B", ""), ("5f439d", "5c3196", "100002B", ""),
+        ("18d645", "f72dc6", "100002B", ""),
+        ("20051ad", "37def6e", "37EEE9D", "(0x37EEE9D is prime)"),
+        ("2ec140b", "3580dbf", "37EEE9D", ""), ("1d91b46", "190d4fc", "37EEE9D", ""),
+        ("34e488d", "1224d24", "37EEE9D", ""),
+        ("2a4fe2cb", "263466a9", "8000000B", "(0x8000000B is prime)"),
+        ("5643fe94", "29a1aefa", "8000000B", ""), ("29633513", "7b007ac4", "8000000B", ""),
+        ("2439cef5", "5c9d5a47", "8000000B", ""),
+        ("4de3cfaa", "50dea178", "8CD626B9", "(0x8CD626B9 is prime)"),
+        ("b8b8563", "10dbbbac", "8CD626B9", ""), ("4e8a6151", "5574ec19", "8CD626B9", ""),
+        ("69224878", "309cfc23", "8CD626B9", ""),
+        ("fb6f7fb6", "afb05423", "10000000F", "(0x10000000F is prime)"),
+        ("8391a243", "26034dcd", "10000000F", ""), ("d26b98c", "14b2d6aa", "10000000F", ""),
+        ("6b9f1371", "a21daf1d", "10000000F", ""),
+        (
+            "9f49435ad", "c8264ade8", "174876E7E9",
+            "0x174876E7E9 is prime (dec) 99999999977"
+        ),
+        ("c402da434", "1fb427acf", "174876E7E9", ""),
+        ("f6ebc2bb1", "1096d39f2a", "174876E7E9", ""),
+        ("153b7f7b6b", "878fda8ff", "174876E7E9", ""),
+        ("2c1adbb8d6", "4384d2d3c6", "8000000017", "(0x8000000017 is prime)"),
+        ("2e4f9cf5fb", "794f3443d9", "8000000017", ""),
+        ("149e495582", "3802b8f7b7", "8000000017", ""),
+        ("7b9d49df82", "69c68a442a", "8000000017", ""),
+        ("683a134600", "6dd80ea9f6", "864CB9076D", "(0x864CB9076D is prime)"),
+        ("13a870ff0d", "59b099694a", "864CB9076D", ""),
+        ("37d06b0e63", "4d2147e46f", "864CB9076D", ""),
+        ("661714f8f4", "22e55df507", "864CB9076D", ""),
+        ("2f0a96363", "52693307b4", "F7F7F7F7F7", ""),
+        ("3c85078e64", "f2275ecb6d", "F7F7F7F7F7", ""),
+        ("352dae68d1", "707775b4c6", "F7F7F7F7F7", ""),
+        ("37ae0f3e0b", "912113040f", "F7F7F7F7F7", ""),
+        ("6dada15e31", "f58ed9eff7", "1000000000F", "(0x1000000000F is prime)"),
+        ("69627a7c89", "cfb5ebd13d", "1000000000F", ""),
+        ("a5e1ad239b", "afc030c731", "1000000000F", ""),
+        ("f1cc45f4c5", "c64ad607c8", "1000000000F", ""),
+        ("2ebad87d2e31", "4c72d90bca78", "800000000005", "(0x800000000005 is prime)"),
+        ("a30b3cc50d", "29ac4fe59490", "800000000005", ""),
+        ("33674e9647b4", "5ec7ee7e72d3", "800000000005", ""),
+        ("3d956f474f61", "74070040257d", "800000000005", ""),
+        ("48348e3717d6", "43fcb4399571", "800795D9BA47", "(0x800795D9BA47 is prime)"),
+        ("5234c03cc99b", "2f3cccb87803", "800795D9BA47", ""),
+        ("3ed13db194ab", "44b8f4ba7030", "800795D9BA47", ""),
+        ("1c11e843bfdb", "95bd1b47b08", "800795D9BA47", ""),
+        ("a81d11cb81fd", "1e5753a3f33d", "1000000000015", "(0x1000000000015 is prime)"),
+        ("688c4db99232", "36fc0cf7ed", "1000000000015", ""),
+        ("f0720cc07e07", "fc76140ed903", "1000000000015", ""),
+        ("2ec61f8d17d1", "d270c85e36d2", "1000000000015", ""),
+        (
+            "6a24cd3ab63820", "ed4aad55e5e348", "100000000000051",
+            "(0x100000000000051 is prime)"
+        ),
+        ("e680c160d3b248", "31e0d8840ed510", "100000000000051", ""),
+        ("a80637e9aebc38", "bb81decc4e1738", "100000000000051", ""),
+        ("9afa5a59e9d630", "be9e65a6d42938", "100000000000051", ""),
+        ("ab5e104eeb71c000", "2cffbd639e9fea00", "ABCDEF0123456789", ""),
+        ("197b867547f68a00", "44b796cf94654800", "ABCDEF0123456789", ""),
+        ("329f9483a04f2c00", "9892f76961d0f000", "ABCDEF0123456789", ""),
+        ("4a2e12dfb4545000", "1aa3e89a69794500", "ABCDEF0123456789", ""),
+        (
+            "8b9acdf013d140f000", "12e4ceaefabdf2b2f00", "25A55A46E5DA99C71C7",
+            "0x25A55A46E5DA99C71C7 is the 3rd repunit prime(dec) 11111111111111111111111"
+        ),
+        ("1b8d960ea277e3f5500", "14418aa980e37dd000", "25A55A46E5DA99C71C7", ""),
+        ("7314524977e8075980", "8172fa45618ccd0d80", "25A55A46E5DA99C71C7", ""),
+        ("ca14f031769be63580", "147a2f3cf2964ca9400", "25A55A46E5DA99C71C7", ""),
+        (
+            "18532ba119d5cd0cf39735c0000", "25f9838e31634844924733000000",
+            "314DC643FB763F2B8C0E2DE00879",
+            "0x314DC643FB763F2B8C0E2DE00879 is (dec)99999999977^3"
+        ),
+        (
+            "a56e2d2517519e3970e70c40000", "ec27428d4bb380458588fa80000",
+            "314DC643FB763F2B8C0E2DE00879", ""
+        ),
+        (
+            "1cb5e8257710e8653fff33a00000", "15fdd42fe440fd3a1d121380000",
+            "314DC643FB763F2B8C0E2DE00879", ""
+        ),
+        (
+            "e50d07a65fc6f93e538ce040000", "1f4b059ca609f3ce597f61240000",
+            "314DC643FB763F2B8C0E2DE00879", ""
+        ),
+        (
+            "1ea3ade786a095d978d387f30df9f20000000",
+            "127c448575f04af5a367a7be06c7da0000000",
+            "47BF19662275FA2F6845C74942ED1D852E521",
+            "0x47BF19662275FA2F6845C74942ED1D852E521 is (dec) 99999999977^4"
+        ),
+        (
+            "16e15b0ca82764e72e38357b1f10a20000000",
+            "43e2355d8514bbe22b0838fdc3983a0000000",
+            "47BF19662275FA2F6845C74942ED1D852E521", ""
+        ),
+        (
+            "be39332529d93f25c3d116c004c620000000",
+            "5cccec42370a0a2c89c6772da801a0000000",
+            "47BF19662275FA2F6845C74942ED1D852E521", ""
+        ),
+        (
+            "ecaa468d90de0eeda474d39b3e1fc0000000",
+            "1e714554018de6dc0fe576bfd3b5660000000",
+            "47BF19662275FA2F6845C74942ED1D852E521", ""
+        ),
+        (
+            "32298816711c5dce46f9ba06e775c4bedfc770e6700000000000000",
+            "8ee751fd5fb24f0b4a653cb3a0c8b7d9e724574d168000000000000",
+            "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931",
+            (
+                "0x97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931"
+                " is (dec) 99999999977^6"
+            )
+        ),
+        (
+            "29213b9df3cfd15f4b428645b67b677c29d1378d810000000000000",
+            "6cbb732c65e10a28872394dfdd1936d5171c3c3aac0000000000000",
+            "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", ""
+        ),
+        (
+            "6f18db06ad4abc52c0c50643dd13098abccd4a232f0000000000000",
+            "7e6bf41f2a86098ad51f98dfc10490ba3e8081bc830000000000000",
+            "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", ""
+        ),
+        (
+            "62d3286cd706ad9d73caff63f1722775d7e8c731208000000000000",
+            "530f7ba02ae2b04c2fe3e3d27ec095925631a6c2528000000000000",
+            "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", ""
+        ),
+        (
+            "a6c6503e3c031fdbf6009a89ed60582b7233c5a85de28b16000000000000000",
+            "75c8ed18270b583f16d442a467d32bf95c5e491e9b8523798000000000000000",
+            "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499",
+            (
+                "0xDD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499"
+                " is (dec) 99999999977^7"
+            )
+        ),
+        (
+            "bf84d1f85cf6b51e04d2c8f4ffd03532d852053cf99b387d4000000000000000",
+            "397ba5a743c349f4f28bc583ecd5f06e0a25f9c6d98f09134000000000000000",
+            "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", ""
+        ),
+        (
+            "6db11c3a4152ed1a2aa6fa34b0903ec82ea1b88908dcb482000000000000000",
+            "ac8ac576a74ad6ca48f201bf89f77350ce86e821358d85920000000000000000",
+            "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", ""
+        ),
+        (
+            "3001d96d7fe8b733f33687646fc3017e3ac417eb32e0ec708000000000000000",
+            "925ddbdac4174e8321a48a32f79640e8cf7ec6f46ea235a80000000000000000",
+            "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", ""
+        ),
+        (
+            "1029048755f2e60dd98c8de6d9989226b6bb4f0db8e46bd1939de560000000000000000000",
+            "51bb7270b2e25cec0301a03e8275213bb6c2f6e6ec93d4d46d36ca0000000000000000000",
+            "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41",
+            (
+                "0x141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146"
+                "380E41 is 99999999977^8"
+            )
+        ),
+        (
+            "1c5337ff982b3ad6611257dbff5bbd7a9920ba2d4f5838a0cc681ce000000000000000000",
+            "520c5d049ca4702031ba728591b665c4d4ccd3b2b86864d4c160fd2000000000000000000",
+            "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41",
+            ""
+        ),
+        (
+            "57074dfa00e42f6555bae624b7f0209f218adf57f73ed34ab0ff90c000000000000000000",
+            "41eb14b6c07bfd3d1fe4f4a610c17cc44fcfcda695db040e011065000000000000000000",
+            "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41",
+            ""
+        ),
+        (
+            "d8ed7feed2fe855e6997ad6397f776158573d425031bf085a615784000000000000000000",
+            "6f121dcd18c578ab5e229881006007bb6d319b179f11015fe958b9c000000000000000000",
+            "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41",
+            ""
+        ),
+        (
+            (
+                "2a462b156180ea5fe550d3758c764e06fae54e626b5f503265a09df76edbdfbf"
+                "a1e6000000000000000000000000"
+            ), (
+                "1136f41d1879fd4fb9e49e0943a46b6704d77c068ee237c3121f9071cfd3e6a0"
+                "0315800000000000000000000000"
+            ), (
+                "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90"
+                "2713E40F51E3B3C214EDFABC451"
+            ), (
+                "0x2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC"
+                "902713E40F51E3B3C214EDFABC451 is (dec) 99999999977^10"
+            )
+        ),
+        (
+            (
+                "c1ac3800dfb3c6954dea391d206200cf3c47f795bf4a5603b4cb88ae7e574de47"
+                "40800000000000000000000000"
+            ), (
+                "c0d16eda0549ede42fa0deb4635f7b7ce061fadea02ee4d85cba4c4f709603419"
+                "3c800000000000000000000000"
+            ), (
+                "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90"
+                "2713E40F51E3B3C214EDFABC451"
+            ), ""
+        ),
+        (
+            (
+                "19e45bb7633094d272588ad2e43bcb3ee341991c6731b6fa9d47c4018d7ce7bba"
+                "5ee800000000000000000000000"
+            ), (
+                "1e4f83166ae59f6b9cc8fd3e7677ed8bfc01bb99c98bd3eb084246b64c1e18c33"
+                "65b800000000000000000000000"
+            ), (
+                "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90"
+                "2713E40F51E3B3C214EDFABC451"
+            ), ""
+        ),
+        (
+            (
+                "1aa93395fad5f9b7f20b8f9028a054c0bb7c11bb8520e6a95e5a34f06cb70bcdd"
+                "01a800000000000000000000000"
+            ), (
+                "54b45afa5d4310192f8d224634242dd7dcfb342318df3d9bd37b4c614788ba13b"
+                "8b000000000000000000000000"
+            ), (
+                "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90"
+                "2713E40F51E3B3C214EDFABC451"
+            ), ""
+        ),
+        (
+            (
+                "544f2628a28cfb5ce0a1b7180ee66b49716f1d9476c466c57f0c4b23089917843"
+                "06d48f78686115ee19e25400000000000000000000000000000000"
+            ), (
+                "677eb31ef8d66c120fa872a60cd47f6e10cbfdf94f90501bd7883cba03d185be0"
+                "a0148d1625745e9c4c827300000000000000000000000000000000"
+            ), (
+                "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1"
+                "1DABD6E6144BEF37C6800000000000000000000000000000000051"
+            ), (
+                "0x8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBF"
+                "A11DABD6E6144BEF37C6800000000000000000000000000000000051 is prime,"
+                " (dec) 10^143 + 3^4"
+            )
+        ),
+        (
+            (
+                "76bb3470985174915e9993522aec989666908f9e8cf5cb9f037bf4aee33d8865c"
+                "b6464174795d07e30015b80000000000000000000000000000000"
+            ), (
+                "6aaaf60d5784dcef612d133613b179a317532ecca0eed40b8ad0c01e6d4a6d8c7"
+                "9a52af190abd51739009a900000000000000000000000000000000"
+            ), (
+                "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1"
+                "1DABD6E6144BEF37C6800000000000000000000000000000000051"
+            ), ""
+        ),
+        (
+            (
+                "6cfdd6e60912e441d2d1fc88f421b533f0103a5322ccd3f4db84861643ad63fd6"
+                "3d1d8cfbc1d498162786ba00000000000000000000000000000000"
+            ), (
+                "1177246ec5e93814816465e7f8f248b350d954439d35b2b5d75d917218e7fd5fb"
+                "4c2f6d0667f9467fdcf33400000000000000000000000000000000"
+            ), (
+                "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1"
+                "1DABD6E6144BEF37C6800000000000000000000000000000000051"
+            ), ""
+        ),
+        (
+            (
+                "7a09a0b0f8bbf8057116fb0277a9bdf3a91b5eaa8830d448081510d8973888be5"
+                "a9f0ad04facb69aa3715f00000000000000000000000000000000"
+            ), (
+                "764dec6c05a1c0d87b649efa5fd94c91ea28bffb4725d4ab4b33f1a3e8e3b314d"
+                "799020e244a835a145ec9800000000000000000000000000000000"
+            ), (
+                "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1"
+                "1DABD6E6144BEF37C6800000000000000000000000000000000051"
+            ), ""
+        )
+    ] # type: List[Tuple[str, str, str, str]]
+
+    def __init__(
+            self, val_a: str, val_b: str, val_n: str, case_description: str = ""
+        ):
+        self.case_description = case_description
+        self.arg_a = val_a
+        self.int_a = bignum_common.hex_to_int(val_a)
+        self.arg_b = val_b
+        self.int_b = bignum_common.hex_to_int(val_b)
+        self.arg_n = val_n
+        self.int_n = bignum_common.hex_to_int(val_n)
+
+        limbs_a4 = bignum_common.limbs_mpi(self.int_a, 32)
+        limbs_a8 = bignum_common.limbs_mpi(self.int_a, 64)
+        self.limbs_b4 = bignum_common.limbs_mpi(self.int_b, 32)
+        self.limbs_b8 = bignum_common.limbs_mpi(self.int_b, 64)
+        self.limbs_an4 = bignum_common.limbs_mpi(self.int_n, 32)
+        self.limbs_an8 = bignum_common.limbs_mpi(self.int_n, 64)
+
+        if limbs_a4 > self.limbs_an4 or limbs_a8 > self.limbs_an8:
+            raise Exception("Limbs of input A ({}) exceeds N ({})".format(
+                self.arg_a, self.arg_n
+            ))
+
+    def arguments(self) -> List[str]:
+        return [
+            str(self.limbs_an4), str(self.limbs_b4),
+            str(self.limbs_an8), str(self.limbs_b8),
+            bignum_common.quote_str(self.arg_a),
+            bignum_common.quote_str(self.arg_b),
+            bignum_common.quote_str(self.arg_n)
+        ] + self.result()
+
+    def description(self) -> str:
+        if self.case_description != "replay":
+            if not self.start_2_mpi4 and self.limbs_an4 > 1:
+                tmp = "(start of 2-MPI 4-byte bignums) "
+                self.__class__.start_2_mpi4 = True
+            elif not self.start_2_mpi8 and self.limbs_an8 > 1:
+                tmp = "(start of 2-MPI 8-byte bignums) "
+                self.__class__.start_2_mpi8 = True
+            else:
+                tmp = "(gen) "
+            self.case_description = tmp + self.case_description
+        return super().description()
+
+    def result(self) -> List[str]:
+        """Get the result of the operation."""
+        r4 = bignum_common.bound_mpi_limbs(self.limbs_an4, 32)
+        i4 = bignum_common.invmod(r4, self.int_n)
+        x4 = self.int_a * self.int_b * i4
+        x4 = x4 % self.int_n
+
+        r8 = bignum_common.bound_mpi_limbs(self.limbs_an8, 64)
+        i8 = bignum_common.invmod(r8, self.int_n)
+        x8 = self.int_a * self.int_b * i8
+        x8 = x8 % self.int_n
+        return [
+            "\"{:x}\"".format(x4),
+            "\"{:x}\"".format(x8)
+        ]
+
+    def set_limbs(
+            self, limbs_an4: int, limbs_b4: int, limbs_an8: int, limbs_b8: int
+        ) -> None:
+        """Set number of limbs for each input.
+
+        Replaces default values set during initialization.
+        """
+        self.limbs_an4 = limbs_an4
+        self.limbs_b4 = limbs_b4
+        self.limbs_an8 = limbs_an8
+        self.limbs_b8 = limbs_b8
+
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        """Generate replay and randomly generated test cases."""
+        # Test cases which replay captured invocations during unit test runs.
+        for limbs_an4, limbs_b4, limbs_an8, limbs_b8, a, b, n in cls.replay_test_cases:
+            cur_op = cls(a, b, n, case_description="replay")
+            cur_op.set_limbs(limbs_an4, limbs_b4, limbs_an8, limbs_b8)
+            yield cur_op.create_test_case()
+        # Random test cases were generated using mpi_modmul_case_generate().
+        # They use a mixture of primes and odd numbers as N, with four
+        # randomly generated cases for each N.
+        for a, b, n, description in cls.random_test_cases:
+            cur_op = cls(a, b, n, case_description=description)
+            yield cur_op.create_test_case()
+
+
+def mpi_modmul_case_generate() -> None:
+    """Generate valid inputs for montmul tests using moduli.
+
+    For each modulus, generates random values for A and B and simple descriptions
+    for the test case.
+    """
+    moduli = [
+        ("3", ""), ("7", ""), ("B", ""), ("29", ""), ("FF", ""),
+        ("101", ""), ("38B", ""), ("8003", ""), ("10001", ""),
+        ("7F7F7", ""), ("800009", ""), ("100002B", ""), ("37EEE9D", ""),
+        ("8000000B", ""), ("8CD626B9", ""), ("10000000F", ""),
+        ("174876E7E9", "is prime (dec) 99999999977"),
+        ("8000000017", ""), ("864CB9076D", ""), ("F7F7F7F7F7", ""),
+        ("1000000000F", ""), ("800000000005", ""), ("800795D9BA47", ""),
+        ("1000000000015", ""), ("100000000000051", ""), ("ABCDEF0123456789", ""),
+        (
+            "25A55A46E5DA99C71C7",
+            "is the 3rd repunit prime (dec) 11111111111111111111111"
+        ),
+        ("314DC643FB763F2B8C0E2DE00879", "is (dec)99999999977^3"),
+        ("47BF19662275FA2F6845C74942ED1D852E521", "is (dec) 99999999977^4"),
+        (
+            "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931",
+            "is (dec) 99999999977^6"
+        ),
+        (
+            "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499",
+            "is (dec) 99999999977^7"
+        ),
+        (
+            "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41",
+            "is (dec) 99999999977^8"
+        ),
+        (
+            (
+                "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E283"
+                "3EC902713E40F51E3B3C214EDFABC451"
+            ),
+            "is (dec) 99999999977^10"
+        ),
+        (
+            "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA11"
+            "DABD6E6144BEF37C6800000000000000000000000000000000051",
+            "is prime, (dec) 10^143 + 3^4"
+        )
+    ] # type: List[Tuple[str, str]]
+    primes = [
+        "3", "7", "B", "29", "101", "38B", "8003", "10001", "800009",
+        "100002B", "37EEE9D", "8000000B", "8CD626B9",
+        # From here they require > 1 4-byte MPI
+        "10000000F", "174876E7E9", "8000000017", "864CB9076D", "1000000000F",
+        "800000000005", "800795D9BA47", "1000000000015", "100000000000051",
+        # From here they require > 1 8-byte MPI
+        "25A55A46E5DA99C71C7",      # this is 11111111111111111111111 decimal
+        # 10^143 + 3^4: (which is prime)
+        # 100000000000000000000000000000000000000000000000000000000000000000000000000000
+        # 000000000000000000000000000000000000000000000000000000000000000081
+        (
+            "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA11"
+            "DABD6E6144BEF37C6800000000000000000000000000000000051"
+        )
+    ] # type: List[str]
+    generated_inputs = []
+    for mod, description in moduli:
+        n = bignum_common.hex_to_int(mod)
+        mod_read = "{:x}".format(n)
+        case_count = 3 if n < 5 else 4
+        cases = {} # type: Dict[int, int]
+        i = 0
+        while i < case_count:
+            a = random.randint(1, n)
+            b = random.randint(1, n)
+            if cases.get(a) == b:
+                continue
+            cases[a] = b
+            if description:
+                out_description = "0x{} {}".format(mod_read, description)
+            elif i == 0 and len(mod) > 1 and mod in primes:
+                out_description = "(0x{} is prime)"
+            else:
+                out_description = ""
+            generated_inputs.append(
+                ("{:x}".format(a), "{:x}".format(b), mod, out_description)
+            )
+            i += 1
+    print(generated_inputs)
+
+
+class BignumCoreExpMod(BignumCoreTarget, bignum_common.ModOperationCommon):
+    """Test cases for bignum core exponentiation."""
+    symbol = "^"
+    test_function = "mpi_core_exp_mod"
+    test_name = "Core modular exponentiation (Mongtomery form only)"
+    input_style = "fixed"
+    montgomery_form_a = True
+
+    def result(self) -> List[str]:
+        # Result has to be given in Montgomery form too
+        result = pow(self.int_a, self.int_b, self.int_n)
+        mont_result = self.to_montgomery(result)
+        return [self.format_result(mont_result)]
+
+    @property
+    def is_valid(self) -> bool:
+        # The base needs to be canonical, but the exponent can be larger than
+        # the modulus (see for example exponent blinding)
+        return bool(self.int_a < self.int_n)
+
+
+class BignumCoreSubInt(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for bignum core sub int."""
+    count = 0
+    symbol = "-"
+    test_function = "mpi_core_sub_int"
+    test_name = "mpi_core_sub_int"
+    input_style = "arch_split"
+
+    @property
+    def is_valid(self) -> bool:
+        # This is "sub int", so b is only one limb
+        if bignum_common.limbs_mpi(self.int_b, self.bits_in_limb) > 1:
+            return False
+        return True
+
+    # Overriding because we don't want leading zeros on b
+    @property
+    def arg_b(self) -> str:
+        return self.val_b
+
+    def result(self) -> List[str]:
+        result = self.int_a - self.int_b
+
+        borrow, result = divmod(result, self.limb_boundary)
+
+        # Borrow will be -1 if non-zero, but we want it to be 1 in the test data
+        return [
+            self.format_result(result),
+            str(-borrow)
+        ]
+
+class BignumCoreZeroCheckCT(BignumCoreTarget, bignum_common.OperationCommon):
+    """Test cases for bignum core zero check (constant flow)."""
+    count = 0
+    symbol = "== 0"
+    test_function = "mpi_core_check_zero_ct"
+    test_name = "mpi_core_check_zero_ct"
+    input_style = "variable"
+    arity = 1
+    suffix = True
+
+    def result(self) -> List[str]:
+        result = 1 if self.int_a == 0 else 0
+        return [str(result)]
diff --git a/framework/scripts/mbedtls_framework/bignum_data.py b/framework/scripts/mbedtls_framework/bignum_data.py
new file mode 100644
index 0000000..5c6c2c8
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/bignum_data.py
@@ -0,0 +1,159 @@
+"""Base values and datasets for bignum generated tests and helper functions that
+produced them."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import random
+
+# Functions calling these were used to produce test data and are kept only for
+# reproducibility; they are not used by the test generation framework/classes.
+try:
+    from Cryptodome.Util.number import isPrime, getPrime #type: ignore #pylint: disable=import-error
+except ImportError:
+    pass
+
+# Generated by bignum_common.gen_safe_prime(192,1)
+SAFE_PRIME_192_BIT_SEED_1 = "d1c127a667786703830500038ebaef20e5a3e2dc378fb75b"
+
+# First number generated by random.getrandbits(192) - seed(2,2), not a prime
+RANDOM_192_BIT_SEED_2_NO1 = "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"
+
+# Second number generated by random.getrandbits(192) - seed(2,2), not a prime
+RANDOM_192_BIT_SEED_2_NO2 = "cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd"
+
+# Third number generated by random.getrandbits(192) - seed(2,2), not a prime
+RANDOM_192_BIT_SEED_2_NO3 = "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f"
+
+# Fourth number generated by random.getrandbits(192) - seed(2,2), not a prime
+RANDOM_192_BIT_SEED_2_NO4 = "ffed9235288bc781ae66267594c9c9500925e4749b575bd1"
+
+# Ninth number generated by random.getrandbits(192) - seed(2,2), not a prime
+RANDOM_192_BIT_SEED_2_NO9 = "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"
+
+# Generated by bignum_common.gen_safe_prime(1024,3)
+SAFE_PRIME_1024_BIT_SEED_3 = ("c93ba7ec74d96f411ba008bdb78e63ff11bb5df46a51e16b"
+                              "2c9d156f8e4e18abf5e052cb01f47d0d1925a77f60991577"
+                              "e128fb6f52f34a27950a594baadd3d8057abeb222cf3cca9"
+                              "62db16abf79f2ada5bd29ab2f51244bf295eff9f6aaba130"
+                              "2efc449b128be75eeaca04bc3c1a155d11d14e8be32a2c82"
+                              "87b3996cf6ad5223")
+
+# First number generated by random.getrandbits(1024) - seed(4,2), not a prime
+RANDOM_1024_BIT_SEED_4_NO1 = ("6905269ed6f0b09f165c8ce36e2f24b43000de01b2ed40ed"
+                              "3addccb2c33be0ac79d679346d4ac7a5c3902b38963dc6e8"
+                              "534f45738d048ec0f1099c6c3e1b258fd724452ccea71ff4"
+                              "a14876aeaff1a098ca5996666ceab360512bd13110722311"
+                              "710cf5327ac435a7a97c643656412a9b8a1abcd1a6916c74"
+                              "da4f9fc3c6da5d7")
+
+# Second number generated by random.getrandbits(1024) - seed(4,2), not a prime
+RANDOM_1024_BIT_SEED_4_NO2 = ("f1cfd99216df648647adec26793d0e453f5082492d83a823"
+                              "3fb62d2c81862fc9634f806fabf4a07c566002249b191bf4"
+                              "d8441b5616332aca5f552773e14b0190d93936e1daca3c06"
+                              "f5ff0c03bb5d7385de08caa1a08179104a25e4664f5253a0"
+                              "2a3187853184ff27459142deccea264542a00403ce80c4b0"
+                              "a4042bb3d4341aad")
+
+# Third number generated by random.getrandbits(1024) - seed(4,2), not a prime
+RANDOM_1024_BIT_SEED_4_NO3 = ("14c15c910b11ad28cc21ce88d0060cc54278c2614e1bcb38"
+                              "3bb4a570294c4ea3738d243a6e58d5ca49c7b59b995253fd"
+                              "6c79a3de69f85e3131f3b9238224b122c3e4a892d9196ada"
+                              "4fcfa583e1df8af9b474c7e89286a1754abcb06ae8abb93f"
+                              "01d89a024cdce7a6d7288ff68c320f89f1347e0cdd905ecf"
+                              "d160c5d0ef412ed6")
+
+# Fourth number generated by random.getrandbits(1024) - seed(4,2), not a prime
+RANDOM_1024_BIT_SEED_4_NO4 = ("32decd6b8efbc170a26a25c852175b7a96b98b5fbf37a2be"
+                              "6f98bca35b17b9662f0733c846bbe9e870ef55b1a1f65507"
+                              "a2909cb633e238b4e9dd38b869ace91311021c9e32111ac1"
+                              "ac7cc4a4ff4dab102522d53857c49391b36cc9aa78a330a1"
+                              "a5e333cb88dcf94384d4cd1f47ca7883ff5a52f1a05885ac"
+                              "7671863c0bdbc23a")
+
+# Fifth number generated by random.getrandbits(1024) - seed(4,2), not a prime
+RANDOM_1024_BIT_SEED_4_NO5 = ("53be4721f5b9e1f5acdac615bc20f6264922b9ccf469aef8"
+                              "f6e7d078e55b85dd1525f363b281b8885b69dc230af5ac87"
+                              "0692b534758240df4a7a03052d733dcdef40af2e54c0ce68"
+                              "1f44ebd13cc75f3edcb285f89d8cf4d4950b16ffc3e1ac3b"
+                              "4708d9893a973000b54a23020fc5b043d6e4a51519d9c9cc"
+                              "52d32377e78131c1")
+
+# 192-bit and 1024-bit numbers are included because these are the shortest
+# sizes required for ECC and RSA respectively.
+INPUTS_DEFAULT = [
+        "0", "1", # corner cases
+        "2", "3", # small primes
+        "4",      # non-prime even
+        "38",     # small random
+        SAFE_PRIME_192_BIT_SEED_1,  # prime
+        RANDOM_192_BIT_SEED_2_NO1,  # not a prime
+        RANDOM_192_BIT_SEED_2_NO2,  # not a prime
+        SAFE_PRIME_1024_BIT_SEED_3, # prime
+        RANDOM_1024_BIT_SEED_4_NO1, # not a prime
+        RANDOM_1024_BIT_SEED_4_NO3, # not a prime
+        RANDOM_1024_BIT_SEED_4_NO2, # largest (not a prime)
+        ]
+
+ADD_SUB_DATA = [
+    "0", "1", "3", "f", "fe", "ff", "100", "ff00",
+    "fffe", "ffff", "10000", # 2^16 - 1, 2^16, 2^16 + 1
+    "fffffffe", "ffffffff", "100000000", # 2^32 - 1, 2^32, 2^32 + 1
+    "1f7f7f7f7f7f7f",
+    "8000000000000000", "fefefefefefefefe",
+    "fffffffffffffffe", "ffffffffffffffff", "10000000000000000", # 2^64 - 1, 2^64, 2^64 + 1
+    "1234567890abcdef0",
+    "fffffffffffffffffffffffe",
+    "ffffffffffffffffffffffff",
+    "1000000000000000000000000",
+    "fffffffffffffffffefefefefefefefe",
+    "fffffffffffffffffffffffffffffffe",
+    "ffffffffffffffffffffffffffffffff",
+    "100000000000000000000000000000000",
+    "1234567890abcdef01234567890abcdef0",
+    "fffffffffffffffffffffffffffffffffffffffffffffffffefefefefefefefe",
+    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+    "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+    "10000000000000000000000000000000000000000000000000000000000000000",
+    "1234567890abcdef01234567890abcdef01234567890abcdef01234567890abcdef0",
+    ]
+
+# Only odd moduli are present, as the new bignum code currently supports
+# odd moduli only.
+MODULI_DEFAULT = [
+        "53", # safe prime
+        "45", # non-prime
+        SAFE_PRIME_192_BIT_SEED_1,  # safe prime
+        RANDOM_192_BIT_SEED_2_NO4,  # not a prime
+        SAFE_PRIME_1024_BIT_SEED_3, # safe prime
+        RANDOM_1024_BIT_SEED_4_NO5, # not a prime
+        ]
+
+# Some functions, e.g. mbedtls_mpi_mod_raw_inv_prime(), only support prime moduli.
+ONLY_PRIME_MODULI = [
+        "53", # safe prime
+        "8ac72304057392b5",     # 9999999997777777333 (longer, not safe, prime)
+        # The next prime has a different R in Montgomery form depending on
+        # whether 32- or 64-bit MPIs are used.
+        "152d02c7e14af67fe0bf", # 99999999999999999991999
+        SAFE_PRIME_192_BIT_SEED_1,  # safe prime
+        SAFE_PRIME_1024_BIT_SEED_3, # safe prime
+        ]
+
+def __gen_safe_prime(bits, seed):
+    '''
+    Generate a safe prime.
+
+    This function is intended for generating constants offline and shouldn't be
+    used in test generation classes.
+
+    Requires pycryptodomex (for getPrime and isPrime) and Python 3.9 or later
+    (for random.Random.randbytes).
+    '''
+    rng = random.Random()
+    # We want reproducibility across Python versions.
+    rng.seed(seed, version=2)
+    while True:
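+        # A safe prime p has the form 2q + 1 with q prime: generate a candidate
+        # q and check that p is prime as well.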
+        prime = 2*getPrime(bits-1, rng.randbytes)+1 #pylint: disable=no-member
+        if isPrime(prime, 1e-30):
+            return prime
diff --git a/framework/scripts/mbedtls_framework/bignum_mod.py b/framework/scripts/mbedtls_framework/bignum_mod.py
new file mode 100644
index 0000000..f554001
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/bignum_mod.py
@@ -0,0 +1,102 @@
+"""Framework classes for generation of bignum mod test cases."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+from typing import Dict, List
+
+from . import test_data_generation
+from . import bignum_common
+from .bignum_data import ONLY_PRIME_MODULI
+
+class BignumModTarget(test_data_generation.BaseTarget):
+    #pylint: disable=abstract-method, too-few-public-methods
+    """Target for bignum mod test case generation."""
+    target_basename = 'test_suite_bignum_mod.generated'
+
+
+class BignumModMul(bignum_common.ModOperationCommon,
+                   BignumModTarget):
+    # pylint:disable=duplicate-code
+    """Test cases for bignum mpi_mod_mul()."""
+    symbol = "*"
+    test_function = "mpi_mod_mul"
+    test_name = "mbedtls_mpi_mod_mul"
+    input_style = "arch_split"
+    arity = 2
+
+    def arguments(self) -> List[str]:
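+        # Operands and the expected result are supplied in Montgomery form,
+        # matching the representation the test function works with.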
+        return [self.format_result(self.to_montgomery(self.int_a)),
+                self.format_result(self.to_montgomery(self.int_b)),
+                bignum_common.quote_str(self.arg_n)
+               ] + self.result()
+
+    def result(self) -> List[str]:
+        result = (self.int_a * self.int_b) % self.int_n
+        return [self.format_result(self.to_montgomery(result))]
+
+
+class BignumModSub(bignum_common.ModOperationCommon, BignumModTarget):
+    """Test cases for bignum mpi_mod_sub()."""
+    symbol = "-"
+    test_function = "mpi_mod_sub"
+    test_name = "mbedtls_mpi_mod_sub"
+    input_style = "fixed"
+    arity = 2
+
+    def result(self) -> List[str]:
+        result = (self.int_a - self.int_b) % self.int_n
+        # To make negative tests easier, append 0 for success to the
+        # generated cases
+        return [self.format_result(result), "0"]
+
+class BignumModInvNonMont(bignum_common.ModOperationCommon, BignumModTarget):
+    """Test cases for bignum mpi_mod_inv() - not in Montgomery form."""
+    moduli = ONLY_PRIME_MODULI  # for now only prime moduli supported
+    symbol = "^ -1"
+    test_function = "mpi_mod_inv_non_mont"
+    test_name = "mbedtls_mpi_mod_inv non-Mont. form"
+    input_style = "fixed"
+    arity = 1
+    suffix = True
+    disallow_zero_a = True
+
+    def result(self) -> List[str]:
+        result = bignum_common.invmod_positive(self.int_a, self.int_n)
+        # To make negative tests easier, append 0 for success to the
+        # generated cases
+        return [self.format_result(result), "0"]
+
+class BignumModInvMont(bignum_common.ModOperationCommon, BignumModTarget):
+    """Test cases for bignum mpi_mod_inv() - Montgomery form."""
+    moduli = ONLY_PRIME_MODULI  # for now only prime moduli supported
+    symbol = "^ -1"
+    test_function = "mpi_mod_inv_mont"
+    test_name = "mbedtls_mpi_mod_inv Mont. form"
+    input_style = "arch_split"  # Mont. form requires arch_split
+    arity = 1
+    suffix = True
+    disallow_zero_a = True
+    montgomery_form_a = True
+
+    def result(self) -> List[str]:
+        result = bignum_common.invmod_positive(self.int_a, self.int_n)
+        mont_result = self.to_montgomery(result)
+        # To make negative tests easier, append 0 for success to the
+        # generated cases
+        return [self.format_result(mont_result), "0"]
+
+
+class BignumModAdd(bignum_common.ModOperationCommon, BignumModTarget):
+    """Test cases for bignum mpi_mod_add()."""
+    count = 0
+    symbol = "+"
+    test_function = "mpi_mod_add"
+    test_name = "mbedtls_mpi_mod_add"
+    input_style = "fixed"
+
+    def result(self) -> List[str]:
+        result = (self.int_a + self.int_b) % self.int_n
+        # To make negative tests easier, append "0" for success to the
+        # generated cases
+        return [self.format_result(result), "0"]
diff --git a/framework/scripts/mbedtls_framework/bignum_mod_raw.py b/framework/scripts/mbedtls_framework/bignum_mod_raw.py
new file mode 100644
index 0000000..37ad27a
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/bignum_mod_raw.py
@@ -0,0 +1,242 @@
+"""Framework classes for generation of bignum mod_raw test cases."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+from typing import Iterator, List
+
+from . import test_case
+from . import test_data_generation
+from . import bignum_common
+from .bignum_data import ONLY_PRIME_MODULI
+
+class BignumModRawTarget(test_data_generation.BaseTarget):
+    #pylint: disable=abstract-method, too-few-public-methods
+    """Target for bignum mod_raw test case generation."""
+    target_basename = 'test_suite_bignum_mod_raw.generated'
+
+
+class BignumModRawSub(bignum_common.ModOperationCommon,
+                      BignumModRawTarget):
+    """Test cases for bignum mpi_mod_raw_sub()."""
+    symbol = "-"
+    test_function = "mpi_mod_raw_sub"
+    test_name = "mbedtls_mpi_mod_raw_sub"
+    input_style = "fixed"
+    arity = 2
+
+    def arguments(self) -> List[str]:
+        return [bignum_common.quote_str(n) for n in [self.arg_a,
+                                                     self.arg_b,
+                                                     self.arg_n]
+               ] + self.result()
+
+    def result(self) -> List[str]:
+        result = (self.int_a - self.int_b) % self.int_n
+        return [self.format_result(result)]
+
+class BignumModRawFixQuasiReduction(bignum_common.ModOperationCommon,
+                                    BignumModRawTarget):
+    """Test cases for ecp quasi_reduction()."""
+    symbol = "-"
+    test_function = "mpi_mod_raw_fix_quasi_reduction"
+    test_name = "fix_quasi_reduction"
+    input_style = "fixed"
+    arity = 1
+
+    # Extend the default values with n < x < 2n
+    input_values = bignum_common.ModOperationCommon.input_values + [
+        "73",
+
+        # First number generated by random.getrandbits(1024) - seed(3,2)
+        "ea7b5bf55eb561a4216363698b529b4a97b750923ceb3ffd",
+
+        # First number generated by random.getrandbits(1024) - seed(1,2)
+        ("cd447e35b8b6d8fe442e3d437204e52db2221a58008a05a6c4647159c324c985"
+         "9b810e766ec9d28663ca828dd5f4b3b2e4b06ce60741c7a87ce42c8218072e8c"
+         "35bf992dc9e9c616612e7696a6cecc1b78e510617311d8a3c2ce6f447ed4d57b"
+         "1e2feb89414c343c1027c4d1c386bbc4cd613e30d8f16adf91b7584a2265b1f5")
+    ] # type: List[str]
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
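+        # Quasi-reduction only needs to handle inputs below 2N.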
+        return bool(self.int_a < 2 * self.int_n)
+
+class BignumModRawMul(bignum_common.ModOperationCommon,
+                      BignumModRawTarget):
+    """Test cases for bignum mpi_mod_raw_mul()."""
+    symbol = "*"
+    test_function = "mpi_mod_raw_mul"
+    test_name = "mbedtls_mpi_mod_raw_mul"
+    input_style = "arch_split"
+    arity = 2
+
+    def arguments(self) -> List[str]:
+        return [self.format_result(self.to_montgomery(self.int_a)),
+                self.format_result(self.to_montgomery(self.int_b)),
+                bignum_common.quote_str(self.arg_n)
+               ] + self.result()
+
+    def result(self) -> List[str]:
+        result = (self.int_a * self.int_b) % self.int_n
+        return [self.format_result(self.to_montgomery(result))]
+
+
+class BignumModRawInvPrime(bignum_common.ModOperationCommon,
+                           BignumModRawTarget):
+    """Test cases for bignum mpi_mod_raw_inv_prime()."""
+    moduli = ONLY_PRIME_MODULI
+    symbol = "^ -1"
+    test_function = "mpi_mod_raw_inv_prime"
+    test_name = "mbedtls_mpi_mod_raw_inv_prime (Montgomery form only)"
+    input_style = "arch_split"
+    arity = 1
+    suffix = True
+    montgomery_form_a = True
+    disallow_zero_a = True
+
+    def result(self) -> List[str]:
+        result = bignum_common.invmod_positive(self.int_a, self.int_n)
+        mont_result = self.to_montgomery(result)
+        return [self.format_result(mont_result)]
+
+
+class BignumModRawAdd(bignum_common.ModOperationCommon,
+                      BignumModRawTarget):
+    """Test cases for bignum mpi_mod_raw_add()."""
+    symbol = "+"
+    test_function = "mpi_mod_raw_add"
+    test_name = "mbedtls_mpi_mod_raw_add"
+    input_style = "fixed"
+    arity = 2
+
+    def result(self) -> List[str]:
+        result = (self.int_a + self.int_b) % self.int_n
+        return [self.format_result(result)]
+
+
+class BignumModRawConvertRep(bignum_common.ModOperationCommon,
+                             BignumModRawTarget):
+    # This is an abstract class, it's ok to have unimplemented methods.
+    #pylint: disable=abstract-method
+    """Test cases for representation conversion."""
+    symbol = ""
+    input_style = "arch_split"
+    arity = 1
+    rep = bignum_common.ModulusRepresentation.INVALID
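+    # INVALID is a placeholder; the actual representation is injected per test
+    # case via set_representation().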
+
+    def set_representation(self, r: bignum_common.ModulusRepresentation) -> None:
+        self.rep = r
+
+    def arguments(self) -> List[str]:
+        return ([bignum_common.quote_str(self.arg_n), self.rep.symbol(),
+                 bignum_common.quote_str(self.arg_a)] +
+                self.result())
+
+    def description(self) -> str:
+        base = super().description()
+        mod_with_rep = 'mod({})'.format(self.rep.name)
+        return base.replace('mod', mod_with_rep, 1)
+
+    @classmethod
+    def test_cases_for_values(cls, rep: bignum_common.ModulusRepresentation,
+                              n: str, a: str) -> Iterator[test_case.TestCase]:
+        """Emit test cases for the given values (if any).
+
+        This may emit no test cases if a isn't valid for the modulus n,
+        or multiple test cases if rep requires different data depending
+        on the limb size.
+        """
+        for bil in cls.limb_sizes:
+            test_object = cls(n, a, bits_in_limb=bil)
+            test_object.set_representation(rep)
+            # The class is set to having separate test cases for each limb
+            # size, because the Montgomery representation requires it.
+            # But other representations don't require it. So for other
+            # representations, emit a single test case with no dependency
+            # on the limb size.
+            if rep is not bignum_common.ModulusRepresentation.MONTGOMERY:
+                test_object.dependencies = \
+                    [dep for dep in test_object.dependencies
+                     if not dep.startswith('MBEDTLS_HAVE_INT')]
+            if test_object.is_valid:
+                yield test_object.create_test_case()
+            if rep is not bignum_common.ModulusRepresentation.MONTGOMERY:
+                # A single test case (emitted, or skipped due to invalidity)
+                # is enough, since this test case doesn't depend on the
+                # limb size.
+                break
+
+    # The parent class doesn't support non-bignum parameters. So we override
+    # test generation, in order to have the representation as a parameter.
+    @classmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+
+        for rep in bignum_common.ModulusRepresentation.supported_representations():
+            for n in cls.moduli:
+                for a in cls.input_values:
+                    yield from cls.test_cases_for_values(rep, n, a)
+
+class BignumModRawCanonicalToModulusRep(BignumModRawConvertRep):
+    """Test cases for mpi_mod_raw_canonical_to_modulus_rep."""
+    test_function = "mpi_mod_raw_canonical_to_modulus_rep"
+    test_name = "Rep canon->mod"
+
+    def result(self) -> List[str]:
+        return [self.format_result(self.convert_from_canonical(self.int_a, self.rep))]
+
+class BignumModRawModulusToCanonicalRep(BignumModRawConvertRep):
+    """Test cases for mpi_mod_raw_modulus_to_canonical_rep."""
+    test_function = "mpi_mod_raw_modulus_to_canonical_rep"
+    test_name = "Rep mod->canon"
+
+    @property
+    def arg_a(self) -> str:
+        return self.format_arg("{:x}".format(self.convert_from_canonical(self.int_a, self.rep)))
+
+    def result(self) -> List[str]:
+        return [self.format_result(self.int_a)]
+
+
+class BignumModRawConvertToMont(bignum_common.ModOperationCommon,
+                                BignumModRawTarget):
+    """ Test cases for mpi_mod_raw_to_mont_rep(). """
+    test_function = "mpi_mod_raw_to_mont_rep"
+    test_name = "Convert into Mont: "
+    symbol = "R *"
+    input_style = "arch_split"
+    arity = 1
+
+    def result(self) -> List[str]:
+        result = self.to_montgomery(self.int_a)
+        return [self.format_result(result)]
+
+class BignumModRawConvertFromMont(bignum_common.ModOperationCommon,
+                                  BignumModRawTarget):
+    """ Test cases for mpi_mod_raw_from_mont_rep(). """
+    test_function = "mpi_mod_raw_from_mont_rep"
+    test_name = "Convert from Mont: "
+    symbol = "1/R *"
+    input_style = "arch_split"
+    arity = 1
+
+    def result(self) -> List[str]:
+        result = self.from_montgomery(self.int_a)
+        return [self.format_result(result)]
+
+class BignumModRawModNegate(bignum_common.ModOperationCommon,
+                            BignumModRawTarget):
+    """ Test cases for mpi_mod_raw_neg(). """
+    test_function = "mpi_mod_raw_neg"
+    test_name = "Modular negation: "
+    symbol = "-"
+    input_style = "arch_split"
+    arity = 1
+
+    def result(self) -> List[str]:
+        result = (self.int_n - self.int_a) % self.int_n
+        return [self.format_result(result)]
diff --git a/framework/scripts/mbedtls_framework/build_tree.py b/framework/scripts/mbedtls_framework/build_tree.py
new file mode 100644
index 0000000..00868f5
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/build_tree.py
@@ -0,0 +1,144 @@
+"""Mbed TLS build tree information and manipulation.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import os
+import inspect
+import re
+from typing import Optional
+
+def looks_like_tf_psa_crypto_root(path: str) -> bool:
+    """Whether the given directory looks like the root of the PSA Crypto source tree."""
+    try:
+        with open(os.path.join(path, 'scripts', 'project_name.txt'), 'r') as f:
+            return f.read() == "TF-PSA-Crypto\n"
+    except FileNotFoundError:
+        return False
+
+def looks_like_mbedtls_root(path: str) -> bool:
+    """Whether the given directory looks like the root of the Mbed TLS source tree."""
+    try:
+        with open(os.path.join(path, 'scripts', 'project_name.txt'), 'r') as f:
+            return f.read() == "Mbed TLS\n"
+    except FileNotFoundError:
+        return False
+
+def looks_like_root(path: str) -> bool:
+    return looks_like_tf_psa_crypto_root(path) or looks_like_mbedtls_root(path)
+
+def crypto_core_directory(root: Optional[str] = None, relative: bool = False) -> str:
+    """
+    Return the path of the directory containing the PSA crypto core
+    for either TF-PSA-Crypto or Mbed TLS.
+
+    Return either the full path or the path relative to the project
+    root, depending on the "relative" boolean argument.
+    """
+    if root is None:
+        root = guess_project_root()
+    if looks_like_tf_psa_crypto_root(root):
+        if relative:
+            return "core"
+        return os.path.join(root, "core")
+    elif looks_like_mbedtls_root(root):
+        if is_mbedtls_3_6():
+            path = "library"
+        else:
+            path = "tf-psa-crypto/core"
+        if relative:
+            return path
+        return os.path.join(root, path)
+    else:
+        raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found')
+
+def crypto_library_filename(root: Optional[str] = None) -> str:
+    """Return the crypto library filename for either TF-PSA-Crypto or Mbed TLS."""
+    if root is None:
+        root = guess_project_root()
+    if looks_like_tf_psa_crypto_root(root):
+        return "tfpsacrypto"
+    elif looks_like_mbedtls_root(root):
+        return "mbedcrypto"
+    else:
+        raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found')
+
+def check_repo_path():
+    """Check that the current working directory is the project root, and throw
+    an exception if not.
+    """
+    if not all(os.path.isdir(d) for d in ["include", "library", "tests"]):
+        raise Exception("This script must be run from Mbed TLS root")
+
+def chdir_to_root() -> None:
+    """Detect the root of the Mbed TLS source tree and change to it.
+
+    The current directory must be up to two levels deep inside an Mbed TLS
+    source tree.
+    """
+    for d in [os.path.curdir,
+              os.path.pardir,
+              os.path.join(os.path.pardir, os.path.pardir)]:
+        if looks_like_root(d):
+            os.chdir(d)
+            return
+    raise Exception('Mbed TLS source tree not found')
+
+def guess_project_root():
+    """Guess project source code directory.
+
+    Return the first possible project root directory.
+    """
+    dirs = set()
+    for frame in inspect.stack():
+        path = os.path.dirname(frame.filename)
+        for d in ['.', os.path.pardir] \
+                 + [os.path.join(*([os.path.pardir]*i)) for i in range(2, 10)]:
+            d = os.path.abspath(os.path.join(path, d))
+            if d in dirs:
+                continue
+            dirs.add(d)
+            if looks_like_root(d):
+                return d
+    raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found')
+
+def guess_mbedtls_root(root: Optional[str] = None) -> str:
+    """Guess Mbed TLS source code directory.
+
+    Return the first possible Mbed TLS root directory.
+    Raise an exception if we are not in Mbed TLS.
+    """
+    if root is None:
+        root = guess_project_root()
+    if looks_like_mbedtls_root(root):
+        return root
+    else:
+        raise Exception('Mbed TLS source tree not found')
+
+def guess_tf_psa_crypto_root(root: Optional[str] = None) -> str:
+    """Guess TF-PSA-Crypto source code directory.
+
+    Return the first possible TF-PSA-Crypto root directory.
+    Raise an exception if we are not in TF-PSA-Crypto.
+    """
+    if root is None:
+        root = guess_project_root()
+    if looks_like_tf_psa_crypto_root(root):
+        return root
+    else:
+        raise Exception('TF-PSA-Crypto source tree not found')
+
+def is_mbedtls_3_6() -> bool:
+    """Whether the working tree is an Mbed TLS 3.6 one or not
+
+    Return false if we are in TF-PSA-Crypto or in Mbed TLS but with a version
+    different from 3.6.x.
+    Raise an exception if we are neither in Mbed TLS nor in TF-PSA-Crypto.
+    """
+    root = guess_project_root()
+    if not looks_like_mbedtls_root(root):
+        return False
+    with open(os.path.join(root, 'include', 'mbedtls', 'build_info.h'), 'r') as f:
+        return re.search(r"#define MBEDTLS_VERSION_NUMBER.*0x0306", f.read()) is not None
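+
+# Illustrative usage sketch (not part of the module): from the root of an
+# Mbed TLS 3.6 working tree, the helpers above behave as follows:
+#
+#     build_tree.crypto_core_directory(relative=True)  # -> "library"
+#     build_tree.crypto_library_filename()             # -> "mbedcrypto"
+#     build_tree.is_mbedtls_3_6()                      # -> True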
diff --git a/framework/scripts/mbedtls_framework/c_build_helper.py b/framework/scripts/mbedtls_framework/c_build_helper.py
new file mode 100644
index 0000000..2081d13
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/c_build_helper.py
@@ -0,0 +1,177 @@
+"""Generate and run C code.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import os
+import platform
+import subprocess
+import sys
+import tempfile
+
+class CompileError(Exception):
+    """Exception to represent an error during the compilation."""
+
+    def __init__(self, message):
+        """Save the error massage"""
+
+        super().__init__()
+        self.message = message
+
+def remove_file_if_exists(filename):
+    """Remove the specified file, ignoring errors."""
+    if not filename:
+        return
+    try:
+        os.remove(filename)
+    except OSError:
+        pass
+
+def create_c_file(file_label):
+    """Create a temporary C file.
+
+    * ``file_label``: a string that will be included in the file name.
+
+    Return ``(c_file, c_name, exe_name)`` where ``c_file`` is a Python
+    stream open for writing to the file, ``c_name`` is the name of the file
+    and ``exe_name`` is the name of the executable that will be produced
+    by compiling the file.
+    """
+    c_fd, c_name = tempfile.mkstemp(prefix='tmp-{}-'.format(file_label),
+                                    suffix='.c')
+    exe_suffix = '.exe' if platform.system() == 'Windows' else ''
+    exe_name = c_name[:-2] + exe_suffix
+    remove_file_if_exists(exe_name)
+    c_file = os.fdopen(c_fd, 'w', encoding='ascii')
+    return c_file, c_name, exe_name
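+
+# Illustrative example (not part of the module): create_c_file('size') may
+# return a stream on a temporary file such as /tmp/tmp-size-k2j4x9.c together
+# with the matching executable name /tmp/tmp-size-k2j4x9 ('.exe' appended on
+# Windows); the random suffix comes from tempfile.mkstemp.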
+
+def generate_c_printf_expressions(c_file, cast_to, printf_format, expressions):
+    """Generate C instructions to print the value of ``expressions``.
+
+    Write the code with ``c_file``'s ``write`` method.
+
+    Each expression is cast to the type ``cast_to`` and printed with the
+    printf format ``printf_format``.
+    """
+    for expr in expressions:
+        c_file.write('    printf("{}\\n", ({}) {});\n'
+                     .format(printf_format, cast_to, expr))
+
+def generate_c_file(c_file,
+                    caller, header,
+                    main_generator):
+    """Generate a temporary C source file.
+
+    * ``c_file`` is an open stream on the C source file.
+    * ``caller``: an informational string written in a comment at the top
+      of the file.
+    * ``header``: extra code to insert before any function in the generated
+      C file.
+    * ``main_generator``: a function called with ``c_file`` as its sole argument
+      to generate the body of the ``main()`` function.
+    """
+    c_file.write('/* Generated by {} */'
+                 .format(caller))
+    c_file.write('''
+#include <stdio.h>
+''')
+    c_file.write(header)
+    c_file.write('''
+int main(void)
+{
+''')
+    main_generator(c_file)
+    c_file.write('''    return 0;
+}
+''')
+
+def compile_c_file(c_filename, exe_filename, include_dirs):
+    """Compile a C source file with the host compiler.
+
+    * ``c_filename``: the name of the source file to compile.
+    * ``exe_filename``: the name for the executable to be created.
+    * ``include_dirs``: a list of paths to include directories to be passed
+      with the -I switch.
+    """
+    # Respect $HOSTCC if it is set
+    cc = os.getenv('HOSTCC', None)
+    if cc is None:
+        cc = os.getenv('CC', 'cc')
+    cmd = [cc]
+
+    proc = subprocess.Popen(cmd,
+                            stdout=subprocess.DEVNULL,
+                            stderr=subprocess.PIPE,
+                            universal_newlines=True)
+    cc_is_msvc = 'Microsoft (R) C/C++' in proc.communicate()[1]
+
+    cmd += ['-I' + dir for dir in include_dirs]
+    if cc_is_msvc:
+        # MSVC has deprecated using -o to specify the output file,
+        # and produces an object file in the working directory by default.
+        obj_filename = exe_filename[:-4] + '.obj'
+        cmd += ['-Fe' + exe_filename, '-Fo' + obj_filename]
+    else:
+        cmd += ['-o' + exe_filename]
+
+    try:
+        subprocess.check_output(cmd + [c_filename],
+                                stderr=subprocess.PIPE,
+                                universal_newlines=True)
+
+    except subprocess.CalledProcessError as e:
+        raise CompileError(e.stderr) from e
+
+def get_c_expression_values(
+        cast_to, printf_format,
+        expressions,
+        caller=__name__, file_label='',
+        header='', include_path=None,
+        keep_c=False,
+): # pylint: disable=too-many-arguments, too-many-locals
+    """Generate and run a program to print out numerical values for expressions.
+
+    * ``cast_to``: a C type.
+    * ``printf_format``: a printf format suitable for the type ``cast_to``.
+    * ``expressions``: a list of C language expressions that have the type
+      ``cast_to``.
+    * ``caller``: an informational string written in a comment at the top
+      of the generated C file.
+    * ``file_label``: a string included in the name of the temporary C file.
+    * ``header``: extra code to insert before any function in the generated
+      C file.
+    * ``include_path``: a list of directories containing header files.
+    * ``keep_c``: if true, keep the temporary C file (presumably for debugging
+      purposes).
+
+    Use the C compiler specified by the ``HOSTCC`` environment variable,
+    falling back to ``CC``, then to ``cc``. If the compiler looks like MSVC,
+    use its command line syntax; otherwise assume that it supports the
+    traditional Unix ``-I`` and ``-o`` options.
+
+    Return the list of values of the ``expressions``.
+    """
+    if include_path is None:
+        include_path = []
+    c_name = None
+    exe_name = None
+    obj_name = None
+    try:
+        c_file, c_name, exe_name = create_c_file(file_label)
+        generate_c_file(
+            c_file, caller, header,
+            lambda c_file: generate_c_printf_expressions(c_file,
+                                                         cast_to, printf_format,
+                                                         expressions)
+        )
+        c_file.close()
+
+        compile_c_file(c_name, exe_name, include_path)
+        if keep_c:
+            sys.stderr.write('List of {} tests kept at {}\n'
+                             .format(caller, c_name))
+        else:
+            os.remove(c_name)
+        output = subprocess.check_output([exe_name])
+        return output.decode('ascii').strip().split('\n')
+    finally:
+        remove_file_if_exists(exe_name)
+        remove_file_if_exists(obj_name)
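+
+# Illustrative usage sketch (not part of the module), assuming a C compiler is
+# reachable as 'cc' (or via $HOSTCC/$CC):
+#
+#     values = get_c_expression_values(
+#         'unsigned long', '%lu',
+#         ['sizeof(int)', 'sizeof(void *)'],
+#         file_label='demo')
+#     # e.g. values == ['4', '8'] on a typical LP64 host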
diff --git a/framework/scripts/mbedtls_framework/c_parsing_helper.py b/framework/scripts/mbedtls_framework/c_parsing_helper.py
new file mode 100644
index 0000000..0e428cd
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/c_parsing_helper.py
@@ -0,0 +1,168 @@
+"""Helper functions to parse C code in heavily constrained scenarios.
+
+Currently supported functionality:
+
+* read_function_declarations: read function declarations from a header file.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+### WARNING: the code in this file has not been extensively reviewed yet.
+### We do not think it is harmful, but it may be below our normal standards
+### for robustness and maintainability.
+
+import re
+from typing import Dict, Iterable, Iterator, List, Optional, Tuple
+
+
+class ArgumentInfo:
+    """Information about an argument to an API function."""
+    #pylint: disable=too-few-public-methods
+
+    _KEYWORDS = [
+        'const', 'register', 'restrict',
+        'int', 'long', 'short', 'signed', 'unsigned',
+    ]
+    _DECLARATION_RE = re.compile(
+        r'(?P<type>\w[\w\s*]*?)\s*' +
+        r'(?!(?:' + r'|'.join(_KEYWORDS) + r'))(?P<name>\b\w+\b)?' +
+        r'\s*(?P<suffix>\[[^][]*\])?\Z',
+        re.A | re.S)
+
+    @classmethod
+    def normalize_type(cls, typ: str) -> str:
+        """Normalize whitespace in a type."""
+        typ = re.sub(r'\s+', r' ', typ)
+        typ = re.sub(r'\s*\*', r' *', typ)
+        return typ
+
+    def __init__(self, decl: str) -> None:
+        self.decl = decl.strip()
+        m = self._DECLARATION_RE.match(self.decl)
+        if not m:
+            raise ValueError(self.decl)
+        self.type = self.normalize_type(m.group('type')) #type: str
+        self.name = m.group('name') #type: Optional[str]
+        self.suffix = m.group('suffix') if m.group('suffix') else '' #type: str
+
+    def __str__(self) -> str:
+        return self.decl
+
+class FunctionInfo:
+    """Information about an API function."""
+    #pylint: disable=too-few-public-methods
+
+    # Regex matching the declaration of a function that returns void.
+    VOID_RE = re.compile(r'\s*\bvoid\s*\Z', re.A)
+
+    def __init__(self, #pylint: disable=too-many-arguments
+                 filename: str,
+                 line_number: int,
+                 qualifiers: Iterable[str],
+                 return_type: str,
+                 name: str,
+                 arguments: List[str],
+                 doc: str = "") -> None:
+
+        self.filename = filename
+        self.line_number = line_number
+        self.qualifiers = frozenset(qualifiers)
+        self.return_type = return_type
+        self.name = name
+        self.arguments = [ArgumentInfo(arg) for arg in arguments]
+        self.doc = doc
+
+    def returns_void(self) -> bool:
+        """Whether the function returns void."""
+        return bool(self.VOID_RE.search(self.return_type))
+
+    def __str__(self) -> str:
+        str_args = [str(a) for a in self.arguments]
+        str_text = "{} {} {}({})".format(" ".join(self.qualifiers),
+                                         self.return_type, self.name,
+                                         ", ".join(str_args)).strip()
+        str_text = self._c_wrap_(str_text)
+        return self.doc + "\n" + str_text
+
+    @staticmethod
+    def _c_wrap_(in_str: str, line_len: int = 80) -> str:
+        """Auto-idents function declaration args using opening parenthesis."""
+        if len(in_str) >= line_len:
+            p_idx = in_str.index("(")
+            ident = " "  * p_idx
+            padded_comma = ",\n" + ident
+            in_str = in_str.replace(",", padded_comma)
+        return in_str
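+
+    # Illustrative example (not part of the class): if the one-line form of
+    #     psa_status_t f(int a, int b)
+    # exceeded line_len, _c_wrap_ would reflow it to
+    #     psa_status_t f(int a,
+    #                    int b)
+    # with each comma followed by a newline indented to the opening parenthesis.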
+
+# Match one C comment.
+# Note that we match both comment types, so things like // in a /*...*/
+# comment are handled correctly.
+_C_COMMENT_RE = re.compile(r'//(?:[^\n]|\\\n)*|/\*.*?\*/', re.S)
+_NOT_NEWLINES_RE = re.compile(r'[^\n]+')
+
+def read_logical_lines(filename: str) -> Iterator[Tuple[int, str]]:
+    """Read logical lines from a file.
+
+    Logical lines are one or more physical lines, with balanced parentheses.
+    """
+    with open(filename, encoding='utf-8') as inp:
+        content = inp.read()
+    # Strip comments, but keep newlines for line numbering
+    content = re.sub(_C_COMMENT_RE,
+                     lambda m: re.sub(_NOT_NEWLINES_RE, "", m.group(0)),
+                     content)
+    lines = enumerate(content.splitlines(), 1)
+    for line_number, line in lines:
+        # Read a logical line, containing balanced parentheses.
+        # We assume that parentheses are balanced (this should be ok
+        # since comments have been stripped), otherwise there will be
+        # a gigantic logical line at the end.
+        paren_level = line.count('(') - line.count(')')
+        while paren_level > 0:
+            _, more = next(lines) #pylint: disable=stop-iteration-return
+            paren_level += more.count('(') - more.count(')')
+            line += '\n' + more
+        yield line_number, line
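+
+# Illustrative example (not part of the module): given a header containing
+#     int f(int x,
+#           int y);
+# read_logical_lines() yields one logical line containing both physical lines,
+# numbered with the first physical line's number.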
+
+_C_FUNCTION_DECLARATION_RE = re.compile(
+    r'(?P<qualifiers>(?:(?:extern|inline|static)\b\s*)*)'
+    r'(?P<return_type>\w[\w\s*]*?)\s*' +
+    r'\b(?P<name>\w+)' +
+    r'\s*\((?P<arguments>.*)\)\s*;',
+    re.A | re.S)
+
+def read_function_declarations(functions: Dict[str, FunctionInfo],
+                               filename: str) -> None:
+    """Collect function declarations from a C header file."""
+    for line_number, line in read_logical_lines(filename):
+        m = _C_FUNCTION_DECLARATION_RE.match(line)
+        if not m:
+            continue
+        qualifiers = m.group('qualifiers').split()
+        return_type = m.group('return_type')
+        name = m.group('name')
+        arguments = m.group('arguments').split(',')
+        if len(arguments) == 1 and re.match(FunctionInfo.VOID_RE, arguments[0]):
+            arguments = []
+        # Note: we replace any existing declaration for the same name.
+        functions[name] = FunctionInfo(filename, line_number,
+                                       qualifiers,
+                                       return_type,
+                                       name,
+                                       arguments)
+
+_C_TYPEDEF_DECLARATION_RE = re.compile(r'typedef (?:struct )?(?P<type>\w+) (?P<name>\w+)')
+
+def read_typedefs(filename: str) -> Dict[str, str]:
+    """ Extract type definitions in a {typedef aliased name: original type} dictionary.
+    Multi-line typedef struct are not captured. """
+
+    type_decl = {}
+
+    for _, line in read_logical_lines(filename):
+        m = _C_TYPEDEF_DECLARATION_RE.match(line)
+        if m:
+            type_decl[m.group("name")] = m.group("type")
+    return type_decl
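+
+# Illustrative example (not part of the module): a line such as
+#     typedef uint32_t psa_key_id_t;
+# produces {'psa_key_id_t': 'uint32_t'}, and 'typedef struct foo bar;' maps
+# 'bar' to 'foo'.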
diff --git a/framework/scripts/mbedtls_framework/c_wrapper_generator.py b/framework/scripts/mbedtls_framework/c_wrapper_generator.py
new file mode 100644
index 0000000..f15f3a7
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/c_wrapper_generator.py
@@ -0,0 +1,501 @@
+"""Generate C wrapper functions.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+### WARNING: the code in this file has not been extensively reviewed yet.
+### We do not think it is harmful, but it may be below our normal standards
+### for robustness and maintainability.
+
+import os
+import re
+import sys
+from typing import Dict, NamedTuple, List, Optional, Tuple
+
+from .c_parsing_helper import ArgumentInfo, FunctionInfo
+from . import typing_util
+
+
+def c_declare(prefix: str, name: str, suffix: str) -> str:
+    """Format a declaration of name with the given type prefix and suffix."""
+    if not prefix.endswith('*'):
+        prefix += ' '
+    return prefix + name + suffix
+
+
+WrapperInfo = NamedTuple('WrapperInfo', [
+    ('argument_names', List[str]),
+    ('guard', Optional[str]),
+    ('wrapper_name', str),
+])
+
+def strip_indentation(in_str: str, new_lines: int = 1, indent_lv: int = 0) -> str:
+    """Return a whitespace stripped str, with configurable whitespace in output.
+
+    The method will remove space-character indentation from input string.
+    It will also remove all new-lines around the text-block as well as
+    trailing whitespace.
+    The output indentation can be configured by indent_lv, and will use blocks
+    of 4 spaces.
+    At the end of the string a `new_lines` amount of empty lines will be added.
+    """
+
+    _ret_string = in_str.lstrip('\n').rstrip()
+    # Count empty spaces at the beginning of each line. The smallest non-zero
+    # entry will be used to clean up input indentation.
+    indents = [len(n)-1 for n in re.findall(r'(?m)^ +\S', in_str)]
+
+    if indents:
+        _ret_string = re.sub(r'(?m)^ {{{indent}}}'.format(indent=min(indents)),
+                             '', _ret_string)
+    if indent_lv:
+        _ret_string = '\n'.join([' ' * indent_lv * 4 + s
+                                 for s in _ret_string.splitlines()])
+    return _ret_string + ('\n' * (new_lines + 1))
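+
+# Illustrative example (not part of the module):
+#     strip_indentation('\n    ab\n        cd\n')
+# returns 'ab\n    cd\n\n': the smallest indentation (4 spaces) is removed
+# from every line, surrounding newlines are stripped, and new_lines=1 appends
+# one empty line.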
+
+class Base:
+    """Generate a C source file containing wrapper functions."""
+
+    # This class is designed to have many methods potentially overridden.
+    # Tell pylint not to complain about methods that have unused arguments:
+    # child classes are likely to override those methods and need the
+    # arguments in question.
+    #pylint: disable=no-self-use,unused-argument
+
+    # Prefix prepended to the function's name to form the wrapper name.
+    _WRAPPER_NAME_PREFIX = ''
+    # Suffix appended to the function's name to form the wrapper name.
+    _WRAPPER_NAME_SUFFIX = '_wrap'
+
+    _INCLUDES = ['<mbedtls/build_info.h>']
+
+    # Functions with one of these qualifiers are skipped.
+    _SKIP_FUNCTION_WITH_QUALIFIERS = frozenset(['inline', 'static'])
+
+    def __init__(self):
+        """Construct a wrapper generator object.
+        """
+        self.program_name = os.path.basename(sys.argv[0])
+        # To be populated in a derived class
+        self.functions = {} #type: Dict[str, FunctionInfo]
+        self._function_guards = {} #type: Dict[str, str]
+        # Preprocessor symbol used as a guard against multiple inclusion in the
+        # header. Must be set before writing output to a header.
+        # Not used when writing .c output.
+        self.header_guard = None #type: Optional[str]
+
+    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
+        """Write the prologue of a C file.
+
+        This includes a description comment and some include directives.
+        """
+        prologue = strip_indentation(f'''
+            /* Automatically generated by {self.program_name}, do not edit! */
+
+            /* Copyright The Mbed TLS Contributors
+             * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+             */
+
+        ''')
+
+        if header:
+            prologue += strip_indentation(f'''
+                #ifndef {self.header_guard}
+                #define {self.header_guard}
+
+                #ifdef __cplusplus
+                extern "C" {{
+                #endif
+
+            ''')
+
+        for include in self._INCLUDES:
+            prologue += "#include {}\n".format(include)
+
+        # Make certain there is an empty line at the end of this section.
+        prologue += '\n' if self._INCLUDES else '\n\n'
+
+        out.write(prologue)
+
+    def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None:
+        """Write the epilogue of a C file."""
+        epilogue = ""
+        if header:
+            epilogue += strip_indentation(f'''
+                #ifdef __cplusplus
+                }}
+                #endif
+
+                #endif /* {self.header_guard} */
+
+            ''')
+
+        epilogue += ('/* End of automatically generated file. */\n')
+        out.write(epilogue)
+
+    def _wrapper_function_name(self, original_name: str) -> str:
+        """The name of the wrapper function.
+
+        By default, this adds a suffix.
+        """
+        return (self._WRAPPER_NAME_PREFIX +
+                original_name +
+                self._WRAPPER_NAME_SUFFIX)
+
+    def _wrapper_declaration_start(self,
+                                   function: FunctionInfo,
+                                   wrapper_name: str) -> str:
+        """The beginning of the wrapper function declaration.
+
+        This ends just before the opening parenthesis of the argument list.
+
+        This is a string containing at least the return type and the
+        function name. It may start with additional qualifiers or attributes
+        such as `static`, `__attribute__((...))`, etc.
+        """
+        return c_declare(function.return_type, wrapper_name, '')
+
+    def _argument_name(self,
+                       function_name: str,
+                       num: int,
+                       arg: ArgumentInfo) -> str:
+        """Name to use for the given argument in the wrapper function.
+
+        Argument numbers count from 0.
+        """
+        name = 'arg' + str(num)
+        if arg.name:
+            name += '_' + arg.name
+        return name
+
+    def _wrapper_declaration_argument(self,
+                                      function_name: str,
+                                      num: int, name: str,
+                                      arg: ArgumentInfo) -> str:
+        """One argument definition in the wrapper function declaration.
+
+        Argument numbers count from 0.
+        """
+        return c_declare(arg.type, name, arg.suffix)
+
+    def _underlying_function_name(self, function: FunctionInfo) -> str:
+        """The name of the underlying function.
+
+        By default, this is the name of the wrapped function.
+        """
+        return function.name
+
+    def _return_variable_name(self, function: FunctionInfo) -> str:
+        """The name of the variable that will contain the return value."""
+        return 'retval'
+
+    def _write_function_call(self, out: typing_util.Writable,
+                             function: FunctionInfo,
+                             argument_names: List[str]) -> None:
+        """Write the call to the underlying function.
+        """
+        # Note that the function name is in parentheses, to avoid calling
+        # a function-like macro with the same name, since in typical usage
+        # there is a function-like macro with the same name which is the
+        # wrapper.
+        call = '({})({})'.format(self._underlying_function_name(function),
+                                 ', '.join(argument_names))
+        if function.returns_void():
+            out.write('    {};\n'.format(call))
+        else:
+            ret_name = self._return_variable_name(function)
+            ret_decl = c_declare(function.return_type, ret_name, '')
+            out.write('    {} = {};\n'.format(ret_decl, call))
+
+    def _write_function_return(self, out: typing_util.Writable,
+                               function: FunctionInfo,
+                               if_void: bool = False) -> None:
+        """Write a return statement.
+
+        If the function returns void, only write a statement if if_void is true.
+        """
+        if function.returns_void():
+            if if_void:
+                out.write('    return;\n')
+        else:
+            ret_name = self._return_variable_name(function)
+            out.write('    return {};\n'.format(ret_name))
+
+    def _write_function_body(self, out: typing_util.Writable,
+                             function: FunctionInfo,
+                             argument_names: List[str]) -> None:
+        """Write the body of the wrapper code for the specified function.
+        """
+        self._write_function_call(out, function, argument_names)
+        self._write_function_return(out, function)
+
+    def _skip_function(self, function: FunctionInfo) -> bool:
+        """Whether to skip this function.
+
+        By default, static or inline functions are skipped.
+        """
+        if not self._SKIP_FUNCTION_WITH_QUALIFIERS.isdisjoint(function.qualifiers):
+            return True
+        return False
+
+    def _function_guard(self, function: FunctionInfo) -> Optional[str]:
+        """A preprocessor condition for this function.
+
+        The wrapper will be guarded with `#if` on this condition, if not None.
+        """
+        return self._function_guards.get(function.name)
+
+    def _wrapper_info(self, function: FunctionInfo) -> Optional[WrapperInfo]:
+        """Information about the wrapper for one function.
+
+        Return None if the function should be skipped.
+        """
+        if self._skip_function(function):
+            return None
+        argument_names = [self._argument_name(function.name, num, arg)
+                          for num, arg in enumerate(function.arguments)]
+        return WrapperInfo(
+            argument_names=argument_names,
+            guard=self._function_guard(function),
+            wrapper_name=self._wrapper_function_name(function.name),
+        )
+
+    def _write_function_prototype(self, out: typing_util.Writable,
+                                  function: FunctionInfo,
+                                  wrapper: WrapperInfo,
+                                  header: bool) -> None:
+        """Write the prototype of a wrapper function.
+
+        If header is true, write a function declaration, with a semicolon at
+        the end. Otherwise just write the prototype, intended to be followed
+        by the function's body.
+        """
+        declaration_start = self._wrapper_declaration_start(function,
+                                                            wrapper.wrapper_name)
+        arg_indent = '    '
+        terminator = ';\n' if header else '\n'
+        if function.arguments:
+            out.write(declaration_start + '(\n')
+            for num in range(len(function.arguments)):
+                arg_def = self._wrapper_declaration_argument(
+                    function.name,
+                    num, wrapper.argument_names[num], function.arguments[num])
+                arg_terminator = \
+                    (')' + terminator if num == len(function.arguments) - 1 else
+                     ',\n')
+                out.write(arg_indent + arg_def + arg_terminator)
+        else:
+            out.write(declaration_start + '(void)' + terminator)
+
+    def _write_c_function(self, out: typing_util.Writable,
+                          function: FunctionInfo) -> None:
+        """Write wrapper code for one function.
+
+        Do nothing if the function is skipped.
+        """
+        wrapper = self._wrapper_info(function)
+        if wrapper is None:
+            return
+        out.write('/* Wrapper for {} */\n'.format(function.name))
+
+        if wrapper.guard is not None:
+            out.write('#if {}\n'.format(wrapper.guard))
+        self._write_function_prototype(out, function, wrapper, False)
+        out.write('{\n')
+        self._write_function_body(out, function, wrapper.argument_names)
+        out.write('}\n')
+        if wrapper.guard is not None:
+            out.write('#endif /* {} */\n'.format(wrapper.guard))
+        out.write('\n')
+
+    def _write_h_function_declaration(self, out: typing_util.Writable,
+                                      function: FunctionInfo,
+                                      wrapper: WrapperInfo) -> None:
+        """Write the declaration of one wrapper function.
+        """
+        self._write_function_prototype(out, function, wrapper, True)
+
+    def _write_h_macro_definition(self, out: typing_util.Writable,
+                                  function: FunctionInfo,
+                                  wrapper: WrapperInfo) -> None:
+        """Write the macro definition for one wrapper.
+        """
+        arg_list = ', '.join(wrapper.argument_names)
+        out.write('#define {function_name}({args}) \\\n    {wrapper_name}({args})\n'
+                  .format(function_name=function.name,
+                          wrapper_name=wrapper.wrapper_name,
+                          args=arg_list))
+
+    def _write_h_function(self, out: typing_util.Writable,
+                          function: FunctionInfo) -> None:
+        """Write the complete header content for one wrapper.
+
+        This is the declaration of the wrapper function, and the
+        definition of a function-like macro that calls the wrapper function.
+
+        Do nothing if the function is skipped.
+        """
+        wrapper = self._wrapper_info(function)
+        if wrapper is None:
+            return
+        if wrapper.guard is not None:
+            out.write('#if {}\n'.format(wrapper.guard))
+        self._write_h_function_declaration(out, function, wrapper)
+        self._write_h_macro_definition(out, function, wrapper)
+        if wrapper.guard is not None:
+            out.write('#endif /* {} */\n'.format(wrapper.guard))
+        out.write('\n')
+
+    def write_c_file(self, filename: str) -> None:
+        """Output a whole C file containing function wrapper definitions."""
+        with open(filename, 'w', encoding='utf-8') as out:
+            self._write_prologue(out, False)
+            for name in sorted(self.functions):
+                self._write_c_function(out, self.functions[name])
+            self._write_epilogue(out, False)
+
+    def _header_guard_from_file_name(self, filename: str) -> str:
+        """Preprocessor symbol used as a guard against multiple inclusion."""
+        # Heuristic to strip irrelevant leading directories
+        filename = re.sub(r'.*include[\\/]', r'', filename)
+        return re.sub(r'[^0-9A-Za-z]', r'_', filename, flags=re.A).upper()
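+
+    # Illustrative example (not part of the class): for the path
+    # 'include/test/psa_test_wrappers.h', the directories up to and including
+    # 'include/' are stripped and the guard becomes 'TEST_PSA_TEST_WRAPPERS_H'.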
+
+    def write_h_file(self, filename: str) -> None:
+        """Output a header file with function wrapper declarations and macro definitions."""
+        self.header_guard = self._header_guard_from_file_name(filename)
+        with open(filename, 'w', encoding='utf-8') as out:
+            self._write_prologue(out, True)
+            for name in sorted(self.functions):
+                self._write_h_function(out, self.functions[name])
+            self._write_epilogue(out, True)
+
+
+class UnknownTypeForPrintf(Exception):
+    """Exception raised when attempting to generate code that logs a value of an unknown type."""
+
+    def __init__(self, typ: str) -> None:
+        super().__init__("Unknown type for printf format generation: " + typ)
+
+
+class Logging(Base):
+    """Generate wrapper functions that log the inputs and outputs."""
+
+    def __init__(self) -> None:
+        """Construct a wrapper generator including logging of inputs and outputs.
+
+        Log to stdout by default. Call `set_stream` to change this.
+        """
+        super().__init__()
+        self.stream = 'stdout'
+
+    def set_stream(self, stream: str) -> None:
+        """Set the stdio stream to log to.
+
+        Call this method before calling `write_c_output` or `write_h_output`.
+        """
+        self.stream = stream
+
+    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
+        super()._write_prologue(out, header)
+        if not header:
+            out.write("""
+#if defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS)
+#include <stdio.h>
+#include <inttypes.h>
+#include <mbedtls/debug.h> // for MBEDTLS_PRINTF_SIZET
+#include <mbedtls/platform.h> // for mbedtls_fprintf
+#endif /* defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) */
+""")
+
+    _PRINTF_SIMPLE_FORMAT = {
+        'int': '%d',
+        'long': '%ld',
+        'long long': '%lld',
+        'size_t': '%"MBEDTLS_PRINTF_SIZET"',
+        'unsigned': '0x%08x',
+        'unsigned int': '0x%08x',
+        'unsigned long': '0x%08lx',
+        'unsigned long long': '0x%016llx',
+    }
+
+    def _printf_simple_format(self, typ: str) -> Optional[str]:
+        """Use this printf format for a value of typ.
+
+        Return None if values of typ need more complex handling.
+        """
+        return self._PRINTF_SIMPLE_FORMAT.get(typ)
+
+    _PRINTF_TYPE_CAST = {
+        'int32_t': 'int',
+        'uint32_t': 'unsigned',
+        'uint64_t': 'unsigned long long',
+    } #type: Dict[str, str]
+
+    def _printf_type_cast(self, typ: str) -> Optional[str]:
+        """Cast values of typ to this type before passing them to printf.
+
+        Return None if values of the given type do not need a cast.
+        """
+        return self._PRINTF_TYPE_CAST.get(typ)
+
+    _POINTER_TYPE_RE = re.compile(r'\s*\*\Z')
+
+    def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]:
+        """The printf format and arguments for a value of type typ stored in var.
+        """
+        expr = var
+        base_type = typ
+        # For outputs via a pointer, get the value that has been written.
+        # Note: we don't support pointers to pointers here.
+        pointer_match = self._POINTER_TYPE_RE.search(base_type)
+        if pointer_match:
+            base_type = base_type[:pointer_match.start(0)]
+            expr = '*({})'.format(expr)
+        # Maybe cast the value to a standard type.
+        cast_to = self._printf_type_cast(base_type)
+        if cast_to is not None:
+            expr = '({}) {}'.format(cast_to, expr)
+            base_type = cast_to
+        # Try standard types.
+        fmt = self._printf_simple_format(base_type)
+        if fmt is not None:
+            return '{}={}'.format(var, fmt), [expr]
+        raise UnknownTypeForPrintf(typ)
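+
+    # Illustrative example (not part of the class):
+    #     self._printf_parameters('uint32_t *', 'out')
+    # returns ('out=0x%08x', ['(unsigned) *(out)']): the pointer is
+    # dereferenced, cast to 'unsigned', and given that type's printf format.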
+
+    def _write_function_logging(self, out: typing_util.Writable,
+                                function: FunctionInfo,
+                                argument_names: List[str]) -> None:
+        """Write code to log the function's inputs and outputs."""
+        formats, values = '%s', ['"' + function.name + '"']
+        for arg_info, arg_name in zip(function.arguments, argument_names):
+            fmt, vals = self._printf_parameters(arg_info.type, arg_name)
+            if fmt:
+                formats += ' ' + fmt
+                values += vals
+        if not function.returns_void():
+            ret_name = self._return_variable_name(function)
+            fmt, vals = self._printf_parameters(function.return_type, ret_name)
+            if fmt:
+                formats += ' ' + fmt
+                values += vals
+        out.write("""\
+#if defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS)
+    if ({stream}) {{
+        mbedtls_fprintf({stream}, "{formats}\\n",
+                        {values});
+    }}
+#endif /* defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) */
+"""
+                  .format(stream=self.stream,
+                          formats=formats,
+                          values=', '.join(values)))
+
+    def _write_function_body(self, out: typing_util.Writable,
+                             function: FunctionInfo,
+                             argument_names: List[str]) -> None:
+        """Write the body of the wrapper code for the specified function.
+        """
+        self._write_function_call(out, function, argument_names)
+        self._write_function_logging(out, function, argument_names)
+        self._write_function_return(out, function)
diff --git a/framework/scripts/mbedtls_framework/code_wrapper/__init__.py b/framework/scripts/mbedtls_framework/code_wrapper/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/code_wrapper/__init__.py
diff --git a/framework/scripts/mbedtls_framework/code_wrapper/psa_buffer.py b/framework/scripts/mbedtls_framework/code_wrapper/psa_buffer.py
new file mode 100644
index 0000000..ca62968
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/code_wrapper/psa_buffer.py
@@ -0,0 +1,29 @@
+""" PSA Buffer utility data-class.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+from typing import List
+from .. import typing_util
+
+class BufferParameter:
+    """Description of an input or output buffer parameter sequence to a PSA function."""
+    #pylint: disable=too-few-public-methods
+
+    def __init__(self, i: int, is_output: bool,
+                 buffer_name: str, size_name: str) -> None:
+        """Initialize the parameter information.
+
+        i is the index of the function argument that is the pointer to the buffer.
+        The size is argument i+1. For a variable-size output, the actual length
+        goes in argument i+2.
+
+        buffer_name and size_name are the names of arguments i and i+1.
+        This class does not yet help with the output length.
+        """
+        self.index = i
+        self.buffer_name = buffer_name
+        self.size_name = size_name
+        self.is_output = is_output
+
diff --git a/framework/scripts/mbedtls_framework/code_wrapper/psa_test_wrapper.py b/framework/scripts/mbedtls_framework/code_wrapper/psa_test_wrapper.py
new file mode 100644
index 0000000..6b90fe2
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/code_wrapper/psa_test_wrapper.py
@@ -0,0 +1,39 @@
+"""Generate wrapper functions for PSA function calls.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import itertools
+import os
+from typing import Iterator, List, Collection, Optional, Tuple
+
+from .. import build_tree
+from .. import c_parsing_helper
+from .. import c_wrapper_generator
+from .. import typing_util
+
+from .psa_buffer import BufferParameter
+from .psa_wrapper import PSAWrapper, PSALoggingWrapper, PSAWrapperConfiguration
+
+class PSATestWrapper(PSAWrapper):
+    """Generate a C source file containing wrapper functions for PSA Crypto API calls."""
+
+    _WRAPPER_NAME_PREFIX = 'mbedtls_test_wrap_'
+    _WRAPPER_NAME_SUFFIX = ''
+
+    _PSA_WRAPPER_INCLUDES = ['<psa/crypto.h>',
+                             '<test/memory.h>',
+                             '<test/psa_crypto_helpers.h>',
+                             '<test/psa_test_wrappers.h>']
+
+class PSALoggingTestWrapper(PSATestWrapper, PSALoggingWrapper):
+    """Generate a C source file containing wrapper functions that log PSA Crypto API calls."""
+
+    def __init__(self, out_h_f: str,
+                       out_c_f: str,
+                       stream: str,
+                       in_headers: Optional[List[str]] = None) -> None:
+        super().__init__(out_h_f, out_c_f, stream, in_headers)
+
diff --git a/framework/scripts/mbedtls_framework/code_wrapper/psa_wrapper.py b/framework/scripts/mbedtls_framework/code_wrapper/psa_wrapper.py
new file mode 100644
index 0000000..0148cde
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/code_wrapper/psa_wrapper.py
@@ -0,0 +1,286 @@
+"""Generate wrapper functions for PSA function calls.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import itertools
+import os
+from typing import Any, Iterator, List, Dict, Collection, Optional, Tuple
+
+from .. import build_tree
+from .. import c_parsing_helper
+from .. import c_wrapper_generator
+from .. import typing_util
+
+from .psa_buffer import BufferParameter
+
+class PSAWrapperConfiguration:
+    """Configuration data class for PSA Wrapper."""
+
+    def __init__(self) -> None:
+        self.cpp_guards = ["MBEDTLS_PSA_CRYPTO_C", "MBEDTLS_TEST_HOOKS", "!RECORD_PSA_STATUS_COVERAGE_LOG"]
+
+        self.skipped_functions = frozenset([
+            'mbedtls_psa_external_get_random', # not a library function
+            'psa_get_key_domain_parameters', # client-side function
+            'psa_get_key_slot_number', # client-side function
+            'psa_key_derivation_verify_bytes', # not implemented yet
+            'psa_key_derivation_verify_key', # not implemented yet
+            'psa_set_key_domain_parameters', # client-side function
+        ])
+
+        self.skipped_argument_types = frozenset([
+            # PAKE stuff: not implemented yet
+            'psa_crypto_driver_pake_inputs_t *',
+            'psa_pake_cipher_suite_t *',
+        ])
+
+        self.function_guards = {
+            'mbedtls_psa_register_se_key': 'defined(MBEDTLS_PSA_CRYPTO_SE_C)',
+            'mbedtls_psa_inject_entropy': 'defined(MBEDTLS_PSA_INJECT_ENTROPY)',
+            'mbedtls_psa_external_get_random': 'defined(MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG)',
+            'mbedtls_psa_platform_get_builtin_key': 'defined(MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS)',
+            'psa_crypto_driver_pake_get_cipher_suite' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_password' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_password_len' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_peer' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_peer_len' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_user' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_crypto_driver_pake_get_user_len' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_abort' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_get_implicit_key' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_input' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_output' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_set_password_key' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_set_peer' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_set_role' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_set_user' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+            'psa_pake_setup' : 'defined(PSA_WANT_ALG_SOME_PAKE)',
+        }
+
+class PSAWrapper(c_wrapper_generator.Base):
+    """Generate a C source file containing wrapper functions for PSA Crypto API calls."""
+
+    _WRAPPER_NAME_PREFIX = 'mbedtls_test_wrap_'
+    _WRAPPER_NAME_SUFFIX = ''
+
+    _PSA_WRAPPER_INCLUDES = ['<psa/crypto.h>']
+    _DEFAULT_IN_HEADERS = ['crypto.h', 'crypto_extra.h']
+
+    def __init__(self,
+                 out_h_f: str,
+                 out_c_f: str,
+                 in_headers: Optional[List[str]] = None,
+                 config: PSAWrapperConfiguration = PSAWrapperConfiguration()) -> None:
+
+        super().__init__()
+        self.out_c_f = out_c_f
+        self.out_h_f = out_h_f
+
+        self.mbedtls_root = build_tree.guess_mbedtls_root()
+        self.read_config(config)
+        self.read_headers(in_headers)
+
+    def read_config(self, cfg: PSAWrapperConfiguration) -> None:
+        """Configure instance's parameters from a user provided config."""
+
+        self._cpp_guards = PSAWrapper.parse_def_guards(cfg.cpp_guards)
+        self._skip_functions = cfg.skipped_functions
+        self._function_guards.update(cfg.function_guards)
+        self._not_implemented = cfg.skipped_argument_types
+
+    def read_headers(self, headers: Optional[List[str]]) -> None:
+        """Reads functions to be wrapped from source header files into self.functions."""
+        self.in_headers = self._DEFAULT_IN_HEADERS if headers is None else headers
+        for header_name in self.in_headers:
+            header_path = self.rel_path(header_name)
+            c_parsing_helper.read_function_declarations(self.functions, header_path)
+
+    def rel_path(self, filename: str, path_list: List[str] = ['include', 'psa']) -> str:
+        """Return the estimated path in relationship to the mbedtls_root.
+
+           The method allows overriding the targetted sub-directory.
+           Currently the default is set to mbedtls_root/include/psa."""
+        # Temporary, while Mbed TLS does not just rely on the TF-PSA-Crypto
+        # build system to build its crypto library. When it does, the first
+        # case can just be removed.
+        if not build_tree.is_mbedtls_3_6():
+            path_list = ['tf-psa-crypto' ] + path_list
+            return os.path.join(self.mbedtls_root, *path_list, filename)
+
+        return os.path.join(self.mbedtls_root, *path_list, filename)
+
+    # Utility Methods
+    @staticmethod
+    def parse_def_guards(def_list: Collection[str]) -> str:
+        """ Create define guards.
+
+            Convert an input list of macro names into a C preprocessor
+            expression in defined() && !defined() syntax."""
+
+        output = ""
+        dl = [("defined({})".format(n) if n[0] != "!" else
+                "!defined({})".format(n[1:]))
+               for n in def_list]
+
+        # Split the list in chunks of 2 and add new lines
+        for i in range(0, len(dl), 2):
+            output += "{} && {} && \\".format(dl[i], dl[i+1]) + "\n    "\
+                if i+2 <= len(dl) else dl[i]
+        return output
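+
+    # Illustrative example (not part of the class): the default configuration's
+    # guards above produce
+    #     defined(MBEDTLS_PSA_CRYPTO_C) && defined(MBEDTLS_TEST_HOOKS) && \
+    #     !defined(RECORD_PSA_STATUS_COVERAGE_LOG)
+    # with the continuation line indented by 4 spaces.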
+
+    @staticmethod
+    def _detect_buffer_parameters(arguments: List[c_parsing_helper.ArgumentInfo],
+                                  argument_names: List[str]) -> Iterator[BufferParameter]:
+        """Detect function arguments that are buffers (pointer, size [,length])."""
+        types = ['' if arg.suffix else arg.type for arg in arguments]
+        # pairs = list of (type_of_arg_N, type_of_arg_N+1)
+        # where each type_of_arg_X is the empty string if the type is an array
+        # or there is no argument X.
+        pairs = enumerate(itertools.zip_longest(types, types[1:], fillvalue=''))
+        for i, t01 in pairs:
+            if (t01[0] == 'const uint8_t *' or t01[0] == 'uint8_t *') and \
+               t01[1] == 'size_t':
+                yield BufferParameter(i, not t01[0].startswith('const '),
+                                      argument_names[i], argument_names[i+1])
+
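+    # Illustrative example (not part of the class): in a declaration such as
+    #     psa_status_t psa_hash_update(psa_hash_operation_t *operation,
+    #                                  const uint8_t *input, size_t input_length);
+    # the (input, input_length) pair is detected as a single input
+    # BufferParameter with index 1.
+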
+    @staticmethod
+    def _parameter_should_be_copied(function_name: str,
+                                    _buffer_name: Optional[str]) -> bool:
+        """Whether the specified buffer argument to a PSA function should be copied.
+        """
+        # False-positives that do not need buffer copying
+        if function_name in ('mbedtls_psa_inject_entropy',
+                             'psa_crypto_driver_pake_get_password',
+                             'psa_crypto_driver_pake_get_user',
+                             'psa_crypto_driver_pake_get_peer'):
+            return False
+
+        return True
+
+    def _poison_wrap(self, param: BufferParameter, poison: bool, indent_lv: int = 1) -> str:
+        """Return a call to MBEDTLS_TEST_MEMORY_[UN]POISON.
+
+           The output is MBEDTLS_TEST_MEMORY_ followed by POISON or UNPOISON
+           and the parameter's (buffer name, size name) arguments, indented by
+           indent_lv blocks of 4 spaces.
+        """
+        return "{}MBEDTLS_TEST_MEMORY_{}({}, {});\n".format((indent_lv * 4) * ' ',
+                                                            'POISON' if poison else 'UNPOISON',
+                                                            param.buffer_name, param.size_name)
+
+    def _poison_multi_write(self,
+                            out: typing_util.Writable,
+                            buffer_parameters: List['BufferParameter'],
+                            poison: bool) -> None:
+            """Write poisoning or unpoisoning code for the buffer parameters.
+
+               Write poisoning code if poison is true, unpoisoning code otherwise.
+            """
+
+            if not buffer_parameters:
+                return
+            out.write('#if !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS)\n')
+            for param in buffer_parameters:
+                out.write(self._poison_wrap(param, poison))
+            out.write('#endif /* !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS) */\n')
+
+    # Override parent's methods
+    def _write_function_call(self, out: typing_util.Writable,
+                             function: c_wrapper_generator.FunctionInfo,
+                             argument_names: List[str]) -> None:
+        buffer_parameters = list(
+            param
+            for param in self._detect_buffer_parameters(function.arguments,
+                                                        argument_names)
+            if self._parameter_should_be_copied(function.name,
+                                                function.arguments[param.index].name))
+
+        self._poison_multi_write(out, buffer_parameters, True)
+        super()._write_function_call(out, function, argument_names)
+        self._poison_multi_write(out, buffer_parameters, False)
+
+    def _skip_function(self, function: c_wrapper_generator.FunctionInfo) -> bool:
+        if function.return_type != 'psa_status_t':
+            return True
+        if function.name in self._skip_functions:
+            return True
+        return False
+
+    def _return_variable_name(self,
+                              function: c_wrapper_generator.FunctionInfo) -> str:
+        """The name of the variable that will contain the return value."""
+
+        if function.return_type == 'psa_status_t':
+            return 'status'
+        return super()._return_variable_name(function)
+
+    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
+        super()._write_prologue(out, header)
+
+        prologue = []
+        if self._cpp_guards:
+            prologue.append("#if {}".format(self._cpp_guards))
+            prologue.append('')
+
+        for include in self._PSA_WRAPPER_INCLUDES:
+            prologue.append("#include {}".format(include))
+
+        # Make certain there is an empty line at the end of this section.
+        for i in [-1, -2]:
+            if prologue[i] != '':
+                prologue.append('')
+
+        out.write("\n".join(prologue))
+
+    def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None:
+        if self._cpp_guards:
+            out.write("#endif /* {} */\n\n".format(self._cpp_guards))
+        super()._write_epilogue(out, header)
+
+class PSALoggingWrapper(PSAWrapper, c_wrapper_generator.Logging):
+    """Generate a C source file containing wrapper functions that log PSA Crypto API calls."""
+
+    def __init__(self,
+                 stream: str,
+                 out_h_f: str,
+                 out_c_f: str,
+                 in_headers: Optional[List[str]] = None,
+                 config: PSAWrapperConfiguration = PSAWrapperConfiguration()) -> None:
+
+        super().__init__(out_h_f, out_c_f, in_headers, config)
+        self.set_stream(stream)
+
+    _PRINTF_TYPE_CAST = c_wrapper_generator.Logging._PRINTF_TYPE_CAST.copy()
+    _PRINTF_TYPE_CAST.update({
+        'mbedtls_svc_key_id_t': 'unsigned',
+        'psa_algorithm_t': 'unsigned',
+        'psa_drv_slot_number_t': 'unsigned long long',
+        'psa_key_derivation_step_t': 'int',
+        'psa_key_id_t': 'unsigned',
+        'psa_key_slot_number_t': 'unsigned long long',
+        'psa_key_lifetime_t': 'unsigned',
+        'psa_key_type_t': 'unsigned',
+        'psa_key_usage_flags_t': 'unsigned',
+        'psa_pake_role_t': 'int',
+        'psa_pake_step_t': 'int',
+        'psa_status_t': 'int',
+    })
+
+    def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]:
+        if typ.startswith('const '):
+            typ = typ[6:]
+        if typ == 'uint8_t *':
+            # Skip buffers
+            return '', []
+        if typ.endswith('operation_t *'):
+            return '', []
+        if typ in self._not_implemented:
+            return '', []
+        if typ == 'psa_key_attributes_t *':
+            return (var + '={id=%u, lifetime=0x%08x, type=0x%08x, bits=%u, alg=%08x, usage=%08x}',
+                    ['(unsigned) psa_get_key_{}({})'.format(field, var)
+                     for field in ['id', 'lifetime', 'type', 'bits', 'algorithm', 'usage_flags']])
+        return super()._printf_parameters(typ, var)
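+
+    # Illustrative example (not part of the class): an argument of type
+    # 'const psa_key_attributes_t *' named 'attributes' is logged as
+    # 'attributes={id=%u, lifetime=0x%08x, ...}', reading each field through
+    # psa_get_key_id(), psa_get_key_lifetime(), psa_get_key_type(), etc.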
+
diff --git a/framework/scripts/mbedtls_framework/collect_test_cases.py b/framework/scripts/mbedtls_framework/collect_test_cases.py
new file mode 100644
index 0000000..6f0c65c
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/collect_test_cases.py
@@ -0,0 +1,170 @@
+"""Discover all the test cases (unit tests and SSL tests)."""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import glob
+import os
+import re
+import subprocess
+import sys
+
+from . import build_tree
+
+
+class ScriptOutputError(ValueError):
+    """A kind of ValueError that indicates we found
+    the script doesn't list test cases in an expected
+    pattern.
+    """
+
+    @property
+    def script_name(self):
+        return super().args[0]
+
+    @property
+    def idx(self):
+        return super().args[1]
+
+    @property
+    def line(self):
+        return super().args[2]
+
+class Results:
+    """Store file and line information about errors or warnings in test suites."""
+
+    def __init__(self, options):
+        self.errors = 0
+        self.warnings = 0
+        self.ignore_warnings = options.quiet
+
+    def error(self, file_name, line_number, fmt, *args):
+        sys.stderr.write(('{}:{}:ERROR:' + fmt + '\n').
+                         format(file_name, line_number, *args))
+        self.errors += 1
+
+    def warning(self, file_name, line_number, fmt, *args):
+        if not self.ignore_warnings:
+            sys.stderr.write(('{}:{}:Warning:' + fmt + '\n')
+                             .format(file_name, line_number, *args))
+            self.warnings += 1
+
+class TestDescriptionExplorer:
+    """An iterator over test cases with descriptions.
+
+The test cases that have descriptions are:
+* Individual unit tests (entries in a .data file) in test suites.
+* Individual test cases in ssl-opt.sh.
+
+This is an abstract class. To use it, derive a class that implements
+the process_test_case method, and call walk_all().
+"""
+
+    def process_test_case(self, per_file_state,
+                          file_name, line_number, description):
+        """Process a test case.
+
+per_file_state: an object created by new_per_file_state() at the beginning
+                of each file.
+file_name: a relative path to the file containing the test case.
+line_number: the line number in the given file.
+description: the test case description as a byte string.
+"""
+        raise NotImplementedError
+
+    def new_per_file_state(self):
+        """Return a new per-file state object.
+
+The default per-file state object is None. Child classes that require per-file
+state may override this method.
+"""
+        #pylint: disable=no-self-use
+        return None
+
+    def walk_test_suite(self, data_file_name):
+        """Iterate over the test cases in the given unit test data file."""
+        in_paragraph = False
+        descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+        with open(data_file_name, 'rb') as data_file:
+            for line_number, line in enumerate(data_file, 1):
+                line = line.rstrip(b'\r\n')
+                if not line:
+                    in_paragraph = False
+                    continue
+                if line.startswith(b'#'):
+                    continue
+                if not in_paragraph:
+                    # This is a test case description line.
+                    self.process_test_case(descriptions,
+                                           data_file_name, line_number, line)
+                in_paragraph = True
+
+    def collect_from_script(self, script_name):
+        """Collect the test cases in a script by calling its listing test cases
+option"""
+        descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+        listed = subprocess.check_output(['sh', script_name, '--list-test-cases'])
+        # Assume the script prints each test case description in the same
+        # format in its --list-test-cases output and in its OUTCOME.CSV.
+        #
+        # idx is the index of the test case, used in place of a line number
+        # since there is no one line per test case in the script.
+        for idx, line in enumerate(listed.splitlines()):
+            # We are expecting the script to list the test cases in
+            # `<suite_name>;<description>` pattern.
+            script_outputs = line.split(b';', 1)
+            if len(script_outputs) == 2:
+                suite_name, description = script_outputs
+            else:
+                raise ScriptOutputError(script_name, idx, line.decode("utf-8"))
+
+            self.process_test_case(descriptions,
+                                   suite_name.decode('utf-8'),
+                                   idx,
+                                   description.rstrip())
+
+    @staticmethod
+    def collect_test_directories():
+        """Get the relative path for the TLS and Crypto test directories."""
+        project_root = build_tree.guess_project_root()
+        if build_tree.looks_like_mbedtls_root(project_root) and not build_tree.is_mbedtls_3_6():
+            directories = [os.path.join(project_root, 'tests'),
+                           os.path.join(project_root, 'tf-psa-crypto', 'tests')]
+        else:
+            directories = [os.path.join(project_root, 'tests')]
+
+        directories = [os.path.relpath(p) for p in directories]
+        return directories
+
+    def walk_all(self):
+        """Iterate over all named test cases."""
+        test_directories = self.collect_test_directories()
+        for directory in test_directories:
+            for data_file_name in glob.glob(os.path.join(directory, 'suites',
+                                                         '*.data')):
+                self.walk_test_suite(data_file_name)
+
+            for sh_file in ['ssl-opt.sh', 'compat.sh']:
+                sh_file = os.path.join(directory, sh_file)
+                if os.path.isfile(sh_file):
+                    self.collect_from_script(sh_file)
+
+class TestDescriptions(TestDescriptionExplorer):
+    """Collect the available test cases."""
+
+    def __init__(self):
+        super().__init__()
+        self.descriptions = set()
+
+    def process_test_case(self, _per_file_state,
+                          file_name, _line_number, description):
+        """Record an available test case."""
+        base_name = re.sub(r'\.[^.]*$', '', re.sub(r'.*/', '', file_name))
+        key = ';'.join([base_name, description.decode('utf-8')])
+        self.descriptions.add(key)
+
+def collect_available_test_cases():
+    """Collect the available test cases."""
+    explorer = TestDescriptions()
+    explorer.walk_all()
+    return sorted(explorer.descriptions)
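+
+# Illustrative usage sketch: print each available test case as a
+# "<suite or script base name>;<description>" string.
+#     for key in collect_available_test_cases():
+#         print(key)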
diff --git a/framework/scripts/mbedtls_framework/config_common.py b/framework/scripts/mbedtls_framework/config_common.py
new file mode 100644
index 0000000..123785d
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/config_common.py
@@ -0,0 +1,507 @@
+"""Mbed TLS and PSA configuration file manipulation library
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import re
+import shutil
+import sys
+
+from abc import ABCMeta
+
+
+class Setting:
+    """Representation of one Mbed TLS mbedtls_config.h or PSA crypto_config.h setting.
+
+    Fields:
+    * name: the symbol name ('MBEDTLS_xxx').
+    * value: the value of the macro. The empty string for a plain #define
+      with no value.
+    * active: True if name is defined, False if a #define for name is
+      present in mbedtls_config.h but commented out.
+    * section: the name of the section that contains this symbol.
+    * configfile: the representation of the configuration file where the setting is defined
+    """
+    # pylint: disable=too-few-public-methods, too-many-arguments
+    def __init__(self, configfile, active, name, value='', section=None):
+        self.active = active
+        self.name = name
+        self.value = value
+        self.section = section
+        self.configfile = configfile
+
+
+class Config:
+    """Representation of the Mbed TLS and PSA configuration.
+
+    In the documentation of this class, a symbol is said to be *active*
+    if there is a #define for it that is not commented out, and *known*
+    if there is a #define for it whether commented out or not.
+
+    This class supports the following protocols:
+    * `name in config` is `True` if the symbol `name` is active, `False`
+      otherwise (whether `name` is inactive or not known).
+    * `config[name]` is the value of the macro `name`. If `name` is not
+      known, raise `KeyError`. `name` does not need to be active.
+    * `config[name] = value` sets the value associated with `name`. `name`
+      must be known, but does not need to be set. This does not cause
+      `name` to become set.
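+
+    Illustrative sketch (assuming a concrete subclass `MyConfig` that reads
+    a configuration file; the symbol names are examples):
+        config = MyConfig()
+        if 'MBEDTLS_HAVE_ASM' in config:
+            config.unset('MBEDTLS_HAVE_ASM')   # comment the #define out
+        config.set('MBEDTLS_TIMING_ALT')       # uncomment or add
+        config.write()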
+    """
+
+    def __init__(self):
+        self.settings = {}
+        self.configfiles = []
+
+    def __contains__(self, name):
+        """True if the given symbol is active (i.e. set).
+
+        False if the given symbol is not set, even if a definition
+        is present but commented out.
+        """
+        return name in self.settings and self.settings[name].active
+
+    def all(self, *names):
+        """True if all the elements of names are active (i.e. set)."""
+        return all(name in self for name in names)
+
+    def any(self, *names):
+        """True if at least one symbol in names are active (i.e. set)."""
+        return any(name in self for name in names)
+
+    def known(self, name):
+        """True if a #define for name is present, whether it's commented out or not."""
+        return name in self.settings
+
+    def __getitem__(self, name):
+        """Get the value of name, i.e. what the preprocessor symbol expands to.
+
+        If name is not known, raise KeyError. name does not need to be active.
+        """
+        return self.settings[name].value
+
+    def get(self, name, default=None):
+        """Get the value of name. If name is inactive (not set), return default.
+
+        If a #define for name is present and not commented out, return
+        its expansion, even if this is the empty string.
+
+        If a #define for name is present but commented out, return default.
+        """
+        if name in self:
+            return self.settings[name].value
+        else:
+            return default
+
+    def get_matching(self, regexs, only_enabled):
+        """Get all symbols matching one of the regexs."""
+        if not regexs:
+            return
+        regex = re.compile('|'.join(regexs))
+        for setting in self.settings.values():
+            if regex.search(setting.name):
+                if setting.active or not only_enabled:
+                    yield setting.name
+
+    def __setitem__(self, name, value):
+        """If name is known, set its value.
+
+        If name is not known, raise KeyError.
+        """
+        setting = self.settings[name]
+        if setting.value != value:
+            setting.configfile.modified = True
+
+        setting.value = value
+
+    def set(self, name, value=None):
+        """Set name to the given value and make it active.
+
+        If value is None and name is already known, don't change its value.
+        If value is None and name is not known, add it as a plain
+        #define with no value.
+        """
+        if name in self.settings:
+            setting = self.settings[name]
+            if (value is not None and setting.value != value) or not setting.active:
+                setting.configfile.modified = True
+            if value is not None:
+                setting.value = value
+            setting.active = True
+        else:
+            configfile = self._get_configfile(name)
+            self.settings[name] = Setting(configfile, True, name, value=value)
+            configfile.modified = True
+
+    def unset(self, name):
+        """Make name unset (inactive).
+
+        name remains known if it was known before.
+        """
+        if name not in self.settings:
+            return
+
+        setting = self.settings[name]
+        # Check if modifying the config file
+        if setting.active:
+            setting.configfile.modified = True
+
+        setting.active = False
+
+    def adapt(self, adapter):
+        """Run adapter on each known symbol and (de)activate it accordingly.
+
+        `adapter` must be a function that returns a boolean. It is called as
+        `adapter(name, value, active)` for each setting, where
+        `value` is the macro's expansion (possibly empty), and `active` is
+        `True` if `name` is set and `False` if `name` is known but unset.
+        If `adapter` returns `True`, then set `name` (i.e. make it active),
+        otherwise unset `name` (i.e. make it known but inactive).
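+
+        Illustrative adapter that activates every symbol whose name
+        contains 'PSA' and deactivates all others:
+            def psa_only(name, _value, _active):
+                return 'PSA' in name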
+        """
+        for setting in self.settings.values():
+            is_active = setting.active
+            setting.active = adapter(setting.name, setting.value,
+                                     setting.active)
+            # Check if modifying the config file
+            if setting.active != is_active:
+                setting.configfile.modified = True
+
+    def change_matching(self, regexs, enable):
+        """Change all symbols matching one of the regexs to the desired state."""
+        if not regexs:
+            return
+        regex = re.compile('|'.join(regexs))
+        for setting in self.settings.values():
+            if regex.search(setting.name):
+                # Check if modifying the config file
+                if setting.active != enable:
+                    setting.configfile.modified = True
+                setting.active = enable
+
+    def _get_configfile(self, name=None):
+        """Get the representation of the configuration file name belongs to
+
+        If the configuration is spread among several configuration files, this
+        function may need to be overridden for the case of an unknown setting.
+        """
+
+        if name and name in self.settings:
+            return self.settings[name].configfile
+        return self.configfiles[0]
+
+    def write(self, filename=None):
+        """Write the whole configuration to the file(s) it was read from.
+
+        If filename is specified, write to that file instead.
+        """
+
+        for configfile in self.configfiles:
+            configfile.write(self.settings, filename)
+
+    def filename(self, name=None):
+        """Get the name of the config file where the setting name is defined."""
+
+        return self._get_configfile(name).filename
+
+    def backup(self, suffix='.bak'):
+        """Back up the configuration file."""
+
+        for configfile in self.configfiles:
+            configfile.backup(suffix)
+
+    def restore(self):
+        """Restore the configuration file."""
+
+        for configfile in self.configfiles:
+            configfile.restore()
+
+
+class ConfigFile(metaclass=ABCMeta):
+    """Representation of a configuration file."""
+
+    def __init__(self, default_path, name, filename=None):
+        """Check if the config file exists."""
+        if filename is None:
+            for candidate in default_path:
+                if os.path.lexists(candidate):
+                    filename = candidate
+                    break
+
+        if filename is None or not os.path.lexists(filename):
+            raise FileNotFoundError(f'{name} configuration file not found: '
+                                    f'{filename if filename else default_path}')
+
+        self.filename = filename
+        self.templates = []
+        self.current_section = None
+        self.inclusion_guard = None
+        self.modified = False
+        self._backupname = None
+        self._own_backup = False
+
+    _define_line_regexp = (r'(?P<indentation>\s*)' +
+                           r'(?P<commented_out>(//\s*)?)' +
+                           r'(?P<define>#\s*define\s+)' +
+                           r'(?P<name>\w+)' +
+                           r'(?P<arguments>(?:\((?:\w|\s|,)*\))?)' +
+                           r'(?P<separator>\s*)' +
+                           r'(?P<value>.*)')
+    _ifndef_line_regexp = r'#ifndef (?P<inclusion_guard>\w+)'
+    _section_line_regexp = (r'\s*/?\*+\s*[\\@]name\s+SECTION:\s*' +
+                            r'(?P<section>.*)[ */]*')
+    _config_line_regexp = re.compile(r'|'.join([_define_line_regexp,
+                                                _ifndef_line_regexp,
+                                                _section_line_regexp]))
+    def _parse_line(self, line):
+        """Parse a line in the config file, save the templates representing the lines
+           and return the corresponding setting element.
+        """
+
+        line = line.rstrip('\r\n')
+        m = re.match(self._config_line_regexp, line)
+        if m is None:
+            self.templates.append(line)
+            return None
+        elif m.group('section'):
+            self.current_section = m.group('section')
+            self.templates.append(line)
+            return None
+        elif m.group('inclusion_guard') and self.inclusion_guard is None:
+            self.inclusion_guard = m.group('inclusion_guard')
+            self.templates.append(line)
+            return None
+        else:
+            active = not m.group('commented_out')
+            name = m.group('name')
+            value = m.group('value')
+            if name == self.inclusion_guard and value == '':
+                # The file double-inclusion guard is not an option.
+                self.templates.append(line)
+                return None
+            template = (name,
+                        m.group('indentation'),
+                        m.group('define') + name +
+                        m.group('arguments') + m.group('separator'))
+            self.templates.append(template)
+
+            return (active, name, value, self.current_section)
+
+    def parse_file(self):
+        """Parse the whole file and return the settings."""
+
+        with open(self.filename, 'r', encoding='utf-8') as file:
+            for line in file:
+                setting = self._parse_line(line)
+                if setting is not None:
+                    yield setting
+        self.current_section = None
+
+    #pylint: disable=no-self-use
+    def _format_template(self, setting, indent, middle):
+        """Build a line for the config file for the given setting.
+
+        The line has the form "<indent>#define <name> <value>"
+        where <middle> is "#define <name> ".
+        """
+
+        value = setting.value
+        if value is None:
+            value = ''
+        # Normally the whitespace to separate the symbol name from the
+        # value is part of middle, and there's no whitespace for a symbol
+        # with no value. But if a symbol has been changed from having a
+        # value to not having one, the whitespace is wrong, so fix it.
+        if value:
+            if middle[-1] not in '\t ':
+                middle += ' '
+        else:
+            middle = middle.rstrip()
+        return ''.join([indent,
+                        '' if setting.active else '//',
+                        middle,
+                        value]).rstrip()
+
+    def write_to_stream(self, settings, output):
+        """Write the whole configuration to output."""
+
+        for template in self.templates:
+            if isinstance(template, str):
+                line = template
+            else:
+                name, indent, middle = template
+                line = self._format_template(settings[name], indent, middle)
+            output.write(line + '\n')
+
+    def write(self, settings, filename=None):
+        """Write the whole configuration to the file it was read from.
+
+        If filename is specified, write to this file instead.
+        """
+
+        if filename is None:
+            filename = self.filename
+
+        # Not modified so no need to write to the file
+        if not self.modified and filename == self.filename:
+            return
+
+        with open(filename, 'w', encoding='utf-8') as output:
+            self.write_to_stream(settings, output)
+
+    def backup(self, suffix='.bak'):
+        """Back up the configuration file.
+
+        If the backup file already exists, it is presumed to be the desired backup,
+        so don't make another backup.
+        """
+        if self._backupname:
+            return
+
+        self._backupname = self.filename + suffix
+        if os.path.exists(self._backupname):
+            self._own_backup = False
+        else:
+            self._own_backup = True
+            shutil.copy(self.filename, self._backupname)
+
+    def restore(self):
+        """Restore the configuration file.
+
+        Only delete the backup file if it was created earlier.
+        """
+        if not self._backupname:
+            return
+
+        if self._own_backup:
+            shutil.move(self._backupname, self.filename)
+        else:
+            shutil.copy(self._backupname, self.filename)
+
+        self._backupname = None
+
+
+class ConfigTool(metaclass=ABCMeta):
+    """Command line config manipulation tool.
+
+    Custom parser options can be added by overriding 'custom_parser_options'.
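+
+    Illustrative subclass sketch (the adapter function is hypothetical):
+        class MyConfigTool(ConfigTool):
+            def custom_parser_options(self):
+                self.add_adapter('full', my_full_adapter,
+                                 'Activate all boolean options.')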
+    """
+
+    def __init__(self, default_file_path):
+        """Create parser for config manipulation tool.
+
+        :param default_file_path: Default configuration file path
+        """
+
+        self.parser = argparse.ArgumentParser(description="""
+                                              Configuration file manipulation tool.""")
+        self.subparsers = self.parser.add_subparsers(dest='command',
+                                                     title='Commands')
+        self._common_parser_options(default_file_path)
+        self.custom_parser_options()
+        self.args = self.parser.parse_args()
+        self.config = Config() # Make pylint happy
+
+    def add_adapter(self, name, function, description):
+        """Creates a command in the tool for a configuration adapter."""
+
+        subparser = self.subparsers.add_parser(name, help=description)
+        subparser.set_defaults(adapter=function)
+
+    def _common_parser_options(self, default_file_path):
+        # pylint: disable=too-many-branches
+        """Common parser options for config manipulation tool."""
+
+        self.parser.add_argument(
+            '--file', '-f',
+            help="""File to read (and modify if requested). Default: {}.
+                 """.format(default_file_path))
+        self.parser.add_argument(
+            '--force', '-o',
+            action='store_true',
+            help="""For the set command, if SYMBOL is not present, add a definition for it.""")
+        self.parser.add_argument(
+            '--write', '-w',
+            metavar='FILE',
+            help="""File to write to instead of the input file.""")
+
+        parser_get = self.subparsers.add_parser(
+            'get',
+            help="""Find the value of SYMBOL and print it. Exit with
+                 status 0 if a #define for SYMBOL is found, 1 otherwise.""")
+        parser_get.add_argument('symbol', metavar='SYMBOL')
+        parser_set = self.subparsers.add_parser(
+            'set',
+            help="""Set SYMBOL to VALUE. If VALUE is omitted, just uncomment
+                 the #define for SYMBOL. Error out if a line defining
+                 SYMBOL (commented or not) is not found, unless --force is passed.""")
+        parser_set.add_argument('symbol', metavar='SYMBOL')
+        parser_set.add_argument('value', metavar='VALUE', nargs='?', default='')
+        parser_set_all = self.subparsers.add_parser(
+            'set-all',
+            help="""Uncomment all #define whose name contains a match for REGEX.""")
+        parser_set_all.add_argument('regexs', metavar='REGEX', nargs='*')
+        parser_unset = self.subparsers.add_parser(
+            'unset',
+            help="""Comment out the #define for SYMBOL. Do nothing if none is present.""")
+        parser_unset.add_argument('symbol', metavar='SYMBOL')
+        parser_unset_all = self.subparsers.add_parser(
+            'unset-all',
+            help="""Comment out all #define whose name contains a match for REGEX.""")
+        parser_unset_all.add_argument('regexs', metavar='REGEX', nargs='*')
+        parser_get_all = self.subparsers.add_parser(
+            'get-all',
+            help="""Get all #define whose name contains a match for REGEX.""")
+        parser_get_all.add_argument('regexs', metavar='REGEX', nargs='*')
+        parser_get_all_enabled = self.subparsers.add_parser(
+            'get-all-enabled',
+            help="""Get all enabled #define whose name contains a match for REGEX.""")
+        parser_get_all_enabled.add_argument('regexs', metavar='REGEX', nargs='*')
+
+
+    def custom_parser_options(self):
+        """Adds custom options for the parser. Designed for overridden by descendant."""
+        pass
+
+    def main(self):
+        # pylint: disable=too-many-branches
+        """Common main fuction for config manipulation tool."""
+
+        args = self.args
+        config = self.config
+
+        if args.command is None:
+            self.parser.print_help()
+            return 1
+        if args.command == 'get':
+            if args.symbol in config:
+                value = config[args.symbol]
+                if value:
+                    sys.stdout.write(value + '\n')
+            return 0 if args.symbol in config else 1
+        elif args.command == 'get-all':
+            match_list = config.get_matching(args.regexs, False)
+            sys.stdout.write("\n".join(match_list))
+        elif args.command == 'get-all-enabled':
+            match_list = config.get_matching(args.regexs, True)
+            sys.stdout.write("\n".join(match_list))
+        elif args.command == 'set':
+            if not args.force and args.symbol not in config.settings:
+                sys.stderr.write(
+                    "A #define for the symbol {} was not found in {}\n"
+                    .format(args.symbol,
+                            config.filename(args.symbol)))
+                return 1
+            config.set(args.symbol, value=args.value)
+        elif args.command == 'set-all':
+            config.change_matching(args.regexs, True)
+        elif args.command == 'unset':
+            config.unset(args.symbol)
+        elif args.command == 'unset-all':
+            config.change_matching(args.regexs, False)
+        else:
+            config.adapt(args.adapter)
+        config.write(args.write)
+
+        return 0
diff --git a/framework/scripts/mbedtls_framework/crypto_data_tests.py b/framework/scripts/mbedtls_framework/crypto_data_tests.py
new file mode 100644
index 0000000..1d46e3f
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/crypto_data_tests.py
@@ -0,0 +1,101 @@
+"""Generate test data for cryptographic mechanisms.
+
+This module is a work in progress, only implementing a few cases for now.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import hashlib
+from typing import Callable, Dict, Iterator, List, Optional #pylint: disable=unused-import
+
+from . import crypto_knowledge
+from . import psa_information
+from . import psa_test_case
+from . import test_case
+
+
+class HashPSALowLevel:
+    """Generate test cases for the PSA low-level hash interface."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        self.info = info
+        base_algorithms = sorted(info.constructors.algorithms)
+        all_algorithms = \
+            [crypto_knowledge.Algorithm(expr)
+             for expr in info.constructors.generate_expressions(base_algorithms)]
+        self.algorithms = \
+            [alg
+             for alg in all_algorithms
+             if (not alg.is_wildcard and
+                 alg.can_do(crypto_knowledge.AlgorithmCategory.HASH))]
+
+    # CALCULATE[alg] = function to return the hash of its argument in hex
+    # TO-DO: implement the None entries with a third-party library, because
+    # hashlib might not have everything, depending on the Python version and
+    # the underlying OpenSSL. On Ubuntu 16.04, truncated sha512 and sha3/shake
+    # are not available. On Ubuntu 22.04, md2, md4 and ripemd160 are not
+    # available.
+    CALCULATE = {
+        'PSA_ALG_MD5': lambda data: hashlib.md5(data).hexdigest(),
+        'PSA_ALG_RIPEMD160': None, #lambda data: hashlib.new('ripemd160', data).hexdigest()
+        'PSA_ALG_SHA_1': lambda data: hashlib.sha1(data).hexdigest(),
+        'PSA_ALG_SHA_224': lambda data: hashlib.sha224(data).hexdigest(),
+        'PSA_ALG_SHA_256': lambda data: hashlib.sha256(data).hexdigest(),
+        'PSA_ALG_SHA_384': lambda data: hashlib.sha384(data).hexdigest(),
+        'PSA_ALG_SHA_512': lambda data: hashlib.sha512(data).hexdigest(),
+        'PSA_ALG_SHA_512_224': None, #lambda data: hashlib.new('sha512_224', data).hexdigest()
+        'PSA_ALG_SHA_512_256': None, #lambda data: hashlib.new('sha512_256', data).hexdigest()
+        'PSA_ALG_SHA3_224': None, #lambda data: hashlib.sha3_224(data).hexdigest(),
+        'PSA_ALG_SHA3_256': None, #lambda data: hashlib.sha3_256(data).hexdigest(),
+        'PSA_ALG_SHA3_384': None, #lambda data: hashlib.sha3_384(data).hexdigest(),
+        'PSA_ALG_SHA3_512': None, #lambda data: hashlib.sha3_512(data).hexdigest(),
+        'PSA_ALG_SHAKE256_512': None, #lambda data: hashlib.shake_256(data).hexdigest(64),
+    } #type: Dict[str, Optional[Callable[[bytes], str]]]
+
+    @staticmethod
+    def one_test_case(alg: crypto_knowledge.Algorithm,
+                      function: str, note: str,
+                      arguments: List[str]) -> test_case.TestCase:
+        """Construct one test case involving a hash."""
+        tc = psa_test_case.TestCase(dependency_prefix='MBEDTLS_PSA_BUILTIN_')
+        tc.set_description('{}{} {}'
+                           .format(function,
+                                   ' ' + note if note else '',
+                                   alg.short_expression()))
+        tc.set_function(function)
+        tc.set_arguments([alg.expression] +
+                         ['"{}"'.format(arg) for arg in arguments])
+        return tc
+
+    def test_cases_for_hash(self,
+                            alg: crypto_knowledge.Algorithm
+                            ) -> Iterator[test_case.TestCase]:
+        """Enumerate all test cases for one hash algorithm."""
+        calc = self.CALCULATE[alg.expression]
+        if calc is None:
+            return # not implemented yet
+
+        short = b'abc'
+        hash_short = calc(short)
+        long = (b'Hello, world. Here are 16 unprintable bytes: ['
+                b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
+                b'\x80\x81\x82\x83\xfe\xff]. '
+                b' This message was brought to you by a natural intelligence. '
+                b' If you can read this, good luck with your debugging!')
+        hash_long = calc(long)
+
+        yield self.one_test_case(alg, 'hash_empty', '', [calc(b'')])
+        yield self.one_test_case(alg, 'hash_valid_one_shot', '',
+                                 [short.hex(), hash_short])
+        for n in [0, 1, 64, len(long) - 1, len(long)]:
+            yield self.one_test_case(alg, 'hash_valid_multipart',
+                                     '{} + {}'.format(n, len(long) - n),
+                                     [long[:n].hex(), calc(long[:n]),
+                                      long[n:].hex(), hash_long])
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Enumerate all test cases for all hash algorithms."""
+        for alg in self.algorithms:
+            yield from self.test_cases_for_hash(alg)
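+
+# Illustrative usage sketch (assumes the surrounding test generation
+# framework provides a populated psa_information.Information):
+#     info = psa_information.Information()
+#     for tc in HashPSALowLevel(info).all_test_cases():
+#         ...  # e.g. write tc to a generated .data file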
diff --git a/framework/scripts/mbedtls_framework/crypto_knowledge.py b/framework/scripts/mbedtls_framework/crypto_knowledge.py
new file mode 100644
index 0000000..ebfd55c
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/crypto_knowledge.py
@@ -0,0 +1,568 @@
+"""Knowledge about cryptographic mechanisms implemented in Mbed TLS.
+
+This module is entirely based on the PSA API.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import enum
+import re
+from typing import FrozenSet, Iterable, List, Optional, Tuple, Dict
+
+from .asymmetric_key_data import ASYMMETRIC_KEY_DATA
+
+
+def short_expression(original: str, level: int = 0) -> str:
+    """Abbreviate the expression, keeping it human-readable.
+
+    If `level` is 0, just remove parts that are implicit from context,
+    such as a leading ``PSA_KEY_TYPE_``.
+    For larger values of `level`, also abbreviate some names in an
+    unambiguous, but ad hoc way.
+    """
+    short = original
+    short = re.sub(r'\bPSA_(?:ALG|DH_FAMILY|ECC_FAMILY|KEY_[A-Z]+)_', r'', short)
+    short = re.sub(r' +', r'', short)
+    if level >= 1:
+        short = re.sub(r'PUBLIC_KEY\b', r'PUB', short)
+        short = re.sub(r'KEY_PAIR\b', r'PAIR', short)
+        short = re.sub(r'\bBRAINPOOL_P', r'BP', short)
+        short = re.sub(r'\bMONTGOMERY\b', r'MGM', short)
+        short = re.sub(r'AEAD_WITH_SHORTENED_TAG\b', r'AEAD_SHORT', short)
+        short = re.sub(r'\bDETERMINISTIC_', r'DET_', short)
+        short = re.sub(r'\bKEY_AGREEMENT\b', r'KA', short)
+        short = re.sub(r'_PSK_TO_MS\b', r'_PSK2MS', short)
+    return short
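+
+# For example, for 'PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_BRAINPOOL_P_R1)',
+# level 0 gives 'ECC_KEY_PAIR(BRAINPOOL_P_R1)' and level 1 gives
+# 'ECC_PAIR(BP_R1)'.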
+
+
+BLOCK_CIPHERS = frozenset(['AES', 'ARIA', 'CAMELLIA', 'DES'])
+BLOCK_MAC_MODES = frozenset(['CBC_MAC', 'CMAC'])
+BLOCK_CIPHER_MODES = frozenset([
+    'CTR', 'CFB', 'OFB', 'XTS', 'CCM_STAR_NO_TAG',
+    'ECB_NO_PADDING', 'CBC_NO_PADDING', 'CBC_PKCS7',
+])
+BLOCK_AEAD_MODES = frozenset(['CCM', 'GCM'])
+
+class EllipticCurveCategory(enum.Enum):
+    """Categorization of elliptic curve families.
+
+    The category of a curve determines what algorithms are defined over it.
+    """
+
+    SHORT_WEIERSTRASS = 0
+    MONTGOMERY = 1
+    TWISTED_EDWARDS = 2
+
+    @staticmethod
+    def from_family(family: str) -> 'EllipticCurveCategory':
+        if family == 'PSA_ECC_FAMILY_MONTGOMERY':
+            return EllipticCurveCategory.MONTGOMERY
+        if family == 'PSA_ECC_FAMILY_TWISTED_EDWARDS':
+            return EllipticCurveCategory.TWISTED_EDWARDS
+        # Default to SW, which most curves belong to.
+        return EllipticCurveCategory.SHORT_WEIERSTRASS
+
+
+class KeyType:
+    """Knowledge about a PSA key type."""
+
+    def __init__(self, name: str, params: Optional[Iterable[str]] = None) -> None:
+        """Analyze a key type.
+
+        The key type must be specified in PSA syntax. In its simplest form,
+        `name` is a string 'PSA_KEY_TYPE_xxx' which is the name of a PSA key
+        type macro. For key types that take arguments, the arguments can
+        be passed either through the optional argument `params` or by
+        passing an expression of the form 'PSA_KEY_TYPE_xxx(param1, ...)'
+        in `name` as a string.
+        """
+
+        self.name = name.strip()
+        """The key type macro name (``PSA_KEY_TYPE_xxx``).
+
+        For key types constructed from a macro with arguments, this is the
+        name of the macro, and the arguments are in `self.params`.
+        """
+        if params is None:
+            if '(' in self.name:
+                m = re.match(r'(\w+)\s*\((.*)\)\Z', self.name)
+                assert m is not None
+                self.name = m.group(1)
+                params = m.group(2).split(',')
+        self.params = (None if params is None else
+                       [param.strip() for param in params])
+        """The parameters of the key type, if there are any.
+
+        None if the key type is a macro without arguments.
+        """
+        assert re.match(r'PSA_KEY_TYPE_\w+\Z', self.name)
+
+        self.expression = self.name
+        """A C expression whose value is the key type encoding."""
+        if self.params is not None:
+            self.expression += '(' + ', '.join(self.params) + ')'
+
+        m = re.match(r'PSA_KEY_TYPE_(\w+)', self.name)
+        assert m
+        self.head = re.sub(r'_(?:PUBLIC_KEY|KEY_PAIR)\Z', r'', m.group(1))
+        """The key type macro name, with common prefixes and suffixes stripped."""
+
+        self.private_type = re.sub(r'_PUBLIC_KEY\Z', r'_KEY_PAIR', self.name)
+        """The key type macro name for the corresponding key pair type.
+
+        For everything other than a public key type, this is the same as
+        `self.name`.
+        """
+
+    def short_expression(self, level: int = 0) -> str:
+        """Abbreviate the expression, keeping it human-readable.
+
+        See `crypto_knowledge.short_expression`.
+        """
+        return short_expression(self.expression, level=level)
+
+    def is_public(self) -> bool:
+        """Whether the key type is for public keys."""
+        return self.name.endswith('_PUBLIC_KEY')
+
+    DH_KEY_SIZES = {
+        'PSA_DH_FAMILY_RFC7919': (2048, 3072, 4096, 6144, 8192),
+    } # type: Dict[str, Tuple[int, ...]]
+    ECC_KEY_SIZES = {
+        'PSA_ECC_FAMILY_SECP_K1': (192, 225, 256),
+        'PSA_ECC_FAMILY_SECP_R1': (224, 256, 384, 521),
+        'PSA_ECC_FAMILY_SECP_R2': (160,),
+        'PSA_ECC_FAMILY_SECT_K1': (163, 233, 239, 283, 409, 571),
+        'PSA_ECC_FAMILY_SECT_R1': (163, 233, 283, 409, 571),
+        'PSA_ECC_FAMILY_SECT_R2': (163,),
+        'PSA_ECC_FAMILY_BRAINPOOL_P_R1': (160, 192, 224, 256, 320, 384, 512),
+        'PSA_ECC_FAMILY_MONTGOMERY': (255, 448),
+        'PSA_ECC_FAMILY_TWISTED_EDWARDS': (255, 448),
+    } # type: Dict[str, Tuple[int, ...]]
+    KEY_TYPE_SIZES = {
+        'PSA_KEY_TYPE_AES': (128, 192, 256), # exhaustive
+        'PSA_KEY_TYPE_ARIA': (128, 192, 256), # exhaustive
+        'PSA_KEY_TYPE_CAMELLIA': (128, 192, 256), # exhaustive
+        'PSA_KEY_TYPE_CHACHA20': (256,), # exhaustive
+        'PSA_KEY_TYPE_DERIVE': (120, 128), # sample
+        'PSA_KEY_TYPE_DES': (64, 128, 192), # exhaustive
+        'PSA_KEY_TYPE_HMAC': (128, 160, 224, 256, 384, 512), # standard size for each supported hash
+        'PSA_KEY_TYPE_PASSWORD': (48, 168, 336), # sample
+        'PSA_KEY_TYPE_PASSWORD_HASH': (128, 256), # sample
+        'PSA_KEY_TYPE_PEPPER': (128, 256), # sample
+        'PSA_KEY_TYPE_RAW_DATA': (8, 40, 128), # sample
+        'PSA_KEY_TYPE_RSA_KEY_PAIR': (1024, 1536), # small sample
+    } # type: Dict[str, Tuple[int, ...]]
+    def sizes_to_test(self) -> Tuple[int, ...]:
+        """Return a tuple of key sizes to test.
+
+        For key types that only allow a single size, or only a small set of
+        sizes, these are all the possible sizes. For key types that allow a
+        wide range of sizes, these are a representative sample of sizes,
+        excluding large sizes for which a typical resource-constrained platform
+        may run out of memory.
+        """
+        if self.private_type == 'PSA_KEY_TYPE_ECC_KEY_PAIR':
+            assert self.params is not None
+            return self.ECC_KEY_SIZES[self.params[0]]
+        if self.private_type == 'PSA_KEY_TYPE_DH_KEY_PAIR':
+            assert self.params is not None
+            return self.DH_KEY_SIZES[self.params[0]]
+        return self.KEY_TYPE_SIZES[self.private_type]
+
+    # "48657265006973206b6579a064617461"
+    DATA_BLOCK = b'Here\000is key\240data'
+    def key_material(self, bits: int) -> bytes:
+        """Return a byte string containing suitable key material with the given bit length.
+
+        Use the PSA export representation. The resulting byte string is one that
+        can be obtained with the following code:
+        ```
+        psa_set_key_type(&attributes, `self.expression`);
+        psa_set_key_bits(&attributes, `bits`);
+        psa_set_key_usage_flags(&attributes, PSA_KEY_USAGE_EXPORT);
+        psa_generate_key(&attributes, &id);
+        psa_export_key(id, `material`, ...);
+        ```
+        """
+        if self.expression in ASYMMETRIC_KEY_DATA:
+            if bits not in ASYMMETRIC_KEY_DATA[self.expression]:
+                raise ValueError('No key data for {}-bit {}'
+                                 .format(bits, self.expression))
+            return ASYMMETRIC_KEY_DATA[self.expression][bits]
+        if bits % 8 != 0:
+            raise ValueError('Non-integer number of bytes: {} bits for {}'
+                             .format(bits, self.expression))
+        length = bits // 8
+        if self.name == 'PSA_KEY_TYPE_DES':
+            # "644573206b457901644573206b457902644573206b457904"
+            des3 = b'dEs kEy\001dEs kEy\002dEs kEy\004'
+            return des3[:length]
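+        # For other symmetric key types, repeat the 16-byte DATA_BLOCK and
+        # truncate to the requested length; e.g. key_material(128) for an
+        # AES key returns DATA_BLOCK itself.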
+        return b''.join([self.DATA_BLOCK] * (length // len(self.DATA_BLOCK)) +
+                        [self.DATA_BLOCK[:length % len(self.DATA_BLOCK)]])
+
+    def can_do(self, alg: 'Algorithm') -> bool:
+        """Whether this key type can be used for operations with the given algorithm.
+
+        This function does not currently handle key derivation or PAKE.
+        """
+        #pylint: disable=too-many-branches,too-many-return-statements
+        if not alg.is_valid_for_operation():
+            return False
+        if self.head == 'HMAC' and alg.head == 'HMAC':
+            return True
+        if self.head == 'DES':
+            # 64-bit block ciphers only allow a reduced set of modes.
+            return alg.head in [
+                'CBC_NO_PADDING', 'CBC_PKCS7',
+                'ECB_NO_PADDING',
+            ]
+        if self.head in BLOCK_CIPHERS and \
+           alg.head in frozenset.union(BLOCK_MAC_MODES,
+                                       BLOCK_CIPHER_MODES,
+                                       BLOCK_AEAD_MODES):
+            if alg.head in ['CMAC', 'OFB'] and \
+               self.head in ['ARIA', 'CAMELLIA']:
+                return False # not implemented in Mbed TLS
+            return True
+        if self.head == 'CHACHA20' and alg.head == 'CHACHA20_POLY1305':
+            return True
+        if self.head in {'ARC4', 'CHACHA20'} and \
+           alg.head == 'STREAM_CIPHER':
+            return True
+        if self.head == 'RSA' and alg.head.startswith('RSA_'):
+            return True
+        if alg.category == AlgorithmCategory.KEY_AGREEMENT and \
+           self.is_public():
+            # The PSA API does not use public key objects in key agreement
+            # operations: it imports the public key as a formatted byte string.
+            # So a public key object with a key agreement algorithm is not
+            # a valid combination.
+            return False
+        if alg.is_invalid_key_agreement_with_derivation():
+            return False
+        if self.head == 'ECC':
+            assert self.params is not None
+            eccc = EllipticCurveCategory.from_family(self.params[0])
+            if alg.head == 'ECDH' and \
+               eccc in {EllipticCurveCategory.SHORT_WEIERSTRASS,
+                        EllipticCurveCategory.MONTGOMERY}:
+                return True
+            if alg.head == 'ECDSA' and \
+               eccc == EllipticCurveCategory.SHORT_WEIERSTRASS:
+                return True
+            if alg.head in {'PURE_EDDSA', 'EDDSA_PREHASH'} and \
+               eccc == EllipticCurveCategory.TWISTED_EDWARDS:
+                return True
+        if self.head == 'DH' and alg.head == 'FFDH':
+            return True
+        return False
+
+
+class AlgorithmCategory(enum.Enum):
+    """PSA algorithm categories."""
+    # The numbers are aligned with the category bits in numerical values of
+    # algorithms.
+    HASH = 2
+    MAC = 3
+    CIPHER = 4
+    AEAD = 5
+    SIGN = 6
+    ASYMMETRIC_ENCRYPTION = 7
+    KEY_DERIVATION = 8
+    KEY_AGREEMENT = 9
+    PAKE = 10
+
+    def requires_key(self) -> bool:
+        """Whether operations in this category are set up with a key."""
+        return self not in {self.HASH, self.KEY_DERIVATION}
+
+    def is_asymmetric(self) -> bool:
+        """Whether operations in this category involve asymmetric keys."""
+        return self in {
+            self.SIGN,
+            self.ASYMMETRIC_ENCRYPTION,
+            self.KEY_AGREEMENT
+        }
+
+
+class AlgorithmNotRecognized(Exception):
+    def __init__(self, expr: str) -> None:
+        super().__init__('Algorithm not recognized: ' + expr)
+        self.expr = expr
+
+
+class Algorithm:
+    """Knowledge about a PSA algorithm."""
+
+    @staticmethod
+    def determine_base(expr: str) -> str:
+        """Return an expression for the "base" of the algorithm.
+
+        This strips off variants of algorithms such as MAC truncation.
+
+        This function does not attempt to detect invalid inputs.
+        """
+        m = re.match(r'PSA_ALG_(?:'
+                     r'(?:TRUNCATED|AT_LEAST_THIS_LENGTH)_MAC|'
+                     r'AEAD_WITH_(?:SHORTENED|AT_LEAST_THIS_LENGTH)_TAG'
+                     r')\((.*),[^,]+\)\Z', expr)
+        if m:
+            expr = m.group(1)
+        return expr
+
+    @staticmethod
+    def determine_head(expr: str) -> str:
+        """Return the head of an algorithm expression.
+
+        The head is the first (outermost) constructor, without its PSA_ALG_
+        prefix, and with some normalization of similar algorithms.
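+
+        For example, both 'PSA_ALG_ECDSA(PSA_ALG_SHA_256)' and
+        'PSA_ALG_DETERMINISTIC_ECDSA(PSA_ALG_SHA_256)' have the head 'ECDSA'.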
+        """
+        m = re.match(r'PSA_ALG_(?:DETERMINISTIC_)?(\w+)', expr)
+        if not m:
+            raise AlgorithmNotRecognized(expr)
+        head = m.group(1)
+        if head == 'KEY_AGREEMENT':
+            m = re.match(r'PSA_ALG_KEY_AGREEMENT\s*\(\s*PSA_ALG_(\w+)', expr)
+            if not m:
+                raise AlgorithmNotRecognized(expr)
+            head = m.group(1)
+        head = re.sub(r'_ANY\Z', r'', head)
+        if re.match(r'ED[0-9]+PH\Z', head):
+            head = 'EDDSA_PREHASH'
+        return head
+
+    CATEGORY_FROM_HEAD = {
+        'SHA': AlgorithmCategory.HASH,
+        'SHAKE256_512': AlgorithmCategory.HASH,
+        'MD': AlgorithmCategory.HASH,
+        'RIPEMD': AlgorithmCategory.HASH,
+        'ANY_HASH': AlgorithmCategory.HASH,
+        'HMAC': AlgorithmCategory.MAC,
+        'STREAM_CIPHER': AlgorithmCategory.CIPHER,
+        'CHACHA20_POLY1305': AlgorithmCategory.AEAD,
+        'DSA': AlgorithmCategory.SIGN,
+        'ECDSA': AlgorithmCategory.SIGN,
+        'EDDSA': AlgorithmCategory.SIGN,
+        'PURE_EDDSA': AlgorithmCategory.SIGN,
+        'RSA_PSS': AlgorithmCategory.SIGN,
+        'RSA_PKCS1V15_SIGN': AlgorithmCategory.SIGN,
+        'RSA_PKCS1V15_CRYPT': AlgorithmCategory.ASYMMETRIC_ENCRYPTION,
+        'RSA_OAEP': AlgorithmCategory.ASYMMETRIC_ENCRYPTION,
+        'HKDF': AlgorithmCategory.KEY_DERIVATION,
+        'TLS12_PRF': AlgorithmCategory.KEY_DERIVATION,
+        'TLS12_PSK_TO_MS': AlgorithmCategory.KEY_DERIVATION,
+        'TLS12_ECJPAKE_TO_PMS': AlgorithmCategory.KEY_DERIVATION,
+        'PBKDF': AlgorithmCategory.KEY_DERIVATION,
+        'ECDH': AlgorithmCategory.KEY_AGREEMENT,
+        'FFDH': AlgorithmCategory.KEY_AGREEMENT,
+        # KEY_AGREEMENT(...) is a key derivation with a key agreement component
+        'KEY_AGREEMENT': AlgorithmCategory.KEY_DERIVATION,
+        'JPAKE': AlgorithmCategory.PAKE,
+    }
+    for x in BLOCK_MAC_MODES:
+        CATEGORY_FROM_HEAD[x] = AlgorithmCategory.MAC
+    for x in BLOCK_CIPHER_MODES:
+        CATEGORY_FROM_HEAD[x] = AlgorithmCategory.CIPHER
+    for x in BLOCK_AEAD_MODES:
+        CATEGORY_FROM_HEAD[x] = AlgorithmCategory.AEAD
+
+    def determine_category(self, expr: str, head: str) -> AlgorithmCategory:
+        """Return the category of the given algorithm expression.
+
+        This function does not attempt to detect invalid inputs.
+        """
+        prefix = head
+        while prefix:
+            if prefix in self.CATEGORY_FROM_HEAD:
+                return self.CATEGORY_FROM_HEAD[prefix]
+            if re.match(r'.*[0-9]\Z', prefix):
+                prefix = re.sub(r'_*[0-9]+\Z', r'', prefix)
+            else:
+                prefix = re.sub(r'_*[^_]*\Z', r'', prefix)
+        raise AlgorithmNotRecognized(expr)
+
+    @staticmethod
+    def determine_wildcard(expr) -> bool:
+        """Whether the given algorithm expression is a wildcard.
+
+        This function does not attempt to detect invalid inputs.
+        """
+        if re.search(r'\bPSA_ALG_ANY_HASH\b', expr):
+            return True
+        if re.search(r'_AT_LEAST_', expr):
+            return True
+        return False
+
+    def __init__(self, expr: str) -> None:
+        """Analyze an algorithm value.
+
+        The algorithm must be expressed as a C expression containing only
+        calls to PSA algorithm constructor macros and numeric literals.
+
+        This class is only programmed to handle valid expressions. Invalid
+        expressions may result in exceptions or in nonsensical results.
+        """
+        self.expression = re.sub(r'\s+', r'', expr)
+        self.base_expression = self.determine_base(self.expression)
+        self.head = self.determine_head(self.base_expression)
+        self.category = self.determine_category(self.base_expression, self.head)
+        self.is_wildcard = self.determine_wildcard(self.expression)
+
+    def get_key_agreement_derivation(self) -> Optional[str]:
+        """For a combined key agreement and key derivation algorithm, get the derivation part.
+
+        For anything else, return None.
+        """
+        if self.category != AlgorithmCategory.KEY_AGREEMENT:
+            return None
+        m = re.match(r'PSA_ALG_KEY_AGREEMENT\(\w+,\s*(.*)\)\Z', self.expression)
+        if not m:
+            return None
+        kdf_alg = m.group(1)
+        # Assume kdf_alg is either a valid KDF or 0.
+        if re.match(r'(?:0[Xx])?0+\s*\Z', kdf_alg):
+            return None
+        return kdf_alg
+
+    KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT = frozenset([
+        'PSA_ALG_TLS12_ECJPAKE_TO_PMS', # secret input in specific format
+    ])
+    def is_valid_key_agreement_with_derivation(self) -> bool:
+        """Whether this is a valid combined key agreement and key derivation algorithm."""
+        kdf_alg = self.get_key_agreement_derivation()
+        if kdf_alg is None:
+            return False
+        return kdf_alg not in self.KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT
+
+    def is_invalid_key_agreement_with_derivation(self) -> bool:
+        """Whether this is an invalid combined key agreement and key derivation algorithm."""
+        kdf_alg = self.get_key_agreement_derivation()
+        if kdf_alg is None:
+            return False
+        return kdf_alg in self.KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT
+
+    def short_expression(self, level: int = 0) -> str:
+        """Abbreviate the expression, keeping it human-readable.
+
+        See `crypto_knowledge.short_expression`.
+        """
+        return short_expression(self.expression, level=level)
+
+    HASH_LENGTH = {
+        'PSA_ALG_MD5': 16,
+        'PSA_ALG_SHA_1': 20,
+    }
+    HASH_LENGTH_BITS_RE = re.compile(r'([0-9]+)\Z')
+    @classmethod
+    def hash_length(cls, alg: str) -> int:
+        """The length of the given hash algorithm, in bytes."""
+        if alg in cls.HASH_LENGTH:
+            return cls.HASH_LENGTH[alg]
+        m = cls.HASH_LENGTH_BITS_RE.search(alg)
+        if m:
+            return int(m.group(1)) // 8
+        raise ValueError('Unknown hash length for ' + alg)
+
+    PERMITTED_TAG_LENGTHS = {
+        'PSA_ALG_CCM': frozenset([4, 6, 8, 10, 12, 14, 16]),
+        'PSA_ALG_CHACHA20_POLY1305': frozenset([16]),
+        'PSA_ALG_GCM': frozenset([4, 8, 12, 13, 14, 15, 16]),
+    }
+    MAC_LENGTH = {
+        'PSA_ALG_CBC_MAC': 16, # actually the block cipher length
+        'PSA_ALG_CMAC': 16, # actually the block cipher length
+    }
+    HMAC_RE = re.compile(r'PSA_ALG_HMAC\((.*)\)\Z')
+    @classmethod
+    def permitted_truncations(cls, base: str) -> FrozenSet[int]:
+        """Permitted output lengths for the given MAC or AEAD base algorithm.
+
+        For a MAC algorithm, this is the set of truncation lengths that
+        Mbed TLS supports.
+        For an AEAD algorithm, this is the set of truncation lengths that
+        are permitted by the algorithm specification.
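+
+        For example, for 'PSA_ALG_HMAC(PSA_ALG_SHA_256)' this is the set
+        of lengths from 4 to 32 bytes inclusive.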
+        """
+        if base in cls.PERMITTED_TAG_LENGTHS:
+            return cls.PERMITTED_TAG_LENGTHS[base]
+        max_length = cls.MAC_LENGTH.get(base, None)
+        if max_length is None:
+            m = cls.HMAC_RE.match(base)
+            if m:
+                max_length = cls.hash_length(m.group(1))
+        if max_length is None:
+            raise ValueError('Unknown permitted lengths for ' + base)
+        return frozenset(range(4, max_length + 1))
+
+    TRUNCATED_ALG_RE = re.compile(
+        r'(?P<face>PSA_ALG_(?:AEAD_WITH_SHORTENED_TAG|TRUNCATED_MAC))'
+        r'\((?P<base>.*),'
+        r'(?P<length>0[Xx][0-9A-Fa-f]+|[1-9][0-9]*|0[0-7]*)[LUlu]*\)\Z')
+    def is_invalid_truncation(self) -> bool:
+        """False for a MAC or AEAD algorithm truncated to an invalid length.
+
+        True for a MAC or AEAD algorithm truncated to a valid length or to
+        a length that cannot be determined. True for anything other than
+        a truncated MAC or AEAD.
+        """
+        m = self.TRUNCATED_ALG_RE.match(self.expression)
+        if m:
+            base = m.group('base')
+            to_length = int(m.group('length'), 0)
+            permitted_lengths = self.permitted_truncations(base)
+            if to_length not in permitted_lengths:
+                return True
+        return False
+
+    def is_valid_for_operation(self) -> bool:
+        """Whether this algorithm construction is valid for an operation.
+
+        This function assumes that the algorithm is constructed in a
+        "grammatically" correct way, and only rejects semantically invalid
+        combinations.
+        """
+        if self.is_wildcard:
+            return False
+        if self.is_invalid_truncation():
+            return False
+        return True
+
+    def can_do(self, category: AlgorithmCategory) -> bool:
+        """Whether this algorithm can perform operations in the given category.
+        """
+        if category == self.category:
+            return True
+        if category == AlgorithmCategory.KEY_DERIVATION and \
+           self.is_valid_key_agreement_with_derivation():
+            return True
+        return False
+
+    def usage_flags(self, public: bool = False) -> List[str]:
+        """The list of usage flags describing operations that can perform this algorithm.
+
+        If public is true, only return public-key operations, not private-key operations.
+        """
+        if self.category == AlgorithmCategory.HASH:
+            flags = []
+        elif self.category == AlgorithmCategory.MAC:
+            flags = ['SIGN_HASH', 'SIGN_MESSAGE',
+                     'VERIFY_HASH', 'VERIFY_MESSAGE']
+        elif self.category == AlgorithmCategory.CIPHER or \
+             self.category == AlgorithmCategory.AEAD:
+            flags = ['DECRYPT', 'ENCRYPT']
+        elif self.category == AlgorithmCategory.SIGN:
+            flags = ['VERIFY_HASH', 'VERIFY_MESSAGE']
+            if not public:
+                flags += ['SIGN_HASH', 'SIGN_MESSAGE']
+        elif self.category == AlgorithmCategory.ASYMMETRIC_ENCRYPTION:
+            flags = ['ENCRYPT']
+            if not public:
+                flags += ['DECRYPT']
+        elif self.category == AlgorithmCategory.KEY_DERIVATION or \
+             self.category == AlgorithmCategory.KEY_AGREEMENT:
+            flags = ['DERIVE']
+        else:
+            raise AlgorithmNotRecognized(self.expression)
+        return ['PSA_KEY_USAGE_' + flag for flag in flags]
diff --git a/framework/scripts/mbedtls_framework/ecp.py b/framework/scripts/mbedtls_framework/ecp.py
new file mode 100644
index 0000000..b40f3b1
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/ecp.py
@@ -0,0 +1,875 @@
+"""Framework classes for generation of ecp test cases."""
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+from typing import List
+
+from . import test_data_generation
+from . import bignum_common
+
+
+class EcpTarget(test_data_generation.BaseTarget):
+    #pylint: disable=abstract-method, too-few-public-methods
+    """Target for ecp test case generation."""
+    target_basename = 'test_suite_ecp.generated'
+
+
+class EcpP192R1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P192 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p192_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP192R1_ENABLED",
+                    "MBEDTLS_ECP_NIST_OPTIM"]
+
+    moduli = ["fffffffffffffffffffffffffffffffeffffffffffffffff"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "fffffffffffffffffffffffffffffffefffffffffffffffe",
+
+        # Modulus + 1
+        "ffffffffffffffffffffffffffffffff0000000000000000",
+
+        # 2^192 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P192 multiplication result
+        ("fffffffffffffffffffffffffffffffdfffffffffffffffc"
+         "000000000000000100000000000000040000000000000004"),
+
+        # Generate an overflow during reduction
+        ("00000000000000000000000000000001ffffffffffffffff"
+         "ffffffffffffffffffffffffffffffff0000000000000000"),
+
+        # Generate an overflow during carry reduction
+        ("ffffffffffffffff00000000000000010000000000000000"
+         "fffffffffffffffeffffffffffffffff0000000000000000"),
+
+        # First 8 numbers generated by random.getrandbits(384) - seed(2,2)
+        ("cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd"
+         "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("ffed9235288bc781ae66267594c9c9500925e4749b575bd1"
+         "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f"),
+        ("ef8acd128b4f2fc15f3f57ebf30b94fa82523e86feac7eb7"
+         "dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"),
+        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"
+         "defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2"),
+        ("2d3d854e061b90303b08c6e33c7295782d6c797f8f7d9b78"
+         "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1"
+         "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135"),
+        ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561"
+         "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"),
+        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2"
+         "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"),
+
+        # Next 2 numbers generated by random.getrandbits(192)
+        "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2",
+        "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63"
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP192R1"] + args
+
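+# Illustrative sanity check for the values above (a sketch, assuming
+# result() simply reduces the operand modulo p):
+#     p = 2**192 - 2**64 - 1             # the P-192 prime in `moduli`
+#     assert (2**192 - 1) % p == 2**64   # the "2^192 - 1" input value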
+
+class EcpP224R1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P224 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p224_raw"
+    input_style = "arch_split"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP224R1_ENABLED",
+                    "MBEDTLS_ECP_NIST_OPTIM"]
+
+    moduli = ["ffffffffffffffffffffffffffffffff000000000000000000000001"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "ffffffffffffffffffffffffffffffff000000000000000000000000",
+
+        # Modulus + 1
+        "ffffffffffffffffffffffffffffffff000000000000000000000002",
+
+        # 2^224 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P224 multiplication result
+        ("fffffffffffffffffffffffffffffffe000000000000000000000000"
+         "00000001000000000000000000000000000000000000000000000000"),
+
+        # Generate an overflow during reduction
+        ("00000000000000000000000000010000000070000000002000001000"
+         "ffffffffffff9fffffffffe00000efff000070000000002000001003"),
+
+        # Generate an underflow during reduction
+        ("00000001000000000000000000000000000000000000000000000000"
+         "00000000000dc0000000000000000001000000010000000100000003"),
+
+        # First 8 numbers generated by random.getrandbits(448) - seed(2,2)
+        ("da94e3e8ab73738fcf1822ffbc6887782b491044d5e341245c6e4337"
+         "15ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("cdbd47d364be8049a372db8f6e405d93ffed9235288bc781ae662675"
+         "94c9c9500925e4749b575bd13653f8dd9b1f282e4067c3584ee207f8"),
+        ("defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd12"
+         "8b4f2fc15f3f57ebf30b94fa82523e86feac7eb7dc38f519b91751da"),
+        ("2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a6"
+         "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"),
+        ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4"
+         "22fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"),
+        ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561867e5e15"
+         "bc01bfce6a27e0dfcbf8754472154e76e4c11ab2fec3f6b32e8d4b8a"),
+        ("a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26"
+         "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"),
+        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e"
+         "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473"),
+
+        # Next 2 numbers generated by random.getrandbits(224)
+        "eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a",
+        "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3"
+    ]
+
+    @property
+    def arg_a(self) -> str:
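+        # 224 bits is not a whole number of 64-bit limbs (224 = 3.5 * 64),
+        # so the padded width of the operand depends on the limb size;
+        # hence input_style = "arch_split" and this explicit zfill.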
+        limbs = 2 * bignum_common.bits_to_limbs(224, self.bits_in_limb)
+        hex_digits = bignum_common.hex_digits_for_limb(limbs, self.bits_in_limb)
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP224R1"] + args
+
+
+class EcpP256R1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P256 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p256_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP256R1_ENABLED",
+                    "MBEDTLS_ECP_NIST_OPTIM"]
+
+    moduli = ["ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "ffffffff00000001000000000000000000000000fffffffffffffffffffffffe",
+
+        # Modulus + 1
+        "ffffffff00000001000000000000000000000001000000000000000000000000",
+
+        # 2^256 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P256 multiplication result
+        ("fffffffe00000002fffffffe0000000100000001fffffffe00000001fffffffc"
+         "00000003fffffffcfffffffffffffffffffffffc000000000000000000000004"),
+
+        # Generate an overflow during reduction
+        ("0000000000000000000000010000000000000000000000000000000000000000"
+         "00000000000000000000000000000000000000000000000000000000ffffffff"),
+
+        # Generate an underflow during reduction
+        ("0000000000000000000000000000000000000000000000000000000000000010"
+         "ffffffff00000000000000000000000000000000000000000000000000000000"),
+
+        # Generate an overflow during carry reduction
+        ("aaaaaaaa00000000000000000000000000000000000000000000000000000000"
+         "00000000000000000000000000000000aaaaaaacaaaaaaaaaaaaaaaa00000000"),
+
+        # Generate an underflow during carry reduction
+        ("000000000000000000000001ffffffff00000000000000000000000000000000"
+         "0000000000000000000000000000000000000002000000020000000100000002"),
+
+        # First 8 numbers generated by random.getrandbits(512) - seed(2,2)
+        ("4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124"
+         "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"
+         "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"),
+        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626"
+         "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"),
+        ("829a48d422fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"
+         "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("e89204e2e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"
+         "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"),
+        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0"
+         "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"),
+        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9"
+         "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"),
+        ("d08f1bb2531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"
+         "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),
+
+        # Next 2 numbers generated by random.getrandbits(256)
+        "c5e2486c44a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062",
+        "d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9"
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP256R1"] + args
+
+
+class EcpP384R1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P384 fast reduction."""
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p384_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP384R1_ENABLED",
+                    "MBEDTLS_ECP_NIST_OPTIM"]
+
+    moduli = [("ffffffffffffffffffffffffffffffffffffffffffffffff"
+               "fffffffffffffffeffffffff0000000000000000ffffffff")
+             ] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        ("ffffffffffffffffffffffffffffffffffffffffffffffff"
+         "fffffffffffffffeffffffff0000000000000000fffffffe"),
+
+        # Modulus + 1
+        ("ffffffffffffffffffffffffffffffffffffffffffffffff"
+         "fffffffffffffffeffffffff000000000000000100000000"),
+
+        # 2^384 - 1
+        ("ffffffffffffffffffffffffffffffffffffffffffffffff"
+         "ffffffffffffffffffffffffffffffffffffffffffffffff"),
+
+        # Maximum canonical P384 multiplication result
+        ("ffffffffffffffffffffffffffffffffffffffffffffffff"
+         "fffffffffffffffdfffffffe0000000000000001fffffffc"
+         "000000000000000000000000000000010000000200000000"
+         "fffffffe000000020000000400000000fffffffc00000004"),
+
+        # Testing with overflow in A(12) + A(21) + A(20);
+        ("497811378624857a2c2af60d70583376545484cfae5c812f"
+         "e2999fc1abb51d18b559e8ca3b50aaf263fdf8f24bdfb98f"
+         "ffffffff20e65bf9099e4e73a5e8b517cf4fbeb8fd1750fd"
+         "ae6d43f2e53f82d5ffffffffffffffffcc6f1e06111c62e0"),
+
+        # Testing with underflow in A(13) + A(22) + A(23) - A(12) - A(20);
+        ("dfdd25e96777406b3c04b8c7b406f5fcf287e1e576003a09"
+         "2852a6fbe517f2712b68abef41dbd35183a0614fb7222606"
+         "ffffffff84396eee542f18a9189d94396c784059c17a9f18"
+         "f807214ef32f2f10ffffffff8a77fac20000000000000000"),
+
+        # Testing with overflow in A(23) + A(20) + A(19) - A(22);
+        ("783753f8a5afba6c1862eead1deb2fcdd907272be3ffd185"
+         "42b24a71ee8b26cab0aa33513610ff973042bbe1637cc9fc"
+         "99ad36c7f703514572cf4f5c3044469a8f5be6312c19e5d3"
+         "f8fc1ac6ffffffffffffffff8c86252400000000ffffffff"),
+
+        # Testing with underflow in A(23) + A(20) + A(19) - A(22);
+        ("65e1d2362fce922663b7fd517586e88842a9b4bd092e93e6"
+         "251c9c69f278cbf8285d99ae3b53da5ba36e56701e2b17c2"
+         "25f1239556c5f00117fa140218b46ebd8e34f50d0018701f"
+         "a8a0a5cc00000000000000004410bcb4ffffffff00000000"),
+
+        # Testing the second round of carry reduction
+        ("000000000000000000000000ffffffffffffffffffffffff"
+         "ffffffffffffffffffffffffffffffff0000000000000000"
+         "0000000000000000ffffffff000000000000000000000001"
+         "00000000000000000000000000000000ffffffff00000001"),
+
+        # First 8 numbers generated by random.getrandbits(768) - seed(2,2)
+        ("ffed9235288bc781ae66267594c9c9500925e4749b575bd1"
+         "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f"
+         "cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd"
+         "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"
+         "defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2"
+         "ef8acd128b4f2fc15f3f57ebf30b94fa82523e86feac7eb7"
+         "dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"),
+        ("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1"
+         "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135"
+         "2d3d854e061b90303b08c6e33c7295782d6c797f8f7d9b78"
+         "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2"
+         "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"
+         "97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561"
+         "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"),
+        ("8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4"
+         "e73695c3e652c71a74667bffe202849da9643a295a9ac6de"
+         "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63"
+         "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"),
+        ("d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f8777"
+         "7ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48e86ec9c6"
+         "e06f291b2a838af8d5c44a4eb3172062d08f1bb2531d6460"
+         "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"),
+        ("0227eeb7b9d7d01f5769da05d205bbfcc8c69069134bccd3"
+         "e1cf4f589f8e4ce0af29d115ef24bd625dd961e6830b54fa"
+         "7d28f93435339774bb1e386c4fd5079e681b8f5896838b76"
+         "9da59b74a6c3181c81e220df848b1df78feb994a81167346"),
+        ("d322a7353ead4efe440e2b4fda9c025a22f1a83185b98f5f"
+         "c11e60de1b343f52ea748db9e020307aaeb6db2c3a038a70"
+         "9779ac1f45e9dd320c855fdfa7251af0930cdbd30f0ad2a8"
+         "1b2d19a2beaa14a7ff3fe32a30ffc4eed0a7bd04e85bfcdd"),
+
+        # Next 2 numbers generated by random.getrandbits(384)
+        ("5c3747465cc36c270e8a35b10828d569c268a20eb78ac332"
+         "e5e138e26c4454b90f756132e16dce72f18e859835e1f291"),
+        ("eb2b5693babb7fbb0a76c196067cfdcb11457d9cf45e2fa0"
+         "1d7f4275153924800600571fac3a5b263fdf57cd2c006497")
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP384R1"] + args
+
+
+class EcpP521R1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P521 fast reduction."""
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p521_raw"
+    input_style = "arch_split"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP521R1_ENABLED",
+                    "MBEDTLS_ECP_NIST_OPTIM"]
+
+    moduli = [("01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+               "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+             ] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        ("01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"),
+
+        # Modulus + 1
+        ("020000000000000000000000000000000000000000000000000000000000000000"
+         "000000000000000000000000000000000000000000000000000000000000000000"),
+
+        # Maximum canonical P521 multiplication result
+        ("0003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "fffff800"
+         "0000000000000000000000000000000000000000000000000000000000000000"
+         "0000000000000000000000000000000000000000000000000000000000000004"),
+
+        # Test case for overflow during addition
+        ("0001efffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "000001ef"
+         "0000000000000000000000000000000000000000000000000000000000000000"
+         "000000000000000000000000000000000000000000000000000000000f000000"),
+
+        # First 8 numbers generated by random.getrandbits(1042) - seed(2,2)
+        ("0003cc2e82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f"
+         "6e405d93ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd"
+         "9b1f282e"
+         "4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124"
+         "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("00017052829e07b0829a48d422fe99a22c70501e533c91352d3d854e061b9030"
+         "3b08c6e33c7295782d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c5055"
+         "6c71c4a6"
+         "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a"
+         "09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57eb"),
+        ("00021f15a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26"
+         "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b97eeab64"
+         "ca2ce6bc"
+         "5d3fd983c34c769fe89204e2e8168561867e5e15bc01bfce6a27e0dfcbf87544"
+         "72154e76e4c11ab2fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1"),
+        ("000381bc2a838af8d5c44a4eb3172062d08f1bb2531d6460f0caeef038c89b38"
+         "a8acb5137c9260dc74e088a9b9492f258ebdbfe3eb9ac688b9d39cca91551e82"
+         "59cc60b1"
+         "7604e4b4e73695c3e652c71a74667bffe202849da9643a295a9ac6decbd4d3e2"
+         "d4dec9ef83f0be4e80371eb97f81375eecc1cb6347733e847d718d733ff98ff3"),
+        ("00034816c8c69069134bccd3e1cf4f589f8e4ce0af29d115ef24bd625dd961e6"
+         "830b54fa7d28f93435339774bb1e386c4fd5079e681b8f5896838b769da59b74"
+         "a6c3181c"
+         "81e220df848b1df78feb994a81167346d4c0dca8b4c9e755cc9c3adcf515a823"
+         "4da4daeb4f3f87777ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48e86ec9c6"),
+        ("000397846c4454b90f756132e16dce72f18e859835e1f291d322a7353ead4efe"
+         "440e2b4fda9c025a22f1a83185b98f5fc11e60de1b343f52ea748db9e020307a"
+         "aeb6db2c"
+         "3a038a709779ac1f45e9dd320c855fdfa7251af0930cdbd30f0ad2a81b2d19a2"
+         "beaa14a7ff3fe32a30ffc4eed0a7bd04e85bfcdd0227eeb7b9d7d01f5769da05"),
+        ("00002c3296e6bc4d62b47204007ee4fab105d83e85e951862f0981aebc1b00d9"
+         "2838e766ef9b6bf2d037fe2e20b6a8464174e75a5f834da70569c018eb2b5693"
+         "babb7fbb"
+         "0a76c196067cfdcb11457d9cf45e2fa01d7f4275153924800600571fac3a5b26"
+         "3fdf57cd2c0064975c3747465cc36c270e8a35b10828d569c268a20eb78ac332"),
+        ("00009d23b4917fc09f20dbb0dcc93f0e66dfe717c17313394391b6e2e6eacb0f"
+         "0bb7be72bd6d25009aeb7fa0c4169b148d2f527e72daf0a54ef25c0707e33868"
+         "7d1f7157"
+         "5653a45c49390aa51cf5192bbf67da14be11d56ba0b4a2969d8055a9f03f2d71"
+         "581d8e830112ff0f0948eccaf8877acf26c377c13f719726fd70bddacb4deeec"),
+
+        # Next 2 numbers generated by random.getrandbits(521)
+        ("12b84ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da1a1fe"
+         "3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccdf572df00790813e3"),
+        ("166049dd332a73fa0b26b75196cf87eb8a09b27ec714307c68c425424a1574f1"
+         "eedf5b0f16cdfdb839424d201e653f53d6883ca1c107ca6e706649889c0c7f38608")
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        # Pad to the width of a double-length operand (2 * N limbs).
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP521R1"] + args
+
+
+class EcpP192K1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P192K1 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p192k1_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP192K1_ENABLED"]
+
+    moduli = ["fffffffffffffffffffffffffffffffffffffffeffffee37"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "fffffffffffffffffffffffffffffffffffffffeffffee36",
+
+        # Modulus + 1
+        "fffffffffffffffffffffffffffffffffffffffeffffee38",
+
+        # 2^192 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P192K1 multiplication result
+        ("fffffffffffffffffffffffffffffffffffffffdffffdc6c"
+         "0000000000000000000000000000000100002394013c7364"),
+
+        # Test case for overflow during addition
+        ("00000007ffff71b809e27dd832cfd5e04d9d2dbb9f8da217"
+         "0000000000000000000000000000000000000000520834f0"),
+
+        # First 8 numbers generated by random.getrandbits(384) - seed(2,2)
+        ("cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd"
+         "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("ffed9235288bc781ae66267594c9c9500925e4749b575bd1"
+         "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f"),
+        ("ef8acd128b4f2fc15f3f57ebf30b94fa82523e86feac7eb7"
+         "dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"),
+        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"
+         "defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2"),
+        ("2d3d854e061b90303b08c6e33c7295782d6c797f8f7d9b78"
+         "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1"
+         "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135"),
+        ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561"
+         "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"),
+        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2"
+         "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"),
+
+        # Next 2 numbers generated by random.getrandbits(192)
+        "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2",
+        "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63"
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP192K1"] + args
+
+
+class EcpP224K1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P224 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p224k1_raw"
+    input_style = "arch_split"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP224K1_ENABLED"]
+
+    moduli = ["fffffffffffffffffffffffffffffffffffffffffffffffeffffe56d"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "fffffffffffffffffffffffffffffffffffffffffffffffeffffe56c",
+
+        # Modulus + 1
+        "fffffffffffffffffffffffffffffffffffffffffffffffeffffe56e",
+
+        # 2^224 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P224K1 multiplication result
+        ("fffffffffffffffffffffffffffffffffffffffffffffffdffffcad8"
+         "00000000000000000000000000000000000000010000352802c26590"),
+
+        # Test case for overflow during addition
+        ("0000007ffff2b68161180fd8cd92e1a109be158a19a99b1809db8032"
+         "0000000000000000000000000000000000000000000000000bf04f49"),
+
+        # First 8 numbers generated by random.getrandbits(448) - seed(2,2)
+        ("da94e3e8ab73738fcf1822ffbc6887782b491044d5e341245c6e4337"
+         "15ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("cdbd47d364be8049a372db8f6e405d93ffed9235288bc781ae662675"
+         "94c9c9500925e4749b575bd13653f8dd9b1f282e4067c3584ee207f8"),
+        ("defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd12"
+         "8b4f2fc15f3f57ebf30b94fa82523e86feac7eb7dc38f519b91751da"),
+        ("2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a6"
+         "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"),
+        ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4"
+         "22fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"),
+        ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561867e5e15"
+         "bc01bfce6a27e0dfcbf8754472154e76e4c11ab2fec3f6b32e8d4b8a"),
+        ("a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26"
+         "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"),
+        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e"
+         "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473"),
+
+        # Next 2 numbers generated by random.getrandbits(224)
+        ("eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),
+        ("f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3"),
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        limbs = 2 * bignum_common.bits_to_limbs(224, self.bits_in_limb)
+        hex_digits = bignum_common.hex_digits_for_limb(limbs, self.bits_in_limb)
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP224K1"] + args
+
+
+class EcpP256K1Raw(bignum_common.ModOperationCommon,
+                   EcpTarget):
+    """Test cases for ECP P256 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p256k1_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_SECP256K1_ENABLED"]
+
+    moduli = ["fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
+
+        # Modulus + 1
+        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
+
+        # 2^256 - 1
+        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+
+        # Maximum canonical P256K1 multiplication result
+        ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffdfffff85c"
+         "000000000000000000000000000000000000000000000001000007a4000e9844"),
+
+        # Test case for overflow during addition
+        ("0000fffffc2f000e90a0c86a0a63234e5ba641f43a7e4aecc4040e67ec850562"
+         "00000000000000000000000000000000000000000000000000000000585674fd"),
+
+        # First 8 numbers generated by random.getrandbits(512) - seed(2,2)
+        ("4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124"
+         "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"
+         "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"),
+        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626"
+         "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"),
+        ("829a48d422fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"
+         "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("e89204e2e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"
+         "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"),
+        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0"
+         "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"),
+        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9"
+         "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"),
+        ("d08f1bb2531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"
+         "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),
+
+        # Next 2 numbers generated by random.getrandbits(256)
+        ("c5e2486c44a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062"),
+        ("d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9"),
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_SECP256K1"] + args
+
+
+class EcpP255Raw(bignum_common.ModOperationCommon,
+                 EcpTarget):
+    """Test cases for ECP 25519 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "mbedtls_ecp_mod_p255_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_CURVE25519_ENABLED"]
+
+    moduli = [("7fffffffffffffffffffffffffffffffffffffffffffffffff"
+               "ffffffffffffed")] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        ("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec"),
+
+        # Modulus + 1
+        ("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee"),
+
+        # 2^255 - 1
+        ("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+
+        # Maximum canonical P255 multiplication result
+        ("3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec"
+         "0000000000000000000000000000000000000000000000000000000000000190"),
+
+        # First 8 numbers generated by random.getrandbits(510) - seed(2,2)
+        ("1019f0d64ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124"
+         "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
+        ("20948fa1feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"
+         "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"),
+        ("3a1893ea5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626"
+         "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"),
+        ("20a6923522fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"
+         "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
+        ("3a248138e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"
+         "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"),
+        ("2f450feab714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0"
+         "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"),
+        ("1d199effe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9"
+         "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"),
+        ("3423c6ec531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"
+         "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),
+
+        # Next 2 numbers generated by random.getrandbits(255)
+        ("62f1243644a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062"),
+        ("6a606e54b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9"),
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_CURVE25519"] + args
+
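+# Illustrative sanity check for the values above (a sketch, assuming
+# result() simply reduces the operand modulo p):
+#     p = 2**255 - 19          # the Curve25519 prime listed in `moduli`
+#     assert (p + 1) % p == 1  # the "Modulus + 1" input reduces to 1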
+
+class EcpP448Raw(bignum_common.ModOperationCommon,
+                 EcpTarget):
+    """Test cases for ECP P448 fast reduction."""
+    symbol = "-"
+    test_function = "ecp_mod_p_generic_raw"
+    test_name = "ecp_mod_p448_raw"
+    input_style = "fixed"
+    arity = 1
+    dependencies = ["MBEDTLS_ECP_DP_CURVE448_ENABLED"]
+
+    moduli = [("fffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+               "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff")] # type: List[str]
+
+    input_values = [
+        "0", "1",
+
+        # Modulus - 1
+        ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+         "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe"),
+
+        # Modulus + 1
+        ("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "00000000000000000000000000000000000000000000000000000000"),
+
+        # 2^448 - 1
+        ("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+         "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+
+        # Maximum canonical P448 multiplication result
+        ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffd"
+         "fffffffffffffffffffffffffffffffffffffffffffffffffffffffd"
+         "00000000000000000000000000000000000000000000000000000004"
+         "00000000000000000000000000000000000000000000000000000004"),
+
+        # First 8 numbers generated by random.getrandbits(896) - seed(2,2)
+        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e"
+         "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473"
+         "a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26"
+         "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"),
+        ("4da4daeb4f3f87777ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48"
+         "e86ec9c6e06f291b2a838af8d5c44a4eb3172062d08f1bb2531d6460"
+         "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3"
+         "eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),
+        ("bc1b00d92838e766ef9b6bf2d037fe2e20b6a8464174e75a5f834da7"
+         "0569c018eb2b5693babb7fbb0a76c196067cfdcb11457d9cf45e2fa0"
+         "1d7f4275153924800600571fac3a5b263fdf57cd2c0064975c374746"
+         "5cc36c270e8a35b10828d569c268a20eb78ac332e5e138e26c4454b9"),
+        ("8d2f527e72daf0a54ef25c0707e338687d1f71575653a45c49390aa5"
+         "1cf5192bbf67da14be11d56ba0b4a2969d8055a9f03f2d71581d8e83"
+         "0112ff0f0948eccaf8877acf26c377c13f719726fd70bddacb4deeec"
+         "0b0c995e96e6bc4d62b47204007ee4fab105d83e85e951862f0981ae"),
+        ("84ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da"
+         "1a1fe3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccd"
+         "f572df00790813e32748dd1db4917fc09f20dbb0dcc93f0e66dfe717"
+         "c17313394391b6e2e6eacb0f0bb7be72bd6d25009aeb7fa0c4169b14"),
+        ("2bb3b36f29421c4021b7379f0897246a40c270b00e893302aba9e7b8"
+         "23fc5ad2f58105748ed5d1b7b310b730049dd332a73fa0b26b75196c"
+         "f87eb8a09b27ec714307c68c425424a1574f1eedf5b0f16cdfdb8394"
+         "24d201e653f53d6883ca1c107ca6e706649889c0c7f3860895bfa813"),
+        ("af3f5d7841b1256d5c1dc12fb5a1ae519fb8883accda6559caa538a0"
+         "9fc9370d3a6b86a7975b54a31497024640332b0612d4050771d7b14e"
+         "b6c004cc3b8367dc3f2bb31efe9934ad0809eae3ef232a32b5459d83"
+         "fbc46f1aea990e94821d46063b4dbf2ca294523d74115c86188b1044"),
+        ("7430051376e31f5aab63ad02854efa600641b4fa37a47ce41aeffafc"
+         "3b45402ac02659fe2e87d4150511baeb198ababb1a16daff3da95cd2"
+         "167b75dfb948f82a8317cba01c75f67e290535d868a24b7f627f2855"
+         "09167d4126af8090013c3273c02c6b9586b4625b475b51096c4ad652"),
+
+        # Corner case which causes maximum overflow
+        ("f4ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da1"
+         "a1fe3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccd0B"
+         "519A16DF59C53E0D49B209200F878F362ACE518D5B8BFCF9CDC725E5E"
+         "01C06295E8605AF06932B5006D9E556D3F190E8136BF9C643D332"),
+
+        # Next 2 numbers generated by random.getrandbits(448)
+        ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4"
+         "22fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"),
+        ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561867e5e15"
+         "bc01bfce6a27e0dfcbf8754472154e76e4c11ab2fec3f6b32e8d4b8a"),
+
+    ]
+
+    @property
+    def arg_a(self) -> str:
+        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)
+
+    def result(self) -> List[str]:
+        result = self.int_a % self.int_n
+        return [self.format_result(result)]
+
+    @property
+    def is_valid(self) -> bool:
+        return True
+
+    def arguments(self) -> List[str]:
+        args = super().arguments()
+        return ["MBEDTLS_ECP_DP_CURVE448"] + args
diff --git a/framework/scripts/mbedtls_framework/logging_util.py b/framework/scripts/mbedtls_framework/logging_util.py
new file mode 100644
index 0000000..ddd7c7f
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/logging_util.py
@@ -0,0 +1,46 @@
+"""Auxiliary functions used for logging module.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import logging
+import sys
+
+def configure_logger(
+        logger: logging.Logger,
+        log_format="[%(levelname)s]: %(message)s",
+        split_level=logging.WARNING
+    ) -> None:
+    """
+    Configure the logging.Logger instance so that:
+        - Format is set to any log_format.
+            Default: "[%(levelname)s]: %(message)s"
+        - loglevel >= split_level are printed to stderr.
+        - loglevel <  split_level are printed to stdout.
+            Default: logging.WARNING
+    """
+    class MaxLevelFilter(logging.Filter):
+        # pylint: disable=too-few-public-methods
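+        """Filter that passes only records at or below a maximum level."""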
+        def __init__(self, max_level, name=''):
+            super().__init__(name)
+            self.max_level = max_level
+
+        def filter(self, record: logging.LogRecord) -> bool:
+            return record.levelno <= self.max_level
+
+    log_formatter = logging.Formatter(log_format)
+
+    # set loglevel >= split_level to be printed to stderr
+    stderr_hdlr = logging.StreamHandler(sys.stderr)
+    stderr_hdlr.setLevel(split_level)
+    stderr_hdlr.setFormatter(log_formatter)
+
+    # set loglevel < split_level to be printed to stdout
+    stdout_hdlr = logging.StreamHandler(sys.stdout)
+    stdout_hdlr.addFilter(MaxLevelFilter(split_level - 1))
+    stdout_hdlr.setFormatter(log_formatter)
+
+    logger.addHandler(stderr_hdlr)
+    logger.addHandler(stdout_hdlr)
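+
+# A minimal usage sketch (illustrative only; the logger name "example" is
+# arbitrary, not part of the framework):
+#
+#     logger = logging.getLogger("example")
+#     logger.setLevel(logging.DEBUG)
+#     configure_logger(logger)
+#     logger.info("goes to stdout")    # below the WARNING split level
+#     logger.error("goes to stderr")   # at or above the split level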
diff --git a/framework/scripts/mbedtls_framework/macro_collector.py b/framework/scripts/mbedtls_framework/macro_collector.py
new file mode 100644
index 0000000..d68be00
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/macro_collector.py
@@ -0,0 +1,539 @@
+"""Collect macro definitions from header files.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import itertools
+import re
+from typing import Dict, IO, Iterable, Iterator, List, Optional, Pattern, Set, Tuple, Union
+
+
+class ReadFileLineException(Exception):
+    def __init__(self, filename: str, line_number: Union[int, str]) -> None:
+        message = 'in {} at {}'.format(filename, line_number)
+        super(ReadFileLineException, self).__init__(message)
+        self.filename = filename
+        self.line_number = line_number
+
+
+class read_file_lines:
+    # Dear Pylint, conventionally, a context manager class name is lowercase.
+    # pylint: disable=invalid-name,too-few-public-methods
+    """Context manager to read a text file line by line.
+
+    ```
+    with read_file_lines(filename) as lines:
+        for line in lines:
+            process(line)
+    ```
+    is equivalent to
+    ```
+    with open(filename, 'r') as input_file:
+        for line in input_file:
+            process(line)
+    ```
+    except that if process(line) raises an exception, then the read_file_lines
+    snippet annotates the exception with the file name and line number.
+    """
+    def __init__(self, filename: str, binary: bool = False) -> None:
+        self.filename = filename
+        self.file = None #type: Optional[IO[str]]
+        self.line_number = 'entry' #type: Union[int, str]
+        self.generator = None #type: Optional[Iterable[Tuple[int, str]]]
+        self.binary = binary
+
+    def __enter__(self) -> 'read_file_lines':
+        self.file = open(self.filename, 'rb' if self.binary else 'r')
+        self.generator = enumerate(self.file)
+        return self
+
+    def __iter__(self) -> Iterator[str]:
+        assert self.generator is not None
+        for line_number, content in self.generator:
+            self.line_number = line_number
+            yield content
+        self.line_number = 'exit'
+
+    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
+        if self.file is not None:
+            self.file.close()
+        if exc_type is not None:
+            raise ReadFileLineException(self.filename, self.line_number) \
+                from exc_value
+
+
+class PSAMacroEnumerator:
+    """Information about constructors of various PSA Crypto types.
+
+    This includes macro names as well as information about their arguments
+    when applicable.
+
+    This class only provides ways to enumerate expressions that evaluate to
+    values of the covered types. Derived classes are expected to populate
+    the set of known constructors of each kind, as well as populate
+    `self.arguments_for` for arguments that are not of a kind that is
+    enumerated here.
+    """
+    #pylint: disable=too-many-instance-attributes
+
+    def __init__(self) -> None:
+        """Set up an empty set of known constructor macros.
+        """
+        self.statuses = set() #type: Set[str]
+        self.lifetimes = set() #type: Set[str]
+        self.locations = set() #type: Set[str]
+        self.persistence_levels = set() #type: Set[str]
+        self.algorithms = set() #type: Set[str]
+        self.ecc_curves = set() #type: Set[str]
+        self.dh_groups = set() #type: Set[str]
+        self.key_types = set() #type: Set[str]
+        self.key_usage_flags = set() #type: Set[str]
+        self.hash_algorithms = set() #type: Set[str]
+        self.mac_algorithms = set() #type: Set[str]
+        self.ka_algorithms = set() #type: Set[str]
+        self.kdf_algorithms = set() #type: Set[str]
+        self.pake_algorithms = set() #type: Set[str]
+        self.aead_algorithms = set() #type: Set[str]
+        self.sign_algorithms = set() #type: Set[str]
+        # macro name -> list of argument names
+        self.argspecs = {} #type: Dict[str, List[str]]
+        # argument name -> list of values
+        self.arguments_for = {
+            'mac_length': [],
+            'min_mac_length': [],
+            'tag_length': [],
+            'min_tag_length': [],
+        } #type: Dict[str, List[str]]
+        # Whether to include intermediate macros in enumerations. Intermediate
+        # macros serve as category headers and are not valid values of their
+        # type. See `is_internal_name`.
+        # Always false in this class, may be set to true in derived classes.
+        self.include_intermediate = False
+
+    def is_internal_name(self, name: str) -> bool:
+        """Whether this is an internal macro. Internal macros will be skipped."""
+        if not self.include_intermediate:
+            if name.endswith('_BASE') or name.endswith('_NONE'):
+                return True
+            if '_CATEGORY_' in name:
+                return True
+        return name.endswith('_FLAG') or name.endswith('_MASK')
+
+    def gather_arguments(self) -> None:
+        """Populate the list of values for macro arguments.
+
+        Call this after parsing all the inputs.
+        """
+        self.arguments_for['hash_alg'] = sorted(self.hash_algorithms)
+        self.arguments_for['mac_alg'] = sorted(self.mac_algorithms)
+        self.arguments_for['ka_alg'] = sorted(self.ka_algorithms)
+        self.arguments_for['kdf_alg'] = sorted(self.kdf_algorithms)
+        self.arguments_for['aead_alg'] = sorted(self.aead_algorithms)
+        self.arguments_for['sign_alg'] = sorted(self.sign_algorithms)
+        self.arguments_for['curve'] = sorted(self.ecc_curves)
+        self.arguments_for['group'] = sorted(self.dh_groups)
+        self.arguments_for['persistence'] = sorted(self.persistence_levels)
+        self.arguments_for['location'] = sorted(self.locations)
+        self.arguments_for['lifetime'] = sorted(self.lifetimes)
+
+    @staticmethod
+    def _format_arguments(name: str, arguments: Iterable[str]) -> str:
+        """Format a macro call with arguments.
+
+        The resulting format is consistent with
+        `InputsForTest.normalize_argument`.
+        """
+        return name + '(' + ', '.join(arguments) + ')'
+
+    _argument_split_re = re.compile(r' *, *')
+    @classmethod
+    def _argument_split(cls, arguments: str) -> List[str]:
+        return re.split(cls._argument_split_re, arguments)
+
+    def distribute_arguments(self, name: str) -> Iterator[str]:
+        """Generate macro calls with each tested argument set.
+
+        If name is a macro without arguments, just yield "name".
+        If name is a macro with arguments, yield a series of
+        "name(arg1,...,argN)" where each argument takes each possible
+        value at least once.
+        """
+        try:
+            if name not in self.argspecs:
+                yield name
+                return
+            argspec = self.argspecs[name]
+            if argspec == []:
+                yield name + '()'
+                return
+            argument_lists = [self.arguments_for[arg] for arg in argspec]
+            arguments = [values[0] for values in argument_lists]
+            yield self._format_arguments(name, arguments)
+            # Dear Pylint, enumerate won't work here since we're modifying
+            # the array.
+            # pylint: disable=consider-using-enumerate
+            for i in range(len(arguments)):
+                for value in argument_lists[i][1:]:
+                    arguments[i] = value
+                    yield self._format_arguments(name, arguments)
+                arguments[i] = argument_lists[i][0]
+        except BaseException as e:
+            raise Exception('distribute_arguments({})'.format(name)) from e
+
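+    # Illustrative example (hypothetical data): given
+    #     argspecs == {'PSA_ALG_HMAC': ['hash_alg']}
+    #     arguments_for['hash_alg'] == ['PSA_ALG_SHA_256', 'PSA_ALG_SHA_512']
+    # distribute_arguments('PSA_ALG_HMAC') yields
+    # 'PSA_ALG_HMAC(PSA_ALG_SHA_256)' then 'PSA_ALG_HMAC(PSA_ALG_SHA_512)'.
+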
+    def distribute_arguments_without_duplicates(
+            self, seen: Set[str], name: str
+    ) -> Iterator[str]:
+        """Same as `distribute_arguments`, but don't repeat seen results."""
+        for result in self.distribute_arguments(name):
+            if result not in seen:
+                seen.add(result)
+                yield result
+
+    def generate_expressions(self, names: Iterable[str]) -> Iterator[str]:
+        """Generate expressions covering values constructed from the given names.
+
+        `names` can be any iterable collection of macro names.
+
+        For example:
+        * ``generate_expressions(['PSA_ALG_CMAC', 'PSA_ALG_HMAC'])``
+          generates ``'PSA_ALG_CMAC'`` as well as ``'PSA_ALG_HMAC(h)'`` for
+          every known hash algorithm ``h``.
+        * ``macros.generate_expressions(macros.key_types)`` generates all
+          key types.
+        """
+        seen = set() #type: Set[str]
+        return itertools.chain(*(
+            self.distribute_arguments_without_duplicates(seen, name)
+            for name in names
+        ))
+
+
+class PSAMacroCollector(PSAMacroEnumerator):
+    """Collect PSA crypto macro definitions from C header files.
+    """
+
+    def __init__(self, include_intermediate: bool = False) -> None:
+        """Set up an object to collect PSA macro definitions.
+
+        Call the read_file method of the constructed object on each header file.
+
+        * include_intermediate: if true, include intermediate macros such as
+          PSA_XXX_BASE that do not designate semantic values.
+        """
+        super().__init__()
+        self.include_intermediate = include_intermediate
+        self.key_types_from_curve = {} #type: Dict[str, str]
+        self.key_types_from_group = {} #type: Dict[str, str]
+        self.algorithms_from_hash = {} #type: Dict[str, str]
+
+    @staticmethod
+    def algorithm_tester(name: str) -> str:
+        """The predicate for whether an algorithm is built from the given constructor.
+
+        The given name must be the name of an algorithm constructor of the
+        form ``PSA_ALG_xxx`` which is used as ``PSA_ALG_xxx(yyy)`` to build
+        an algorithm value. Return the corresponding predicate macro which
+        is used as ``predicate(alg)`` to test whether ``alg`` can be built
+        as ``PSA_ALG_xxx(yyy)``. The predicate is usually called
+        ``PSA_ALG_IS_xxx``.
+        """
+        prefix = 'PSA_ALG_'
+        assert name.startswith(prefix)
+        midfix = 'IS_'
+        suffix = name[len(prefix):]
+        if suffix in ['DSA', 'ECDSA']:
+            midfix += 'RANDOMIZED_'
+        elif suffix == 'RSA_PSS':
+            suffix += '_STANDARD_SALT'
+        return prefix + midfix + suffix
+
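+    # For example, following the rules above:
+    #     algorithm_tester('PSA_ALG_HMAC')    == 'PSA_ALG_IS_HMAC'
+    #     algorithm_tester('PSA_ALG_ECDSA')   == 'PSA_ALG_IS_RANDOMIZED_ECDSA'
+    #     algorithm_tester('PSA_ALG_RSA_PSS') == 'PSA_ALG_IS_RSA_PSS_STANDARD_SALT'
+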
+    def record_algorithm_subtype(self, name: str, expansion: str) -> None:
+        """Record the subtype of an algorithm constructor.
+
+        Given a ``PSA_ALG_xxx`` macro name and its expansion, if the algorithm
+        is of a subtype that is tracked in its own set, add it to the relevant
+        set.
+        """
+        # This code is very ad hoc and fragile. It should be replaced by
+        # something more robust.
+        if re.match(r'MAC(?:_|\Z)', name):
+            self.mac_algorithms.add(name)
+        elif re.match(r'KDF(?:_|\Z)', name):
+            self.kdf_algorithms.add(name)
+        elif re.search(r'0x020000[0-9A-Fa-f]{2}', expansion):
+            self.hash_algorithms.add(name)
+        elif re.search(r'0x03[0-9A-Fa-f]{6}', expansion):
+            self.mac_algorithms.add(name)
+        elif re.search(r'0x05[0-9A-Fa-f]{6}', expansion):
+            self.aead_algorithms.add(name)
+        elif re.search(r'0x09[0-9A-Fa-f]{2}0000', expansion):
+            self.ka_algorithms.add(name)
+        elif re.search(r'0x08[0-9A-Fa-f]{6}', expansion):
+            self.kdf_algorithms.add(name)
+
+    # "#define" followed by a macro name with either no parameters
+    # or a single parameter and a non-empty expansion.
+    # Grab the macro name in group 1, the parameter name if any in group 2
+    # and the expansion in group 3.
+    _define_directive_re = re.compile(r'\s*#\s*define\s+(\w+)' +
+                                      r'(?:\s+|\((\w+)\)\s*)' +
+                                      r'(.+)')
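+    # For example, the definition (real at the time of writing)
+    #     '#define PSA_ALG_SHA_256 ((psa_algorithm_t)0x02000009)'
+    # matches with groups ('PSA_ALG_SHA_256', None,
+    # '((psa_algorithm_t)0x02000009)').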
+    _deprecated_definition_re = re.compile(r'\s*MBEDTLS_DEPRECATED')
+
+    def read_line(self, line):
+        """Parse a C header line and record the PSA identifier it defines if any.
+        This function analyzes lines that start with "#define PSA_"
+        (up to non-significant whitespace) and skips all non-matching lines.
+        """
+        # pylint: disable=too-many-branches
+        m = re.match(self._define_directive_re, line)
+        if not m:
+            return
+        name, parameter, expansion = m.groups()
+        expansion = re.sub(r'/\*.*?\*/|//.*', r' ', expansion)
+        if parameter:
+            self.argspecs[name] = [parameter]
+        if re.match(self._deprecated_definition_re, expansion):
+            # Skip deprecated values, which are assumed to be
+            # backward compatibility aliases that share
+            # numerical values with non-deprecated values.
+            return
+        if self.is_internal_name(name):
+            # Macro only to build actual values
+            return
+        elif (name.startswith('PSA_ERROR_') or name == 'PSA_SUCCESS') \
+           and not parameter:
+            self.statuses.add(name)
+        elif name.startswith('PSA_KEY_TYPE_') and not parameter:
+            self.key_types.add(name)
+        elif name.startswith('PSA_KEY_TYPE_') and parameter == 'curve':
+            self.key_types_from_curve[name] = name[:13] + 'IS_' + name[13:]
+        elif name.startswith('PSA_KEY_TYPE_') and parameter == 'group':
+            self.key_types_from_group[name] = name[:13] + 'IS_' + name[13:]
+        elif name.startswith('PSA_ECC_FAMILY_') and not parameter:
+            self.ecc_curves.add(name)
+        elif name.startswith('PSA_DH_FAMILY_') and not parameter:
+            self.dh_groups.add(name)
+        elif name.startswith('PSA_ALG_') and not parameter:
+            if name in ['PSA_ALG_ECDSA_BASE',
+                        'PSA_ALG_RSA_PKCS1V15_SIGN_BASE']:
+                # Ad hoc skipping of duplicate names for some numerical values
+                return
+            self.algorithms.add(name)
+            self.record_algorithm_subtype(name, expansion)
+        elif name.startswith('PSA_ALG_') and parameter == 'hash_alg':
+            self.algorithms_from_hash[name] = self.algorithm_tester(name)
+        elif name.startswith('PSA_KEY_USAGE_') and not parameter:
+            self.key_usage_flags.add(name)
+        else:
+            # Some other macro; not a PSA value we need to record
+            return
+
+    _nonascii_re = re.compile(rb'[^\x00-\x7f]+')
+    _continued_line_re = re.compile(rb'\\\r?\n\Z')
+    def read_file(self, header_file):
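+        """Read a header file (opened in binary mode) line by line.
+
+        Join backslash-continued lines, strip non-ASCII bytes, and pass
+        each logical line to read_line().
+        """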
+        for line in header_file:
+            m = re.search(self._continued_line_re, line)
+            while m:
+                cont = next(header_file)
+                line = line[:m.start(0)] + cont
+                m = re.search(self._continued_line_re, line)
+            line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
+            self.read_line(line)
+
+
+class InputsForTest(PSAMacroEnumerator):
+    # pylint: disable=too-many-instance-attributes
+    """Accumulate information about macros to test.
+
+    This includes macro names as well as information about their arguments
+    when applicable.
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.all_declared = set() #type: Set[str]
+        # Identifier prefixes
+        self.table_by_prefix = {
+            'ERROR': self.statuses,
+            'ALG': self.algorithms,
+            'ECC_CURVE': self.ecc_curves,
+            'DH_GROUP': self.dh_groups,
+            'KEY_LIFETIME': self.lifetimes,
+            'KEY_LOCATION': self.locations,
+            'KEY_PERSISTENCE': self.persistence_levels,
+            'KEY_TYPE': self.key_types,
+            'KEY_USAGE': self.key_usage_flags,
+        } #type: Dict[str, Set[str]]
+        # Test functions
+        self.table_by_test_function = {
+            # Any function ending in _algorithm also gets added to
+            # self.algorithms.
+            'key_type': [self.key_types],
+            'block_cipher_key_type': [self.key_types],
+            'stream_cipher_key_type': [self.key_types],
+            'ecc_key_family': [self.ecc_curves],
+            'ecc_key_types': [self.ecc_curves],
+            'dh_key_family': [self.dh_groups],
+            'dh_key_types': [self.dh_groups],
+            'hash_algorithm': [self.hash_algorithms],
+            'mac_algorithm': [self.mac_algorithms],
+            'cipher_algorithm': [],
+            'hmac_algorithm': [self.mac_algorithms, self.sign_algorithms],
+            'aead_algorithm': [self.aead_algorithms],
+            'key_derivation_algorithm': [self.kdf_algorithms],
+            'key_agreement_algorithm': [self.ka_algorithms],
+            'asymmetric_signature_algorithm': [self.sign_algorithms],
+            'asymmetric_signature_wildcard': [self.algorithms],
+            'asymmetric_encryption_algorithm': [],
+            'pake_algorithm': [self.pake_algorithms],
+            'other_algorithm': [],
+            'lifetime': [self.lifetimes],
+        } #type: Dict[str, List[Set[str]]]
+        mac_lengths = [str(n) for n in [
+            1,  # minimum expressible
+            4,  # minimum allowed by policy
+            13, # an odd size in a plausible range
+            14, # an even non-power-of-two size in a plausible range
+            16, # same as full size for at least one algorithm
+            63, # maximum expressible
+        ]]
+        self.arguments_for['mac_length'] += mac_lengths
+        self.arguments_for['min_mac_length'] += mac_lengths
+        aead_lengths = [str(n) for n in [
+            1,  # minimum expressible
+            4,  # minimum allowed by policy
+            13, # an odd size in a plausible range
+            14, # an even non-power-of-two size in a plausible range
+            16, # same as full size for at least one algorithm
+            63, # maximum expressible
+        ]]
+        self.arguments_for['tag_length'] += aead_lengths
+        self.arguments_for['min_tag_length'] += aead_lengths
+
+    def add_numerical_values(self) -> None:
+        """Add numerical values that are not supported to the known identifiers."""
+        # Sets of names per type
+        self.algorithms.add('0xffffffff')
+        self.ecc_curves.add('0xff')
+        self.dh_groups.add('0xff')
+        self.key_types.add('0xffff')
+        self.key_usage_flags.add('0x80000000')
+
+        # Hard-coded values for unknown algorithms
+        #
+        # These have to have values that are correct for their respective
+        # PSA_ALG_IS_xxx macros, but are also not currently assigned and are
+        # not likely to be assigned in the near future.
+        self.hash_algorithms.add('0x020000fe') # 0x020000ff is PSA_ALG_ANY_HASH
+        self.mac_algorithms.add('0x03007fff')
+        self.ka_algorithms.add('0x09fc0000')
+        self.kdf_algorithms.add('0x080000ff')
+        self.pake_algorithms.add('0x0a0000ff')
+        # For AEAD algorithms, the only variability is over the tag length,
+        # and this only applies to known algorithms, so don't test an
+        # unknown algorithm.
+
+    def get_names(self, type_word: str) -> Set[str]:
+        """Return the set of known names of values of the given type."""
+        return {
+            'status': self.statuses,
+            'algorithm': self.algorithms,
+            'ecc_curve': self.ecc_curves,
+            'dh_group': self.dh_groups,
+            'key_type': self.key_types,
+            'key_usage': self.key_usage_flags,
+        }[type_word]
+
+    # Regex for interesting header lines.
+    # Groups: 1=macro name, 2=type, 3=argument list (optional).
+    _header_line_re = \
+        re.compile(r'#define +' +
+                   r'(PSA_((?:(?:DH|ECC|KEY)_)?[A-Z]+)_\w+)' +
+                   r'(?:\(([^\n()]*)\))?')
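+    # For instance, a line such as '#define PSA_ALG_HMAC(hash_alg) ...'
+    # yields group 1 = 'PSA_ALG_HMAC' (macro name), group 2 = 'ALG'
+    # (the prefix looked up in table_by_prefix) and group 3 = 'hash_alg'
+    # (argument list).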
+    # Regex of macro names to exclude.
+    _excluded_name_re = re.compile(r'_(?:GET|IS|OF)_|_(?:BASE|FLAG|MASK)\Z')
+    # Additional excluded macros.
+    _excluded_names = set([
+        # Macros that provide an alternative way to build the same
+        # algorithm as another macro.
+        'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG',
+        'PSA_ALG_FULL_LENGTH_MAC',
+        # Auxiliary macro whose name doesn't fit the usual patterns for
+        # auxiliary macros.
+        'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG_CASE',
+    ])
+    def parse_header_line(self, line: str) -> None:
+        """Parse a C header line, looking for "#define PSA_xxx"."""
+        m = re.match(self._header_line_re, line)
+        if not m:
+            return
+        name = m.group(1)
+        self.all_declared.add(name)
+        if re.search(self._excluded_name_re, name) or \
+           name in self._excluded_names or \
+           self.is_internal_name(name):
+            return
+        dest = self.table_by_prefix.get(m.group(2))
+        if dest is None:
+            return
+        dest.add(name)
+        if m.group(3):
+            self.argspecs[name] = self._argument_split(m.group(3))
+
+    _nonascii_re = re.compile(rb'[^\x00-\x7f]+') #type: Pattern
+    def parse_header(self, filename: str) -> None:
+        """Parse a C header file, looking for "#define PSA_xxx"."""
+        with read_file_lines(filename, binary=True) as lines:
+            for line in lines:
+                line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
+                self.parse_header_line(line)
+
+    _macro_identifier_re = re.compile(r'[A-Z]\w+')
+    def generate_undeclared_names(self, expr: str) -> Iterable[str]:
+        for name in re.findall(self._macro_identifier_re, expr):
+            if name not in self.all_declared:
+                yield name
+
+    def accept_test_case_line(self, function: str, argument: str) -> bool:
+        #pylint: disable=unused-argument
+        undeclared = list(self.generate_undeclared_names(argument))
+        if undeclared:
+            raise Exception('Undeclared names in test case', undeclared)
+        return True
+
+    @staticmethod
+    def normalize_argument(argument: str) -> str:
+        """Normalize whitespace in the given C expression.
+
+        The result uses the same whitespace as
+        `PSAMacroEnumerator.distribute_arguments`.
+        """
+        return re.sub(r',', r', ', re.sub(r' +', r'', argument))
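+    # A minimal illustration (hypothetical argument):
+    # 'PSA_ALG_TRUNCATED_MAC( PSA_ALG_SHA_256,10 )' first loses all
+    # spaces, then gains one space after each comma, yielding
+    # 'PSA_ALG_TRUNCATED_MAC(PSA_ALG_SHA_256, 10)'.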
+
+    def add_test_case_line(self, function: str, argument: str) -> None:
+        """Parse a test case data line, looking for algorithm metadata tests."""
+        sets = []
+        if function.endswith('_algorithm'):
+            sets.append(self.algorithms)
+            if function == 'key_agreement_algorithm' and \
+               argument.startswith('PSA_ALG_KEY_AGREEMENT('):
+                # We only want *raw* key agreement algorithms as such, so
+                # exclude ones that are already chained with a KDF.
+                # Keep the expression as one to test as an algorithm.
+                function = 'other_algorithm'
+        sets += self.table_by_test_function[function]
+        if self.accept_test_case_line(function, argument):
+            for s in sets:
+                s.add(self.normalize_argument(argument))
+
+    # Regex matching a *.data line containing a test function call and
+    # its arguments. The actual definition is partly positional, but this
+    # regex is good enough in practice.
+    _test_case_line_re = re.compile(r'(?!depends_on:)(\w+):([^\n :][^:\n]*)')
+    def parse_test_cases(self, filename: str) -> None:
+        """Parse a test case file (*.data), looking for algorithm metadata tests."""
+        with read_file_lines(filename) as lines:
+            for line in lines:
+                m = re.match(self._test_case_line_re, line)
+                if m:
+                    self.add_test_case_line(m.group(1), m.group(2))
diff --git a/framework/scripts/mbedtls_framework/min_requirements.py b/framework/scripts/mbedtls_framework/min_requirements.py
new file mode 100644
index 0000000..fb20d5b
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/min_requirements.py
@@ -0,0 +1,122 @@
+"""Install all the required Python packages, with the minimum Python version.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import typing
+
+from typing import List, Optional
+
+import framework_scripts_path # pylint: disable=unused-import
+from mbedtls_framework import typing_util
+
+def pylint_doesn_t_notice_that_certain_types_are_used_in_annotations(
+        _list: List[typing.Any],
+) -> None:
+    pass
+
+
+class Requirements:
+    """Collect and massage Python requirements."""
+
+    def __init__(self) -> None:
+        self.requirements = [] #type: List[str]
+
+    def adjust_requirement(self, req: str) -> str:
+        """Adjust a requirement to the minimum specified version."""
+        # allow inheritance #pylint: disable=no-self-use
+        # If a requirement specifies a minimum version, impose that version.
+        split_req = req.split(';', 1)
+        split_req[0] = re.sub(r'>=|~=', r'==', split_req[0])
+        return ';'.join(split_req)
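+    # A minimal illustration (hypothetical requirements):
+    #     adjust_requirement('foo >= 1.2') == 'foo == 1.2'
+    #     adjust_requirement('bar~=2.0; python_version < "3.9"')
+    #         == 'bar==2.0; python_version < "3.9"'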
+
+    def add_file(self, filename: str) -> None:
+        """Add requirements from the specified file.
+
+        This method supports a subset of pip's requirement file syntax:
+        * One requirement specifier per line, which is passed to
+          `adjust_requirement`.
+        * Comments (``#`` at the beginning of the line or after whitespace).
+        * ``-r FILENAME`` to include another file.
+        """
+        with open(filename) as req_file:
+            for line in req_file:
+                line = line.strip()
+                line = re.sub(r'(\A|\s+)#.*', r'', line)
+                if not line:
+                    continue
+                m = re.match(r'-r\s+', line)
+                if m:
+                    nested_file = os.path.join(os.path.dirname(filename),
+                                               line[m.end(0):])
+                    self.add_file(nested_file)
+                    continue
+                self.requirements.append(self.adjust_requirement(line))
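+    # For instance (hypothetical file contents), given the lines
+    #     mypy >= 0.910   # static type checking
+    #     -r other.requirements.txt
+    # the comment is stripped, 'mypy >= 0.910' becomes 'mypy == 0.910',
+    # and other.requirements.txt is read recursively.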
+
+    def write(self, out: typing_util.Writable) -> None:
+        """List the gathered requirements."""
+        for req in self.requirements:
+            out.write(req + '\n')
+
+    def install(
+            self,
+            pip_general_options: Optional[List[str]] = None,
+            pip_install_options: Optional[List[str]] = None,
+    ) -> None:
+        """Call pip to install the requirements."""
+        if pip_general_options is None:
+            pip_general_options = []
+        if pip_install_options is None:
+            pip_install_options = []
+        with tempfile.TemporaryDirectory() as temp_dir:
+            # This is more complicated than it needs to be for the sake
+            # of Windows. Use a temporary file rather than the command line
+            # to avoid quoting issues. Use a temporary directory rather
+            # than NamedTemporaryFile because with a NamedTemporaryFile on
+            # Windows, the subprocess can't open the file because this process
+            # has an exclusive lock on it.
+            req_file_name = os.path.join(temp_dir, 'requirements.txt')
+            with open(req_file_name, 'w') as req_file:
+                self.write(req_file)
+            subprocess.check_call([sys.executable, '-m', 'pip'] +
+                                  pip_general_options +
+                                  ['install'] + pip_install_options +
+                                  ['-r', req_file_name])
+
+def main(default_requirement_file: str) -> None:
+    """Command line entry point."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--no-act', '-n',
+                        action='store_true',
+                        help="Don't act, just print what will be done")
+    parser.add_argument('--pip-install-option',
+                        action='append', dest='pip_install_options',
+                        help="Pass this option to pip install")
+    parser.add_argument('--pip-option',
+                        action='append', dest='pip_general_options',
+                        help="Pass this general option to pip")
+    parser.add_argument('--user',
+                        action='append_const', dest='pip_install_options',
+                        const='--user',
+                        help="Install to the Python user install directory"
+                             " (short for --pip-install-option --user)")
+    parser.add_argument('files', nargs='*', metavar='FILE',
+                        help="Requirement files"
+                             " (default: {})" \
+                             .format(default_requirement_file))
+    options = parser.parse_args()
+    if not options.files:
+        options.files = [default_requirement_file]
+    reqs = Requirements()
+    for filename in options.files:
+        reqs.add_file(filename)
+    reqs.write(sys.stdout)
+    if not options.no_act:
+        reqs.install(pip_general_options=options.pip_general_options,
+                     pip_install_options=options.pip_install_options)
diff --git a/framework/scripts/mbedtls_framework/outcome_analysis.py b/framework/scripts/mbedtls_framework/outcome_analysis.py
new file mode 100644
index 0000000..2a79fd5
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/outcome_analysis.py
@@ -0,0 +1,399 @@
+"""Outcome file analysis code.
+
+This module is the bulk of the code of tests/scripts/analyze_outcomes.py
+in each consuming branch. The consuming script is expected to derive
+the classes with branch-specific customizations such as ignore lists.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import gzip
+import lzma
+import sys
+import traceback
+import re
+import subprocess
+import os
+import typing
+
+from . import collect_test_cases
+
+
+# `ComponentOutcomes` is a named tuple which is defined as:
+# ComponentOutcomes(
+#     successes = {
+#         "<suite_case>",
+#         ...
+#     },
+#     failures = {
+#         "<suite_case>",
+#         ...
+#     }
+# )
+# suite_case = "<suite>;<case>"
+ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
+                                      [('successes', typing.Set[str]),
+                                       ('failures', typing.Set[str])])
+
+# `Outcomes` is a representation of the outcomes file,
+# which is defined as:
+# Outcomes = {
+#     "<component>": ComponentOutcomes,
+#     ...
+# }
+Outcomes = typing.Dict[str, ComponentOutcomes]
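+# For example (hypothetical outcome line), reading the line
+#     Linux-x86_64;component_ref;test_suite_foo;Test bar;PASS;
+# adds the suite_case "test_suite_foo;Test bar" to
+# outcomes['component_ref'].successes.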
+
+
+class Results:
+    """Process analysis results."""
+
+    def __init__(self,
+                 stderr: bool = True,
+                 log_file: str = '') -> None:
+        """Log and count errors.
+
+        Log to stderr if stderr is true.
+        Log to log_file if specified and non-empty.
+        """
+        self.error_count = 0
+        self.warning_count = 0
+        self.stderr = stderr
+        self.log_file = None
+        if log_file:
+            self.log_file = open(log_file, 'w', encoding='utf-8')
+
+    def new_section(self, fmt, *args, **kwargs):
+        self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)
+
+    def info(self, fmt, *args, **kwargs):
+        self._print_line('Info: ' + fmt, *args, **kwargs)
+
+    def error(self, fmt, *args, **kwargs):
+        self.error_count += 1
+        self._print_line('Error: ' + fmt, *args, **kwargs)
+
+    def warning(self, fmt, *args, **kwargs):
+        self.warning_count += 1
+        self._print_line('Warning: ' + fmt, *args, **kwargs)
+
+    def _print_line(self, fmt, *args, **kwargs):
+        line = (fmt + '\n').format(*args, **kwargs)
+        if self.stderr:
+            sys.stderr.write(line)
+        if self.log_file:
+            self.log_file.write(line)
+
+def execute_reference_driver_tests(results: Results, ref_component: str, driver_component: str,
+                                   outcome_file: str) -> None:
+    """Run the tests specified in ref_component and driver_component. Results
+    are stored in the output_file and they will be used for the following
+    coverage analysis"""
+    results.new_section("Test {} and {}", ref_component, driver_component)
+
+    shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
+                    " " + ref_component + " " + driver_component
+    results.info("Running: {}", shell_command)
+    ret_val = subprocess.run(shell_command.split(), check=False).returncode
+
+    if ret_val != 0:
+        results.error("failed to run reference/driver components")
+
+IgnoreEntry = typing.Union[str, typing.Pattern]
+
+def name_matches_pattern(name: str, str_or_re: IgnoreEntry) -> bool:
+    """Check if name matches a pattern, that may be a string or regex.
+    - If the pattern is a string, name must be equal to match.
+    - If the pattern is a regex, name must fully match.
+    """
+    # The CI's python is too old for re.Pattern
+    #if isinstance(str_or_re, re.Pattern):
+    if not isinstance(str_or_re, str):
+        return str_or_re.fullmatch(name) is not None
+    else:
+        return str_or_re == name
+
+def open_outcome_file(outcome_file: str) -> typing.TextIO:
+    if outcome_file.endswith('.gz'):
+        return gzip.open(outcome_file, 'rt', encoding='utf-8')
+    elif outcome_file.endswith('.xz'):
+        return lzma.open(outcome_file, 'rt', encoding='utf-8')
+    else:
+        return open(outcome_file, 'rt', encoding='utf-8')
+
+def read_outcome_file(outcome_file: str) -> Outcomes:
+    """Parse an outcome file and return an outcome collection.
+    """
+    outcomes = {}
+    with open_outcome_file(outcome_file) as input_file:
+        for line in input_file:
+            (_platform, component, suite, case, result, _cause) = line.split(';')
+            # Note that `component` is not unique. If a test case passes on Linux
+            # and fails on FreeBSD, it'll end up in both the successes set and
+            # the failures set.
+            suite_case = ';'.join([suite, case])
+            if component not in outcomes:
+                outcomes[component] = ComponentOutcomes(set(), set())
+            if result == 'PASS':
+                outcomes[component].successes.add(suite_case)
+            elif result == 'FAIL':
+                outcomes[component].failures.add(suite_case)
+
+    return outcomes
+
+
+class Task:
+    """Base class for outcome analysis tasks."""
+
+    # Override the following in child classes.
+    # Map test suite names (with the test_suite_prefix) to a list of ignored
+    # test cases. Each element in the list can be either a string or a regex;
+    # see the `name_matches_pattern` function.
+    IGNORED_TESTS = {} #type: typing.Dict[str, typing.List[IgnoreEntry]]
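+    # A consuming branch might override this as, for instance
+    # (hypothetical suite and test names):
+    #     IGNORED_TESTS = {
+    #         'test_suite_foo': [
+    #             'Never-run test case',
+    #             re.compile(r'Flaky case .*'),
+    #         ],
+    #     }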
+
+    def __init__(self, options) -> None:
+        """Pass command line options to the tasks.
+
+        Each task decides which command line options it cares about.
+        """
+        pass
+
+    def section_name(self) -> str:
+        """The section name to use in results."""
+        raise NotImplementedError
+
+    def ignored_tests(self, test_suite: str) -> typing.Iterator[IgnoreEntry]:
+        """Generate the ignore list for the specified test suite."""
+        if test_suite in self.IGNORED_TESTS:
+            yield from self.IGNORED_TESTS[test_suite]
+        pos = test_suite.find('.')
+        if pos != -1:
+            base_test_suite = test_suite[:pos]
+            if base_test_suite in self.IGNORED_TESTS:
+                yield from self.IGNORED_TESTS[base_test_suite]
+
+    def is_test_case_ignored(self, test_suite: str, test_string: str) -> bool:
+        """Check if the specified test case is ignored."""
+        for str_or_re in self.ignored_tests(test_suite):
+            if name_matches_pattern(test_string, str_or_re):
+                return True
+        return False
+
+    def run(self, results: Results, outcomes: Outcomes):
+        """Run the analysis on the specified outcomes.
+
+        Signal errors via the results object.
+        """
+        raise NotImplementedError
+
+
+class CoverageTask(Task):
+    """Analyze test coverage."""
+
+    # Test cases whose suite and description are matched by an entry in
+    # IGNORED_TESTS are expected to be never executed.
+    # All other test cases are expected to be executed at least once.
+
+    def __init__(self, options) -> None:
+        super().__init__(options)
+        self.full_coverage = options.full_coverage #type: bool
+
+    @staticmethod
+    def section_name() -> str:
+        return "Analyze coverage"
+
+    def run(self, results: Results, outcomes: Outcomes) -> None:
+        """Check that all available test cases are executed at least once."""
+        # Make sure that the generated data files are present (and up-to-date).
+        # This allows analyze_outcomes.py to run correctly on a fresh Git
+        # checkout.
+        cp = subprocess.run(['make', 'generated_files'],
+                            cwd='tests',
+                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+                            check=False)
+        if cp.returncode != 0:
+            sys.stderr.write(cp.stdout.decode('utf-8'))
+            results.error("Failed \"make generated_files\" in tests. "
+                          "Coverage analysis may be incorrect.")
+        available = collect_test_cases.collect_available_test_cases()
+        for suite_case in available:
+            hit = any(suite_case in comp_outcomes.successes or
+                      suite_case in comp_outcomes.failures
+                      for comp_outcomes in outcomes.values())
+            (test_suite, test_description) = suite_case.split(';')
+            ignored = self.is_test_case_ignored(test_suite, test_description)
+
+            if not hit and not ignored:
+                if self.full_coverage:
+                    results.error('Test case not executed: {}', suite_case)
+                else:
+                    results.warning('Test case not executed: {}', suite_case)
+            elif hit and ignored:
+                # If a test case is no longer always skipped, we should remove
+                # it from the ignore list.
+                if self.full_coverage:
+                    results.error('Test case was executed but marked as ignored for coverage: {}',
+                                  suite_case)
+                else:
+                    results.warning('Test case was executed but marked as ignored for coverage: {}',
+                                    suite_case)
+
+
+class DriverVSReference(Task):
+    """Compare outcomes from testing with and without a driver.
+
+    There are two ways to run analyze_driver_vs_reference_xxx locally:
+    1. Run tests and then analysis:
+      - tests/scripts/all.sh --outcome-file "$PWD/out.csv" <component_ref> <component_driver>
+      - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
+    2. Let this script run both steps automatically:
+      - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
+    """
+
+    # Override the following in child classes.
+    # Configuration name (all.sh component) used as the reference.
+    REFERENCE = ''
+    # Configuration name (all.sh component) used as the driver.
+    DRIVER = ''
+    # Ignored test suites (without the test_suite_ prefix).
+    IGNORED_SUITES = [] #type: typing.List[str]
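+    # A subclass in a consuming branch might look like, for instance
+    # (hypothetical component names):
+    #     class DriverVSReferenceFoo(DriverVSReference):
+    #         REFERENCE = 'test_psa_crypto_config_reference_foo'
+    #         DRIVER = 'test_psa_crypto_config_accel_foo'
+    #         IGNORED_SUITES = ['foo']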
+
+    def __init__(self, options) -> None:
+        super().__init__(options)
+        self.ignored_suites = frozenset('test_suite_' + x
+                                        for x in self.IGNORED_SUITES)
+
+    def section_name(self) -> str:
+        return f"Analyze driver {self.DRIVER} vs reference {self.REFERENCE}"
+
+    def run(self, results: Results, outcomes: Outcomes) -> None:
+        """Check that all tests passing in the driver component are also
+        passing in the corresponding reference component.
+        Skip:
+        - entire test suites listed in the ignored_suites list
+        - individual test cases within a suite whose description matches an
+          entry in the ignore list
+        """
+        ref_outcomes = outcomes.get("component_" + self.REFERENCE)
+        driver_outcomes = outcomes.get("component_" + self.DRIVER)
+
+        if ref_outcomes is None or driver_outcomes is None:
+            results.error("required components are missing: bad outcome file?")
+            return
+
+        if not ref_outcomes.successes:
+            results.error("no passing test in reference component: bad outcome file?")
+            return
+
+        for suite_case in ref_outcomes.successes:
+            # suite_case is like "test_suite_foo.bar;Description of test case"
+            (full_test_suite, test_string) = suite_case.split(';')
+            test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name
+
+            # Immediately skip fully-ignored test suites
+            if test_suite in self.ignored_suites or \
+               full_test_suite in self.ignored_suites:
+                continue
+
+            # For ignored test cases inside test suites, remember the status:
+            # don't issue an error if they're skipped with drivers,
+            # but do issue an error if they're not (that means the ignore
+            # list has a stale entry).
+            ignored = self.is_test_case_ignored(full_test_suite, test_string)
+
+            if not ignored and suite_case not in driver_outcomes.successes:
+                results.error("SKIP/FAIL -> PASS: {}", suite_case)
+            if ignored and suite_case in driver_outcomes.successes:
+                results.error("uselessly ignored: {}", suite_case)
+
+
+# Set this to False if a consuming branch can't achieve full test coverage
+# in its default CI run.
+FULL_COVERAGE_BY_DEFAULT = True
+
+def main(known_tasks: typing.Dict[str, typing.Type[Task]]) -> None:
+    try:
+        parser = argparse.ArgumentParser(description=__doc__)
+        parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
+                            help='Outcome file to analyze (can be .gz or .xz)')
+        parser.add_argument('specified_tasks', default='all', nargs='?',
+                            help='Analysis to be done. By default, run all tasks. '
+                                 'With one or more TASK, run only those. '
+                                 'TASK can be the name of a single task or a '
+                                 'comma- or space-separated list of tasks.')
+        parser.add_argument('--allow-partial-coverage', action='store_false',
+                            dest='full_coverage', default=FULL_COVERAGE_BY_DEFAULT,
+                            help=("Only warn if a test case is skipped in all components" +
+                                  (" (default)" if not FULL_COVERAGE_BY_DEFAULT else "") +
+                                  ". Only used by the 'analyze_coverage' task."))
+        parser.add_argument('--list', action='store_true',
+                            help='List all available tasks and exit.')
+        parser.add_argument('--log-file',
+                            default='tests/analyze_outcomes.log',
+                            help='Log file (default: tests/analyze_outcomes.log;'
+                                 ' empty means no log file)')
+        parser.add_argument('--require-full-coverage', action='store_true',
+                            dest='full_coverage', default=FULL_COVERAGE_BY_DEFAULT,
+                            help=("Require all available test cases to be executed" +
+                                  (" (default)" if FULL_COVERAGE_BY_DEFAULT else "") +
+                                  ". Only used by the 'analyze_coverage' task."))
+        options = parser.parse_args()
+
+        if options.list:
+            for task_name in known_tasks:
+                print(task_name)
+            sys.exit(0)
+
+        main_results = Results(log_file=options.log_file)
+
+        if options.specified_tasks == 'all':
+            tasks_list = list(known_tasks.keys())
+        else:
+            tasks_list = re.split(r'[, ]+', options.specified_tasks)
+            for task_name in tasks_list:
+                if task_name not in known_tasks:
+                    sys.stderr.write('invalid task: {}\n'.format(task_name))
+                    sys.exit(2)
+
+        # If the outcome file exists, parse it once and share the result
+        # among tasks to improve performance.
+        # Otherwise, it will be generated by execute_reference_driver_tests.
+        if not os.path.exists(options.outcomes):
+            if len(tasks_list) > 1:
+                sys.stderr.write("mutiple tasks found, please provide a valid outcomes file.\n")
+                sys.exit(2)
+
+            task_name = tasks_list[0]
+            task_class = known_tasks[task_name]
+            if not issubclass(task_class, DriverVSReference):
+                sys.stderr.write("please provide valid outcomes file for {}.\n".format(task_name))
+                sys.exit(2)
+            # mypy isn't smart enough to know that REFERENCE and DRIVER
+            # are *class* attributes of all classes derived from
+            # DriverVSReference. (It would be smart enough if we had an
+            # instance of task_class, but we can't construct an instance
+            # until we have the outcome data, so at this point we only
+            # have the class.) So we use indirection to access the class
+            # attributes.
+            execute_reference_driver_tests(main_results,
+                                           getattr(task_class, 'REFERENCE'),
+                                           getattr(task_class, 'DRIVER'),
+                                           options.outcomes)
+
+        outcomes = read_outcome_file(options.outcomes)
+
+        for task_name in tasks_list:
+            task_constructor = known_tasks[task_name]
+            task_instance = task_constructor(options)
+            main_results.new_section(task_instance.section_name())
+            task_instance.run(main_results, outcomes)
+
+        main_results.info("Overall results: {} warnings and {} errors",
+                          main_results.warning_count, main_results.error_count)
+
+        sys.exit(0 if (main_results.error_count == 0) else 1)
+
+    except Exception: # pylint: disable=broad-except
+        # Print the backtrace and exit explicitly with our chosen status.
+        traceback.print_exc()
+        sys.exit(120)
diff --git a/framework/scripts/mbedtls_framework/psa_information.py b/framework/scripts/mbedtls_framework/psa_information.py
new file mode 100644
index 0000000..f0d0e79
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/psa_information.py
@@ -0,0 +1,156 @@
+"""Collect information about PSA cryptographic mechanisms.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import re
+from collections import OrderedDict
+from typing import List, Optional
+
+from . import build_tree
+from . import macro_collector
+
+
+class Information:
+    """Gather information about PSA constructors."""
+
+    def __init__(self) -> None:
+        self.constructors = self.read_psa_interface()
+
+    @staticmethod
+    def remove_unwanted_macros(
+            constructors: macro_collector.PSAMacroEnumerator
+    ) -> None:
+        """Remove constructors that should be exckuded from systematic testing."""
+        # Mbed TLS does not support finite-field DSA, but 3.6 defines DSA
+        # identifiers for historical reasons.
+        # Don't attempt to generate any related test case.
+        # The corresponding test cases would be commented out anyway,
+        # but for DSA, we don't have enough support in the test scripts
+        # to generate these test cases.
+        constructors.key_types.discard('PSA_KEY_TYPE_DSA_KEY_PAIR')
+        constructors.key_types.discard('PSA_KEY_TYPE_DSA_PUBLIC_KEY')
+
+    def read_psa_interface(self) -> macro_collector.PSAMacroEnumerator:
+        """Return the list of known key types, algorithms, etc."""
+        constructors = macro_collector.InputsForTest()
+
+        if build_tree.looks_like_root('.'):
+            if build_tree.looks_like_mbedtls_root('.') and \
+               (not build_tree.is_mbedtls_3_6()):
+                header_file_names = ['tf-psa-crypto/include/psa/crypto_values.h',
+                                     'tf-psa-crypto/include/psa/crypto_extra.h']
+                test_suites = ['tf-psa-crypto/tests/suites/test_suite_psa_crypto_metadata.data']
+            else:
+                header_file_names = ['include/psa/crypto_values.h',
+                                     'include/psa/crypto_extra.h']
+                test_suites = ['tests/suites/test_suite_psa_crypto_metadata.data']
+
+        for header_file_name in header_file_names:
+            constructors.parse_header(header_file_name)
+        for test_cases in test_suites:
+            constructors.parse_test_cases(test_cases)
+        self.remove_unwanted_macros(constructors)
+        constructors.gather_arguments()
+        return constructors
+
+
+def psa_want_symbol(name: str, prefix: Optional[str] = None) -> str:
+    """Return the PSA_WANT_xxx symbol associated with a PSA crypto feature.
+
+    You can use an alternative `prefix`, e.g. 'MBEDTLS_PSA_BUILTIN_'
+    when specifically testing builtin implementations.
+    """
+    if prefix is None:
+        prefix = 'PSA_WANT_'
+    if name.startswith('PSA_'):
+        return prefix + name[4:]
+    else:
+        raise ValueError('Unable to determine the PSA_WANT_ symbol for ' + name)
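+# For instance, psa_want_symbol('PSA_ALG_SHA_256') returns
+# 'PSA_WANT_ALG_SHA_256', and with prefix='MBEDTLS_PSA_BUILTIN_' it
+# returns 'MBEDTLS_PSA_BUILTIN_ALG_SHA_256'.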
+
+def finish_family_dependency(dep: str, bits: int) -> str:
+    """Finish dep if it's a family dependency symbol prefix.
+
+    A family dependency symbol prefix is a PSA_WANT_ symbol that needs to be
+    qualified by the key size. If dep is such a symbol, finish it by adjusting
+    the prefix and appending the key size. Other symbols are left unchanged.
+    """
+    return re.sub(r'_FAMILY_(.*)', r'_\1_' + str(bits), dep)
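+# For instance, finish_family_dependency('PSA_WANT_ECC_FAMILY_SECP_R1', 256)
+# returns 'PSA_WANT_ECC_SECP_R1_256'; symbols without a _FAMILY_ part are
+# returned unchanged.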
+
+def finish_family_dependencies(dependencies: List[str], bits: int) -> List[str]:
+    """Finish any family dependency symbol prefixes.
+
+    Apply `finish_family_dependency` to each element of `dependencies`.
+    """
+    return [finish_family_dependency(dep, bits) for dep in dependencies]
+
+SYMBOLS_WITHOUT_DEPENDENCY = frozenset([
+    'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', # modifier, only in policies
+    'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # modifier
+    'PSA_ALG_ANY_HASH', # only in policies
+    'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', # modifier, only in policies
+    'PSA_ALG_KEY_AGREEMENT', # chaining
+    'PSA_ALG_TRUNCATED_MAC', # modifier
+])
+def automatic_dependencies(*expressions: str,
+                           prefix: Optional[str] = None) -> List[str]:
+    """Infer dependencies of a test case by looking for PSA_xxx symbols.
+
+    The arguments are strings which should be C expressions. Do not use
+    string literals or comments as this function is not smart enough to
+    skip them.
+
+    `prefix`: prefix to use in dependencies. Defaults to ``'PSA_WANT_'``.
+              Use ``'MBEDTLS_PSA_BUILTIN_'`` when specifically testing
+              builtin implementations.
+    """
+    used = set()
+    for expr in expressions:
+        used.update(re.findall(r'PSA_(?:ALG|ECC_FAMILY|DH_FAMILY|KEY_TYPE)_\w+', expr))
+    used.difference_update(SYMBOLS_WITHOUT_DEPENDENCY)
+    return sorted(psa_want_symbol(name, prefix=prefix) for name in used)
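+# A minimal illustration (hypothetical expressions):
+#     automatic_dependencies('PSA_KEY_TYPE_AES', 'PSA_ALG_CBC_NO_PADDING')
+# returns ['PSA_WANT_ALG_CBC_NO_PADDING', 'PSA_WANT_KEY_TYPE_AES'].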
+
+# Define a set of regular expressions and dependencies to optionally append
+# extra dependencies to a test case based on its description.
+
+# Skip AES test cases which require a 192- or 256-bit key
+# if MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH is defined.
+AES_128BIT_ONLY_DEP_REGEX = re.compile(r'AES\s(192|256)')
+AES_128BIT_ONLY_DEP = ['!MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH']
+# Skip AES/ARIA/CAMELLIA test cases which require a decrypt operation in ECB
+# mode if MBEDTLS_BLOCK_CIPHER_NO_DECRYPT is enabled.
+ECB_NO_PADDING_DEP_REGEX = re.compile(r'(AES|ARIA|CAMELLIA).*ECB_NO_PADDING')
+ECB_NO_PADDING_DEP = ['!MBEDTLS_BLOCK_CIPHER_NO_DECRYPT']
+
+DEPENDENCY_FROM_DESCRIPTION = OrderedDict()
+DEPENDENCY_FROM_DESCRIPTION[AES_128BIT_ONLY_DEP_REGEX] = AES_128BIT_ONLY_DEP
+DEPENDENCY_FROM_DESCRIPTION[ECB_NO_PADDING_DEP_REGEX] = ECB_NO_PADDING_DEP
+def generate_deps_from_description(
+        description: str
+    ) -> List[str]:
+    """Return additional dependencies based on test case description and REGEX.
+    """
+    dep_list = []
+    for regex, deps in DEPENDENCY_FROM_DESCRIPTION.items():
+        if re.search(regex, description):
+            dep_list += deps
+
+    return dep_list
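+# For example, a description containing "AES 192" yields
+# ['!MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH'], and a description matching
+# "AES ... ECB_NO_PADDING" also picks up
+# '!MBEDTLS_BLOCK_CIPHER_NO_DECRYPT'.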
+
+def tweak_key_pair_dependency(dep: str, usages: List[str]) -> List[str]:
+    """
+    This helper function adds the proper suffix to PSA_WANT_KEY_TYPE_xxx_KEY_PAIR
+    symbols according to the required usages.
+    """
+    if dep.endswith('KEY_PAIR'):
+        return [dep + '_' + usage for usage in usages]
+    return [dep]
+
+def fix_key_pair_dependencies(dep_list: List[str], usages: List[str]) -> List[str]:
+    new_list = [new_deps
+                for dep in dep_list
+                for new_deps in tweak_key_pair_dependency(dep, usages)]
+
+    return new_list
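+# A minimal illustration (hypothetical inputs):
+#     fix_key_pair_dependencies(['PSA_WANT_KEY_TYPE_ECC_KEY_PAIR'],
+#                               ['IMPORT', 'EXPORT'])
+# returns ['PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_IMPORT',
+#          'PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_EXPORT'].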
diff --git a/framework/scripts/mbedtls_framework/psa_storage.py b/framework/scripts/mbedtls_framework/psa_storage.py
new file mode 100644
index 0000000..9630b95
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/psa_storage.py
@@ -0,0 +1,217 @@
+"""Knowledge about the PSA key store as implemented in Mbed TLS.
+
+Note that if you need to make a change that affects how keys are
+stored, this may indicate that the key store is changing in a
+backward-incompatible way! Think carefully about backward compatibility
+before changing how test data is constructed or validated.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import re
+import struct
+from typing import Dict, List, Optional, Set, Union
+import unittest
+
+from . import c_build_helper
+from . import build_tree
+
+
+class Expr:
+    """Representation of a C expression with a known or knowable numerical value."""
+
+    def __init__(self, content: Union[int, str]):
+        if isinstance(content, int):
+            digits = 8 if content > 0xffff else 4
+            self.string = '{0:#0{1}x}'.format(content, digits + 2)
+            self.value_if_known = content #type: Optional[int]
+        else:
+            self.string = content
+            self.unknown_values.add(self.normalize(content))
+            self.value_if_known = None
+
+    value_cache = {} #type: Dict[str, int]
+    """Cache of known values of expressions."""
+
+    unknown_values = set() #type: Set[str]
+    """Expressions whose values are not present in `value_cache` yet."""
+
+    def update_cache(self) -> None:
+        """Update `value_cache` for expressions registered in `unknown_values`."""
+        expressions = sorted(self.unknown_values)
+        # Temporary, while Mbed TLS does not just rely on the TF-PSA-Crypto
+        # build system to build its crypto library. When it does, the first
+        # case can just be removed.
+
+        if build_tree.looks_like_root('.'):
+            includes = ['include']
+            if build_tree.looks_like_tf_psa_crypto_root('.'):
+                includes.append('drivers/builtin/include')
+                includes.append('drivers/everest/include')
+            elif not build_tree.is_mbedtls_3_6():
+                includes.append('tf-psa-crypto/include')
+                includes.append('tf-psa-crypto/drivers/builtin/include')
+                includes.append('tf-psa-crypto/drivers/everest/include')
+
+        values = c_build_helper.get_c_expression_values(
+            'unsigned long', '%lu',
+            expressions,
+            header="""
+            #include <psa/crypto.h>
+            """,
+            include_path=includes) #type: List[str]
+        for e, v in zip(expressions, values):
+            self.value_cache[e] = int(v, 0)
+        self.unknown_values.clear()
+
+    @staticmethod
+    def normalize(string: str) -> str:
+        """Put the given C expression in a canonical form.
+
+        This function is only intended to give correct results for the
+        relatively simple kind of C expression typically used with this
+        module.
+        """
+        return re.sub(r'\s+', r'', string)
+
+    def value(self) -> int:
+        """Return the numerical value of the expression."""
+        if self.value_if_known is None:
+            if re.match(r'([0-9]+|0x[0-9a-f]+)\Z', self.string, re.I):
+                return int(self.string, 0)
+            normalized = self.normalize(self.string)
+            if normalized not in self.value_cache:
+                self.update_cache()
+            self.value_if_known = self.value_cache[normalized]
+        return self.value_if_known
+
+Exprable = Union[str, int, Expr]
+"""Something that can be converted to a C expression with a known numerical value."""
+
+def as_expr(thing: Exprable) -> Expr:
+    """Return an `Expr` object for `thing`.
+
+    If `thing` is already an `Expr` object, return it. Otherwise build a new
+    `Expr` object from `thing`. `thing` can be an integer or a string that
+    contains a C expression.
+    """
+    if isinstance(thing, Expr):
+        return thing
+    else:
+        return Expr(thing)
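+# A minimal usage sketch: as_expr(0x2400).string is '0x2400' and its
+# value() is 9216, while as_expr('PSA_KEY_TYPE_AES').value() is computed
+# by compiling and running a small C program the first time it is
+# needed, then cached.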
+
+
+class Key:
+    """Representation of a PSA crypto key object and its storage encoding.
+    """
+
+    LATEST_VERSION = 0
+    """The latest version of the storage format."""
+
+    def __init__(self, *,
+                 version: Optional[int] = None,
+                 id: Optional[int] = None, #pylint: disable=redefined-builtin
+                 lifetime: Exprable = 'PSA_KEY_LIFETIME_PERSISTENT',
+                 type: Exprable, #pylint: disable=redefined-builtin
+                 bits: int,
+                 usage: Exprable, alg: Exprable, alg2: Exprable,
+                 material: bytes #pylint: disable=used-before-assignment
+                ) -> None:
+        self.version = self.LATEST_VERSION if version is None else version
+        self.id = id #pylint: disable=invalid-name #type: Optional[int]
+        self.lifetime = as_expr(lifetime) #type: Expr
+        self.type = as_expr(type) #type: Expr
+        self.bits = bits #type: int
+        self.usage = as_expr(usage) #type: Expr
+        self.alg = as_expr(alg) #type: Expr
+        self.alg2 = as_expr(alg2) #type: Expr
+        self.material = material #type: bytes
+
+    MAGIC = b'PSA\000KEY\000'
+
+    @staticmethod
+    def pack(
+            fmt: str,
+            *args: Union[int, Expr]
+    ) -> bytes: #pylint: disable=used-before-assignment
+        """Pack the given arguments into a byte string according to the given format.
+
+        This function is similar to `struct.pack`, but with the following differences:
+        * All integer values are encoded with standard sizes and in
+          little-endian representation. `fmt` must not include an endianness
+          prefix.
+        * Arguments can be `Expr` objects instead of integers.
+        * Only integer-valued elements are supported.
+        """
+        return struct.pack('<' + fmt, # little-endian, standard sizes
+                           *[arg.value() if isinstance(arg, Expr) else arg
+                             for arg in args])
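+    # For instance, Key.pack('LH', Expr(1), 0x2400) is equivalent to
+    # struct.pack('<LH', 1, 0x2400), i.e. b'\x01\x00\x00\x00\x00\x24'.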
+
+    def bytes(self) -> bytes:
+        """Return the representation of the key in storage as a byte array.
+
+        This is the content of the PSA storage file. When PSA storage is
+        implemented over stdio files, this does not include any wrapping made
+        by the PSA-storage-over-stdio-file implementation.
+
+        Note that if you need to make a change in this function,
+        this may indicate that the key store is changing in a
+        backward-incompatible way! Think carefully about backward
+        compatibility before making any change here.
+        """
+        header = self.MAGIC + self.pack('L', self.version)
+        if self.version == 0:
+            attributes = self.pack('LHHLLL',
+                                   self.lifetime, self.type, self.bits,
+                                   self.usage, self.alg, self.alg2)
+            material = self.pack('L', len(self.material)) + self.material
+        else:
+            raise NotImplementedError
+        return header + attributes + material
+
+    def hex(self) -> str:
+        """Return the representation of the key as a hexadecimal string.
+
+        This is the hexadecimal representation of `self.bytes`.
+        """
+        return self.bytes().hex()
+
+    def location_value(self) -> int:
+        """The numerical value of the location encoded in the key's lifetime."""
+        return self.lifetime.value() >> 8
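+    # For instance, a key whose lifetime evaluates to 0x00000101
+    # (hypothetical value: persistence level 0x01, location 0x000001)
+    # has location_value() == 1.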
+
+
+class TestKey(unittest.TestCase):
+    # pylint: disable=line-too-long
+    """A few smoke tests for the functionality of the `Key` class."""
+
+    def test_numerical(self):
+        key = Key(version=0,
+                  id=1, lifetime=0x00000001,
+                  type=0x2400, bits=128,
+                  usage=0x00000300, alg=0x05500200, alg2=0x04c01000,
+                  material=b'@ABCDEFGHIJKLMNO')
+        expected_hex = '505341004b45590000000000010000000024800000030000000250050010c00410000000404142434445464748494a4b4c4d4e4f'
+        self.assertEqual(key.bytes(), bytes.fromhex(expected_hex))
+        self.assertEqual(key.hex(), expected_hex)
+
+    def test_names(self):
+        length = 0xfff8 // 8 # PSA_MAX_KEY_BITS in bytes
+        key = Key(version=0,
+                  id=1, lifetime='PSA_KEY_LIFETIME_PERSISTENT',
+                  type='PSA_KEY_TYPE_RAW_DATA', bits=length*8,
+                  usage=0, alg=0, alg2=0,
+                  material=b'\x00' * length)
+        expected_hex = '505341004b45590000000000010000000110f8ff000000000000000000000000ff1f0000' + '00' * length
+        self.assertEqual(key.bytes(), bytes.fromhex(expected_hex))
+        self.assertEqual(key.hex(), expected_hex)
+
+    def test_defaults(self):
+        key = Key(type=0x1001, bits=8,
+                  usage=0, alg=0, alg2=0,
+                  material=b'\x2a')
+        expected_hex = '505341004b455900000000000100000001100800000000000000000000000000010000002a'
+        self.assertEqual(key.bytes(), bytes.fromhex(expected_hex))
+        self.assertEqual(key.hex(), expected_hex)
diff --git a/framework/scripts/mbedtls_framework/psa_test_case.py b/framework/scripts/mbedtls_framework/psa_test_case.py
new file mode 100644
index 0000000..5964cad
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/psa_test_case.py
@@ -0,0 +1,200 @@
+"""Generate test cases for PSA API calls, with automatic dependencies.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import os
+import re
+from typing import FrozenSet, List, Optional, Set
+
+from . import build_tree
+from . import psa_information
+from . import test_case
+
+
+# Skip test cases for which the dependency symbols are not defined.
+# We assume that this means that a required mechanism is not implemented.
+# Note that if we erroneously skip generating test cases for
+# mechanisms that are not implemented, this should be caught
+# by the NOT_SUPPORTED test cases generated by generate_psa_tests.py
+# in test_suite_psa_crypto_not_supported and test_suite_psa_crypto_op_fail:
+# those emit tests with negative dependencies, which will not be skipped here.
+
+def read_implemented_dependencies(acc: Set[str], filename: str) -> None:
+    with open(filename) as input_stream:
+        for line in input_stream:
+            for symbol in re.findall(r'\bPSA_WANT_\w+\b', line):
+                acc.add(symbol)
+
+_implemented_dependencies = None #type: Optional[FrozenSet[str]] #pylint: disable=invalid-name
+
+def find_dependencies_not_implemented(dependencies: List[str]) -> List[str]:
+    """List the dependencies that are not implemented."""
+    global _implemented_dependencies #pylint: disable=global-statement,invalid-name
+    if _implemented_dependencies is None:
+        # Temporary, while Mbed TLS does not just rely on the TF-PSA-Crypto
+        # build system to build its crypto library. When it does, the first
+        # case can just be removed.
+
+        if build_tree.looks_like_root('.'):
+            if build_tree.looks_like_mbedtls_root('.') and \
+               (not build_tree.is_mbedtls_3_6()):
+                include_dir = 'tf-psa-crypto/include'
+            else:
+                include_dir = 'include'
+
+        acc = set() #type: Set[str]
+        for filename in [
+                os.path.join(include_dir, 'psa/crypto_config.h'),
+                os.path.join(include_dir, 'psa/crypto_adjust_config_synonyms.h'),
+        ]:
+            read_implemented_dependencies(acc, filename)
+        _implemented_dependencies = frozenset(acc)
+    return [dep
+            for dep in dependencies
+            if (dep not in _implemented_dependencies and
+                dep.startswith('PSA_WANT'))]
+
+
+class TestCase(test_case.TestCase):
+    """A PSA test case with automatically inferred dependencies.
+
+    For mechanisms like ECC curves where the support status includes
+    the key bit-size, this class assumes that only one bit-size is
+    involved in a given test case.
+    """
+
+    def __init__(self, dependency_prefix: Optional[str] = None) -> None:
+        """Construct a test case for a PSA Crypto API call.
+
+        `dependency_prefix`: prefix to use in dependencies. Defaults to
+                             ``'PSA_WANT_'``. Use ``'MBEDTLS_PSA_BUILTIN_'``
+                             when specifically testing builtin implementations.
+        """
+        super().__init__()
+        del self.dependencies
+        self.manual_dependencies = [] #type: List[str]
+        self.automatic_dependencies = set() #type: Set[str]
+        self.dependency_prefix = dependency_prefix #type: Optional[str]
+        self.negated_dependencies = set() #type: Set[str]
+        self.key_bits = None #type: Optional[int]
+        self.key_pair_usage = None #type: Optional[List[str]]
+
+    def set_key_bits(self, key_bits: Optional[int]) -> None:
+        """Use the given key size for automatic dependency generation.
+
+        Call this function before set_arguments() if relevant.
+
+        This is only relevant for ECC and DH keys. For other key types,
+        this information is ignored.
+        """
+        self.key_bits = key_bits
+
+    def set_key_pair_usage(self, key_pair_usage: Optional[List[str]]) -> None:
+        """Use the given suffixes for key pair dependencies.
+
+        Call this function before set_arguments() if relevant.
+
+        This is only relevant for key pair types. For other key types,
+        this information is ignored.
+        """
+        self.key_pair_usage = key_pair_usage
+
+    def infer_dependencies(self, arguments: List[str]) -> List[str]:
+        """Infer dependencies based on the test case arguments."""
+        dependencies = psa_information.automatic_dependencies(*arguments,
+                                                              prefix=self.dependency_prefix)
+        if self.key_bits is not None:
+            dependencies = psa_information.finish_family_dependencies(dependencies,
+                                                                      self.key_bits)
+        if self.key_pair_usage is not None:
+            dependencies = psa_information.fix_key_pair_dependencies(dependencies,
+                                                                     self.key_pair_usage)
+        if 'PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE' in dependencies and \
+           'PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE' not in self.negated_dependencies and \
+           self.key_bits is not None:
+            size_dependency = ('PSA_VENDOR_RSA_GENERATE_MIN_KEY_BITS <= ' +
+                               str(self.key_bits))
+            dependencies.append(size_dependency)
+        return dependencies
+
+    def assumes_not_supported(self, name: str) -> None:
+        """Negate the given mechanism for automatic dependency generation.
+
+        `name` can be either a dependency symbol (``PSA_WANT_xxx``) or
+        a mechanism name (``PSA_KEY_TYPE_xxx``, etc.).
+
+        Call this function before set_arguments() for a test case that should
+        run if the given mechanism is not supported.
+
+        Call modifiers such as set_key_bits() and set_key_pair_usage() before
+        calling this method, if applicable.
+
+        A mechanism is a PSA_XXX symbol, e.g. PSA_KEY_TYPE_AES, PSA_ALG_HMAC,
+        etc. For mechanisms like ECC curves where the support status includes
+        the key bit-size, this class assumes that only one bit-size is
+        involved in a given test case.
+        """
+        if name.startswith('PSA_WANT_'):
+            self.negated_dependencies.add(name)
+            return
+        if name == 'PSA_KEY_TYPE_RSA_KEY_PAIR' and \
+           self.key_bits is not None and \
+           self.key_pair_usage == ['GENERATE']:
+            # When RSA key pair generation is not supported, it could be
+            # because the specific key size is out of range, or because
+            # RSA key pair generation itself is not supported. Assume the
+            # latter.
+            dep = psa_information.psa_want_symbol(name, prefix=self.dependency_prefix)
+
+            self.negated_dependencies.add(dep + '_GENERATE')
+            return
+        dependencies = self.infer_dependencies([name])
+        # * If we have more than one dependency to negate, the result would
+        #   say that all of the dependencies are disabled, which is not
+        #   a desirable outcome: the negation of (A and B) is (!A or !B),
+        #   not (!A and !B).
+        # * If we have no dependency to negate, the result wouldn't be a
+        #   not-supported case.
+        # Assert that we don't reach either such case.
+        assert len(dependencies) == 1
+        self.negated_dependencies.add(dependencies[0])
+
+    def set_arguments(self, arguments: List[str]) -> None:
+        """Set test case arguments and automatically infer dependencies."""
+        super().set_arguments(arguments)
+        dependencies = self.infer_dependencies(arguments)
+        for i in range(len(dependencies)): #pylint: disable=consider-using-enumerate
+            if dependencies[i] in self.negated_dependencies:
+                dependencies[i] = '!' + dependencies[i]
+        self.skip_if_any_not_implemented(dependencies)
+        self.automatic_dependencies.update(dependencies)
+
+    def set_dependencies(self, dependencies: List[str]) -> None:
+        """Override any previously added automatic or manual dependencies.
+
+        Also override any previous instruction to skip the test case.
+        """
+        self.manual_dependencies = dependencies
+        self.automatic_dependencies.clear()
+        self.skip_reasons = []
+
+    def add_dependencies(self, dependencies: List[str]) -> None:
+        """Add manual dependencies."""
+        self.manual_dependencies += dependencies
+
+    def get_dependencies(self) -> List[str]:
+        # Make the output independent of the order in which the dependencies
+        # are calculated by the script. Also avoid duplicates. This makes
+        # the output robust with respect to refactoring of the scripts.
+        dependencies = set(self.manual_dependencies)
+        dependencies.update(self.automatic_dependencies)
+        return sorted(dependencies)
+
+    def skip_if_any_not_implemented(self, dependencies: List[str]) -> None:
+        """Skip the test case if any of the given dependencies is not implemented."""
+        not_implemented = find_dependencies_not_implemented(dependencies)
+        for dep in not_implemented:
+            self.skip_because('not implemented: ' + dep)
diff --git a/framework/scripts/mbedtls_framework/test_case.py b/framework/scripts/mbedtls_framework/test_case.py
new file mode 100644
index 0000000..58e75a1
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/test_case.py
@@ -0,0 +1,169 @@
+"""Library for constructing an Mbed TLS test case.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import binascii
+import os
+import sys
+from typing import Iterable, List, Optional
+from enum import Enum
+
+from . import build_tree
+from . import psa_information
+from . import typing_util
+
+HASHES_3_6 = {
+    "PSA_ALG_MD5" : "MBEDTLS_MD_CAN_MD5",
+    "PSA_ALG_RIPEMD160" : "MBEDTLS_MD_CAN_RIPEMD160",
+    "PSA_ALG_SHA_1" : "MBEDTLS_MD_CAN_SHA1",
+    "PSA_ALG_SHA_224" : "MBEDTLS_MD_CAN_SHA224",
+    "PSA_ALG_SHA_256" : "MBEDTLS_MD_CAN_SHA256",
+    "PSA_ALG_SHA_384" : "MBEDTLS_MD_CAN_SHA384",
+    "PSA_ALG_SHA_512" : "MBEDTLS_MD_CAN_SHA512",
+    "PSA_ALG_SHA3_224" : "MBEDTLS_MD_CAN_SHA3_224",
+    "PSA_ALG_SHA3_256" : "MBEDTLS_MD_CAN_SHA3_256",
+    "PSA_ALG_SHA3_384" : "MBEDTLS_MD_CAN_SHA3_384",
+    "PSA_ALG_SHA3_512" : "MBEDTLS_MD_CAN_SHA3_512"
+}
+
+PK_MACROS_3_6 = {
+    "PSA_KEY_TYPE_ECC_PUBLIC_KEY" : "MBEDTLS_PK_HAVE_ECC_KEYS"
+}
+
+class Domain36(Enum):
+    PSA = 1
+    TLS_1_3_ONLY = 2
+    USE_PSA = 3
+    LEGACY = 4
+
+def hex_string(data: bytes) -> str:
+    return '"' + binascii.hexlify(data).decode('ascii') + '"'
+
+class MissingDescription(Exception):
+    pass
+
+class MissingFunction(Exception):
+    pass
+
+class TestCase:
+    """An Mbed TLS test case."""
+
+    def __init__(self, description: Optional[str] = None):
+        self.comments = [] #type: List[str]
+        self.description = description #type: Optional[str]
+        self.dependencies = [] #type: List[str]
+        self.function = None #type: Optional[str]
+        self.arguments = [] #type: List[str]
+        self.skip_reasons = [] #type: List[str]
+
+    def add_comment(self, *lines: str) -> None:
+        self.comments += lines
+
+    def set_description(self, description: str) -> None:
+        self.description = description
+
+    def get_dependencies(self) -> List[str]:
+        return self.dependencies
+
+    def set_dependencies(self, dependencies: List[str]) -> None:
+        self.dependencies = dependencies
+
+    def set_function(self, function: str) -> None:
+        self.function = function
+
+    def set_arguments(self, arguments: List[str]) -> None:
+        self.arguments = arguments
+
+    def skip_because(self, reason: str) -> None:
+        """Skip this test case.
+
+        It will be included in the output, but commented out.
+
+        This is intended for test cases that are obtained from a
+        systematic enumeration, but that have dependencies that cannot
+        be fulfilled. Since we don't want test cases that are never
+        executed, we don't emit an actual test case; instead, we
+        include comments to make it easier to understand the output
+        of test case generation.
+
+        reason must be a non-empty string explaining to humans why this
+        test case is skipped.
+        """
+        self.skip_reasons.append(reason)
+
+    def check_completeness(self) -> None:
+        if self.description is None:
+            raise MissingDescription
+        if self.function is None:
+            raise MissingFunction
+
+    def write(self, out: typing_util.Writable) -> None:
+        """Write the .data file paragraph for this test case.
+
+        The output starts and ends with a single newline character. If the
+        surrounding code writes lines (consisting of non-newline characters
+        and a final newline), you will end up with a blank line before, but
+        not after the test case.
+        """
+        self.check_completeness()
+        assert self.description is not None # guide mypy
+        assert self.function is not None # guide mypy
+        out.write('\n')
+        for line in self.comments:
+            out.write('# ' + line + '\n')
+        prefix = ''
+        if self.skip_reasons:
+            prefix = '## '
+            for reason in self.skip_reasons:
+                out.write('## # skipped because: ' + reason + '\n')
+        out.write(prefix + self.description + '\n')
+        dependencies = self.get_dependencies()
+        if dependencies:
+            out.write(prefix + 'depends_on:' +
+                      ':'.join(dependencies) + '\n')
+        out.write(prefix + self.function + ':' +
+                  ':'.join(self.arguments) + '\n')
+
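+# Illustrative use of TestCase (the description, function name and
+# arguments below are made up, not taken from a real test suite):
+#
+#     tc = TestCase()
+#     tc.set_description('SHA-256 hash: empty input')
+#     tc.set_function('sha256_hash')
+#     tc.set_arguments(['""', hex_string(b'')])
+#     tc.set_dependencies(['PSA_WANT_ALG_SHA_256'])
+#     tc.write(sys.stdout)
+#
+# which emits a .data paragraph of the form:
+#
+#     SHA-256 hash: empty input
+#     depends_on:PSA_WANT_ALG_SHA_256
+#     sha256_hash:"":""
+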
+def write_data_file(filename: str,
+                    test_cases: Iterable[TestCase],
+                    caller: Optional[str] = None) -> None:
+    """Write the test cases to the specified file.
+
+    If the file already exists, it is overwritten.
+    """
+    if caller is None:
+        caller = os.path.basename(sys.argv[0])
+    tempfile = filename + '.new'
+    with open(tempfile, 'w') as out:
+        out.write('# Automatically generated by {}. Do not edit!\n'
+                  .format(caller))
+        for tc in test_cases:
+            tc.write(out)
+        out.write('\n# End of automatically generated file.\n')
+    os.replace(tempfile, filename)
+
+def psa_or_3_6_feature_macro(psa_name: str,
+                             domain_3_6: Domain36) -> str:
+    """Determine the dependency symbol for a given psa_name based on
+       the domain and Mbed TLS version. For more information about the domains,
+       and MBEDTLS_MD_CAN_ prefixed symbols, see transition-guards.md.
+       This function currently works with hashes and some PK symbols only.
+       It accepts PSA_ALG_xxx or PSA_KEY_TYPE_xxx as inputs for psa_name.
+    """
+
+    if domain_3_6 in (Domain36.PSA, Domain36.TLS_1_3_ONLY) or \
+       not build_tree.is_mbedtls_3_6():
+        if psa_name in PK_MACROS_3_6 or psa_name in HASHES_3_6:
+            return psa_information.psa_want_symbol(psa_name)
+
+    if domain_3_6 == Domain36.USE_PSA:
+        if psa_name in PK_MACROS_3_6:
+            return PK_MACROS_3_6[psa_name]
+
+    if psa_name in HASHES_3_6:
+        return HASHES_3_6[psa_name]
+
+    raise ValueError(f'Unable to determine dependency symbol for {psa_name} in {domain_3_6}')
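+
+# Illustrative results on an Mbed TLS 3.6 tree (where
+# build_tree.is_mbedtls_3_6() is true; the PSA_WANT spelling assumes
+# psa_information.psa_want_symbol() maps PSA_ALG_xxx to PSA_WANT_ALG_xxx):
+#
+#     psa_or_3_6_feature_macro('PSA_ALG_SHA_256', Domain36.PSA)
+#         -> 'PSA_WANT_ALG_SHA_256'
+#     psa_or_3_6_feature_macro('PSA_ALG_SHA_256', Domain36.LEGACY)
+#         -> 'MBEDTLS_MD_CAN_SHA256'
+#     psa_or_3_6_feature_macro('PSA_KEY_TYPE_ECC_PUBLIC_KEY', Domain36.USE_PSA)
+#         -> 'MBEDTLS_PK_HAVE_ECC_KEYS'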
diff --git a/framework/scripts/mbedtls_framework/test_data_generation.py b/framework/scripts/mbedtls_framework/test_data_generation.py
new file mode 100644
index 0000000..a84f7dd
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/test_data_generation.py
@@ -0,0 +1,224 @@
+"""Common code for test data generation.
+
+This module defines classes that are of general use to automatically
+generate .data files for unit tests, as well as a main function.
+
+These are used both by generate_psa_tests.py and generate_bignum_tests.py.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import argparse
+import os
+import posixpath
+import re
+import inspect
+
+from abc import ABCMeta, abstractmethod
+from typing import Callable, Dict, Iterable, Iterator, List, Type, TypeVar
+
+from . import build_tree
+from . import test_case
+
+T = TypeVar('T') #pylint: disable=invalid-name
+
+
+class BaseTest(metaclass=ABCMeta):
+    """Base class for test case generation.
+
+    Attributes:
+        count: Counter for test cases from this class.
+        case_description: Short description of the test case. This may be
+            automatically generated using the class, or manually set.
+        dependencies: A list of dependencies required for the test case.
+        show_test_count: Toggle for inclusion of `count` in the test description.
+        test_function: Test function which the class generates cases for.
+        test_name: A common name or description of the test function. This can
+            be `test_function`, a clearer equivalent, or a short summary of the
+            test function's purpose.
+    """
+    count = 0
+    case_description = ""
+    dependencies = [] # type: List[str]
+    show_test_count = True
+    test_function = ""
+    test_name = ""
+
+    def __new__(cls, *args, **kwargs):
+        # pylint: disable=unused-argument
+        cls.count += 1
+        return super().__new__(cls)
+
+    @abstractmethod
+    def arguments(self) -> List[str]:
+        """Get the list of arguments for the test case.
+
+        Override this method to provide the list of arguments required for
+        the `test_function`.
+
+        Returns:
+            List of arguments required for the test function.
+        """
+        raise NotImplementedError
+
+    def description(self) -> str:
+        """Create a test case description.
+
+        Creates a description of the test case, including a name for the test
+        function, an optional case count, and a description of the specific
+        test case. This should inform a reader what is being tested, and
+        provide context for the test case.
+
+        Returns:
+            Description for the test case.
+        """
+        if self.show_test_count:
+            return "{} #{} {}".format(
+                self.test_name, self.count, self.case_description
+                ).strip()
+        else:
+            return "{} {}".format(self.test_name, self.case_description).strip()
+
+
+    def create_test_case(self) -> test_case.TestCase:
+        """Generate TestCase from the instance."""
+        tc = test_case.TestCase()
+        tc.set_description(self.description())
+        tc.set_function(self.test_function)
+        tc.set_arguments(self.arguments())
+        tc.set_dependencies(self.dependencies)
+
+        return tc
+
+    @classmethod
+    @abstractmethod
+    def generate_function_tests(cls) -> Iterator[test_case.TestCase]:
+        """Generate test cases for the class test function.
+
+        This will be called in classes where `test_function` is set.
+        Implementations should yield TestCase objects, by creating instances
+        of the class with appropriate input data, and then calling
+        `create_test_case()` on each.
+        """
+        raise NotImplementedError
+
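+# A minimal sketch of a concrete BaseTest subclass (all names are
+# hypothetical, for illustration only):
+#
+#     class BignumAdd(BaseTest):
+#         test_function = 'mpi_add'
+#         test_name = 'MPI add'
+#
+#         def __init__(self, a: str, b: str) -> None:
+#             self.a = a
+#             self.b = b
+#
+#         def arguments(self) -> List[str]:
+#             return [self.a, self.b]
+#
+#         @classmethod
+#         def generate_function_tests(cls):
+#             for a, b in [('"01"', '"02"'), ('"ff"', '"01"')]:
+#                 yield cls(a, b).create_test_case()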
+
+class BaseTarget:
+    #pylint: disable=too-few-public-methods
+    """Base target for test case generation.
+
+    Child classes of this class represent an output file, and can be referred
+    to as file targets. The output file, set by `target_basename`, receives
+    the test cases generated by all subclasses of the file target.
+
+    Attributes:
+        target_basename: Basename of file to write generated tests to. This
+            should be specified in a child class of BaseTarget.
+    """
+    target_basename = ""
+
+    @classmethod
+    def generate_tests(cls) -> Iterator[test_case.TestCase]:
+        """Generate test cases for the class and its subclasses.
+
+        In classes with `test_function` set, `generate_function_tests()` is
+        called to generate test cases first.
+
+        In all classes, this method will iterate over its subclasses, and
+        yield from `generate_tests()` in each. Calling this method on a class X
+        will yield test cases from all classes derived from X.
+        """
+        if issubclass(cls, BaseTest) and not inspect.isabstract(cls):
+            #pylint: disable=no-member
+            yield from cls.generate_function_tests()
+        for subclass in sorted(cls.__subclasses__(), key=lambda c: c.__name__):
+            yield from subclass.generate_tests()
+
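+# Sketch of a file target tying the hierarchy together (the basename is
+# hypothetical): direct subclasses of BaseTarget with a non-empty
+# target_basename become entries in TestGenerator.targets.
+#
+#     class BignumTarget(BaseTarget):
+#         target_basename = 'test_suite_bignum.generated'
+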
+
+class TestGenerator:
+    """Generate test cases and write to data files."""
+    def __init__(self, options) -> None:
+        self.test_suite_directory = options.directory
+        # Update `targets` with an entry for each child class of BaseTarget.
+        # Each entry represents a file generated by the BaseTarget framework,
+        # and enables generating the .data files using the CLI.
+        self.targets.update({
+            subclass.target_basename: subclass.generate_tests
+            for subclass in BaseTarget.__subclasses__()
+            if subclass.target_basename
+        })
+
+    def filename_for(self, basename: str) -> str:
+        """The location of the data file with the specified base name."""
+        return posixpath.join(self.test_suite_directory, basename + '.data')
+
+    def write_test_data_file(self, basename: str,
+                             test_cases: Iterable[test_case.TestCase]) -> None:
+        """Write the test cases to a .data file.
+
+        The output file is ``basename + '.data'`` in the test suite directory.
+        """
+        filename = self.filename_for(basename)
+        test_case.write_data_file(filename, test_cases)
+
+    # Note that targets whose names contain 'test_format' have their content
+    # validated by `abi_check.py`.
+    targets = {} # type: Dict[str, Callable[..., Iterable[test_case.TestCase]]]
+
+    def generate_target(self, name: str, *target_args) -> None:
+        """Generate cases and write to data file for a target.
+
+        For target callables which require arguments, override this function
+        and pass these arguments using super() (see PSATestGenerator).
+        """
+        test_cases = self.targets[name](*target_args)
+        self.write_test_data_file(name, test_cases)
+
+def main(args, description: str, generator_class: Type[TestGenerator] = TestGenerator):
+    """Command line entry point."""
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument('--list', action='store_true',
+                        help='List available targets and exit')
+    parser.add_argument('--list-for-cmake', action='store_true',
+                        help='Print \';\'-separated list of available targets and exit')
+    # If specified explicitly, this option may be a path relative to the
+    # current directory when the script is invoked. The default value
+    # is relative to the mbedtls root, which we don't know yet. So we
+    # can't set a string as the default value here.
+    parser.add_argument('--directory', metavar='DIR',
+                        help='Output directory (default: tests/suites)')
+    parser.add_argument('targets', nargs='*', metavar='TARGET',
+                        help='Target file to generate (default: all; "-": none)')
+    options = parser.parse_args(args)
+
+    # Change to the mbedtls root, to keep things simple. But first, adjust
+    # command line options that might be relative paths.
+    if options.directory is None:
+        options.directory = 'tests/suites'
+    else:
+        options.directory = os.path.abspath(options.directory)
+    build_tree.chdir_to_root()
+
+    generator = generator_class(options)
+    if options.list:
+        for name in sorted(generator.targets):
+            print(generator.filename_for(name))
+        return
+    # List in a cmake list format (i.e. ';'-separated)
+    if options.list_for_cmake:
+        print(';'.join(generator.filename_for(name)
+                       for name in sorted(generator.targets)), end='')
+        return
+    if options.targets:
+        # Allow "-" as a special case so you can run
+        # ``generate_xxx_tests.py - $targets`` and it works uniformly whether
+        # ``$targets`` is empty or not.
+        options.targets = [os.path.basename(re.sub(r'\.data\Z', r'', target))
+                           for target in options.targets
+                           if target != '-']
+    else:
+        options.targets = sorted(generator.targets)
+    for target in options.targets:
+        generator.generate_target(target)
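+
+# Typical invocations of a generator script built on main() (the script
+# name is illustrative):
+#
+#     python3 generate_bignum_tests.py                  # generate all targets
+#     python3 generate_bignum_tests.py --list           # list output files
+#     python3 generate_bignum_tests.py --directory out  # write to ./out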
diff --git a/framework/scripts/mbedtls_framework/tls_test_case.py b/framework/scripts/mbedtls_framework/tls_test_case.py
new file mode 100644
index 0000000..73bb039
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/tls_test_case.py
@@ -0,0 +1,101 @@
+"""Library for constructing an Mbed TLS ssl-opt test case.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import enum
+import re
+from typing import List
+
+from . import typing_util
+
+
+class TestCase:
+    """Data about an ssl-opt test case."""
+    #pylint: disable=too-few-public-methods
+
+    def __init__(self, description: str) -> None:
+        # List of shell snippets to call before run_test, typically
+        # calls to requires_xxx functions.
+        self.requirements = [] #type: List[str]
+        # Test case description (first argument to run_test).
+        self.description = description
+        # Client command line.
+        # This will be placed directly inside double quotes in the shell script.
+        self.client = '$P_CLI'
+        # Server command line.
+        # This will be placed directly inside double quotes in the shell script.
+        self.server = '$P_SRV'
+        # Expected client exit code.
+        self.exit_code = 0
+
+        # Note that all patterns matched in the logs are in BRE
+        # (Basic Regular Expression) syntax, more precisely in the BRE
+        # dialect that is the default for GNU grep. The main difference
+        # with Python regular expressions is that the operators for
+        # grouping `\(...\)`, alternation `x\|y`, option `x\?`,
+        # one-or-more `x\+` and repetition ranges `x\{M,N\}` must be
+        # preceded by a backslash. The characters `()|?+{}` stand for
+        # themselves.
+
+        # BRE for text that must be present in the client log (run_test -c).
+        self.wanted_client_patterns = [] #type: List[str]
+        # BRE for text that must be present in the server log (run_test -s).
+        self.wanted_server_patterns = [] #type: List[str]
+        # BRE for text that must not be present in the client log (run_test -C).
+        self.forbidden_client_patterns = [] #type: List[str]
+        # BRE for text that must not be present in the server log (run_test -S).
+        self.forbidden_server_patterns = [] #type: List[str]
+
+    @staticmethod
+    def _quote(raw: str) -> str:
+        """Quote the given string for sh.
+
+        Use double quotes, because that's currently the norm in ssl-opt.sh.
+        """
+        return '"' + re.sub(r'([$"\\`])', r'\\\1', raw) + '"'
+
+    def write(self, out: typing_util.Writable) -> None:
+        """Write the test case to the specified file."""
+        for req in self.requirements:
+            out.write(req + '\n')
+        out.write(f'run_test    {self._quote(self.description)} \\\n')
+        out.write(f'            "{self.server}" \\\n')
+        out.write(f'            "{self.client}" \\\n')
+        out.write(f'            {self.exit_code}')
+        for pat in self.wanted_server_patterns:
+            out.write(' \\\n            -s ' + self._quote(pat))
+        for pat in self.forbidden_server_patterns:
+            out.write(' \\\n            -S ' + self._quote(pat))
+        for pat in self.wanted_client_patterns:
+            out.write(' \\\n            -c ' + self._quote(pat))
+        for pat in self.forbidden_client_patterns:
+            out.write(' \\\n            -C ' + self._quote(pat))
+        out.write('\n\n')
+
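+# Illustrative output of write() (the description and pattern are made
+# up; requirements lines come first, then the run_test stanza):
+#
+#     requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3
+#     run_test    "TLS 1.3 smoke test" \
+#                 "$P_SRV" \
+#                 "$P_CLI" \
+#                 0 \
+#                 -s "Protocol is TLSv1.3"
+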
+
+class Side(enum.Enum):
+    CLIENT = 0
+    SERVER = 1
+
+class Version(enum.Enum):
+    """TLS protocol version.
+
+    This class doesn't know about DTLS yet.
+    """
+
+    TLS12 = 2
+    TLS13 = 3
+
+    def force_version(self) -> str:
+        """Argument to pass to ssl_client2 or ssl_server2 to force this version."""
+        return f'force_version=tls1{self.value}'
+
+    def openssl_option(self) -> str:
+        """Option to pass to openssl s_client or openssl s_server to select this version."""
+        return f'-tls1_{self.value}'
+
+    def requires_command(self) -> str:
+        """Command to require this protocol version in an ssl-opt.sh test case."""
+        return 'requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_' + str(self.value)
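+
+# For example, Version.TLS13 yields:
+#     force_version()    -> 'force_version=tls13'
+#     openssl_option()   -> '-tls1_3'
+#     requires_command() -> 'requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3'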
diff --git a/framework/scripts/mbedtls_framework/typing_util.py b/framework/scripts/mbedtls_framework/typing_util.py
new file mode 100644
index 0000000..2ec448d
--- /dev/null
+++ b/framework/scripts/mbedtls_framework/typing_util.py
@@ -0,0 +1,28 @@
+"""Auxiliary definitions used in type annotations.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+from typing import Any
+
+# The typing_extensions module is necessary for type annotations that are
+# checked with mypy. It is only used for type annotations or to define
+# things that are themselves only used for type annotations. It is not
+# available on a default Python installation. Therefore, try loading
+# what we need from it for the sake of mypy (which depends on, or comes
+# with, typing_extensions), and if that fails, define substitutes that lack
+# the static type information but are good enough at runtime.
+try:
+    from typing_extensions import Protocol #pylint: disable=import-error
+except ImportError:
+    class Protocol: #type: ignore
+        #pylint: disable=too-few-public-methods
+        pass
+
+class Writable(Protocol):
+    """Abstract class for typing hints."""
+    # pylint: disable=no-self-use,too-few-public-methods,unused-argument
+    def write(self, text: str) -> Any:
+        ...
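+
+# Anything with a write(str) method matches this protocol, e.g. sys.stdout
+# or an open text file, so a function can be annotated as follows
+# (hypothetical example):
+#
+#     def dump(out: Writable) -> None:
+#         out.write('hello\n')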
diff --git a/framework/scripts/output_env.sh b/framework/scripts/output_env.sh
new file mode 100755
index 0000000..32f1f86
--- /dev/null
+++ b/framework/scripts/output_env.sh
@@ -0,0 +1,183 @@
+#! /usr/bin/env sh
+
+# output_env.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# To print out all the relevant information about the development environment.
+#
+# This includes:
+#   - architecture of the system
+#   - type and version of the operating system
+#   - version of make and cmake
+#   - version of armcc, clang, gcc-arm and gcc compilers
+#   - version of libc, clang, asan and valgrind if installed
+#   - version of GnuTLS and OpenSSL
+
+print_version()
+{
+    BIN="$1"
+    shift
+    ARGS="$1"
+    shift
+    VARIANT="$1"
+    shift
+
+    if [ -n "$VARIANT" ]; then
+        VARIANT=" ($VARIANT)"
+    fi
+
+    if ! type "$BIN" > /dev/null 2>&1; then
+        echo " * ${BIN##*/}$VARIANT: Not found."
+        return 0
+    fi
+
+    BIN=`which "$BIN"`
+    VERSION_STR=`$BIN $ARGS 2>&1`
+
+    # Apply all filters
+    while [ $# -gt 0 ]; do
+        FILTER="$1"
+        shift
+        VERSION_STR=`echo "$VERSION_STR" | $FILTER`
+    done
+
+    if [ -z "$VERSION_STR" ]; then
+        VERSION_STR="Version could not be determined."
+    fi
+
+    echo " * ${BIN##*/}$VARIANT: ${BIN} : ${VERSION_STR} "
+}
+
+echo "** Platform:"
+echo
+
+if [ `uname -s` = "Linux" ]; then
+    echo "Linux variant"
+    lsb_release -d -c
+else
+    echo "Unknown Unix variant"
+fi
+
+echo
+
+print_version "uname" "-a" ""
+
+echo
+echo
+echo "** Tool Versions:"
+echo
+
+print_version "make" "--version" "" "head -n 1"
+echo
+
+print_version "cmake" "--version" "" "head -n 1"
+echo
+
+if [ "${RUN_ARMCC:-1}" -ne 0 ]; then
+    : "${ARMC6_CC:=armclang}"
+    print_version "$ARMC6_CC" "--vsn" "" "head -n 2"
+    echo
+fi
+
+print_version "arm-none-eabi-gcc" "--version" "" "head -n 1"
+echo
+
+print_version "gcc" "--version" "" "head -n 1"
+echo
+
+if [ -n "${GCC_EARLIEST+set}" ]; then
+    print_version "${GCC_EARLIEST}" "--version" "" "head -n 1"
+else
+    echo " GCC_EARLIEST : Not configured."
+fi
+echo
+
+if [ -n "${GCC_LATEST+set}" ]; then
+    print_version "${GCC_LATEST}" "--version" "" "head -n 1"
+else
+    echo " GCC_LATEST : Not configured."
+fi
+echo
+
+print_version "clang" "--version" "" "head -n 2"
+echo
+
+if [ -n "${CLANG_EARLIEST+set}" ]; then
+    print_version "${CLANG_EARLIEST}" "--version" "" "head -n 2"
+else
+    echo " CLANG_EARLIEST : Not configured."
+fi
+echo
+
+if [ -n "${CLANG_LATEST+set}" ]; then
+    print_version "${CLANG_LATEST}" "--version" "" "head -n 2"
+else
+    echo " CLANG_LATEST : Not configured."
+fi
+echo
+
+print_version "ldd" "--version" "" "head -n 1"
+echo
+
+print_version "valgrind" "--version" ""
+echo
+
+print_version "gdb" "--version" "" "head -n 1"
+echo
+
+print_version "perl" "--version" "" "head -n 2" "grep ."
+echo
+
+print_version "python" "--version" "" "head -n 1"
+echo
+
+print_version "python3" "--version" "" "head -n 1"
+echo
+
+# Find the installed version of Pylint. Installed as a distro package, this
+# can be pylint3; installed via pip, it is pylint. In test scripts we prefer
+# pylint over pylint3.
+if type pylint >/dev/null 2>/dev/null; then
+    print_version "pylint" "--version" "" "sed /^.*config/d" "grep pylint"
+elif type pylint3 >/dev/null 2>/dev/null; then
+    print_version "pylint3" "--version" "" "sed /^.*config/d" "grep pylint"
+else
+    echo " * pylint or pylint3: Not found."
+fi
+echo
+
+: ${OPENSSL:=openssl}
+print_version "$OPENSSL" "version" "default"
+echo
+
+if [ -n "${OPENSSL_NEXT+set}" ]; then
+    print_version "$OPENSSL_NEXT" "version" "next"
+else
+    echo " * openssl (next): Not configured."
+fi
+echo
+
+: ${GNUTLS_CLI:=gnutls-cli}
+print_version "$GNUTLS_CLI" "--version" "default" "head -n 1"
+echo
+
+: ${GNUTLS_SERV:=gnutls-serv}
+print_version "$GNUTLS_SERV" "--version" "default" "head -n 1"
+echo
+
+echo " * Installed asan versions:"
+if type dpkg-query >/dev/null 2>/dev/null; then
+    if ! dpkg-query -f '${Status} ${Package}: ${Version}\n' -W 'libasan*' |
+         awk '$3 == "installed" && $4 !~ /-/ {print $4, $5}' |
+         grep .
+    then
+        echo "   No asan versions installed."
+    fi
+else
+    echo "  Unable to determine the asan version without dpkg."
+fi
+echo
diff --git a/framework/scripts/pkgconfig.sh b/framework/scripts/pkgconfig.sh
new file mode 100755
index 0000000..07a73b3
--- /dev/null
+++ b/framework/scripts/pkgconfig.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# Test pkgconfig files.
+#
+# For each of the built pkg-config (.pc) files, check that it
+# validates, and do some basic sanity testing on the output,
+# i.e. that the strings are non-empty.
+#
+# NOTE: This requires the built .pc files to be on the pkg-config
+# search path; this can be controlled with the environment variable
+# PKG_CONFIG_PATH. See man(1) pkg-config for details.
+#
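+# Example invocation (paths and package names are illustrative):
+#
+#   PKG_CONFIG_PATH=build/pkgconfig framework/scripts/pkgconfig.sh \
+#       mbedtls mbedx509 mbedcrypto
+#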
+
+set -e -u
+
+if [ $# -le 0 ]
+then
+    echo " [!] No package names specified" >&2
+    echo "Usage: $0 <package name 1> <package name 2> ..." >&2
+    exit 1
+fi
+
+for pc in "$@"; do
+    printf "testing package config file: ${pc} ... "
+    pkg-config --validate "${pc}"
+    version="$(pkg-config --modversion "${pc}")"
+    test -n "$version"
+    cflags="$(pkg-config --cflags "${pc}")"
+    test -n "$cflags"
+    libs="$(pkg-config --libs "${pc}")"
+    test -n "$libs"
+    printf "passed\n"
+done
+
+exit 0
diff --git a/framework/scripts/project_detection.sh b/framework/scripts/project_detection.sh
new file mode 100644
index 0000000..bbe2813
--- /dev/null
+++ b/framework/scripts/project_detection.sh
@@ -0,0 +1,67 @@
+# project_detection.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# This script contains functions for shell scripts to
+# help detect which project (Mbed TLS, TF-PSA-Crypto)
+# or which Mbed TLS branch they are in.
+
+# Project detection
+read_project_name_file () {
+    SCRIPT_DIR=$(pwd)
+
+    PROJECT_NAME_FILE="scripts/project_name.txt"
+
+    if read -r PROJECT_NAME < "$PROJECT_NAME_FILE"; then :; else
+        echo "$PROJECT_NAME_FILE does not exist... Exiting..." >&2
+        exit 1
+    fi
+}
+
+in_mbedtls_repo () {
+    read_project_name_file
+    test "$PROJECT_NAME" = "Mbed TLS"
+}
+
+in_tf_psa_crypto_repo () {
+    read_project_name_file
+    test "$PROJECT_NAME" = "TF-PSA-Crypto"
+}
+
+#Branch detection
+read_build_info () {
+    SCRIPT_DIR=$(pwd)
+
+    BUILD_INFO_FILE="include/mbedtls/build_info.h"
+
+    if [ ! -f "$BUILD_INFO_FILE" ]; then
+        echo "File $BUILD_INFO_FILE not found."
+        exit 1
+    fi
+
+    MBEDTLS_VERSION_MAJOR=$(grep "^#define MBEDTLS_VERSION_MAJOR" "$BUILD_INFO_FILE" | awk '{print $3}')
+    MBEDTLS_VERSION_MINOR=$(grep "^#define MBEDTLS_VERSION_MINOR" "$BUILD_INFO_FILE" | awk '{print $3}')
+
+    if [ -z "$MBEDTLS_VERSION_MAJOR" ]; then
+        echo "MBEDTLS_VERSION_MAJOR not found in $BUILD_INFO_FILE."
+        exit 1
+    fi
+
+    if [ -z "$MBEDTLS_VERSION_MINOR" ]; then
+        echo "MBEDTLS_VERSION_MINOR not found in $BUILD_INFO_FILE."
+        exit 1
+    fi
+}
+
+in_3_6_branch () {
+    read_build_info
+    test $MBEDTLS_VERSION_MAJOR = "3" && test $MBEDTLS_VERSION_MINOR = "6"
+}
+
+in_4_x_branch () {
+    read_build_info
+    test $MBEDTLS_VERSION_MAJOR = "4"
+}
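+
+# Example use from a consuming script (sourcing path as laid out in this
+# repository):
+#
+#   . framework/scripts/project_detection.sh
+#   if in_mbedtls_repo && in_3_6_branch; then
+#       echo "Running on the Mbed TLS 3.6 branch"
+#   fi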
diff --git a/framework/scripts/project_scripts.py b/framework/scripts/project_scripts.py
new file mode 100644
index 0000000..2666c7b
--- /dev/null
+++ b/framework/scripts/project_scripts.py
@@ -0,0 +1,17 @@
+"""Add the consuming repository's scripts to the module search path.
+
+Usage:
+
+    import project_scripts # pylint: disable=unused-import
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__),
+                             os.path.pardir, os.path.pardir,
+                             'scripts'))
diff --git a/framework/scripts/quiet/cmake b/framework/scripts/quiet/cmake
new file mode 100755
index 0000000..a34365b
--- /dev/null
+++ b/framework/scripts/quiet/cmake
@@ -0,0 +1,19 @@
+#! /usr/bin/env bash
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# This swallows the output of the wrapped tool, unless there is an error.
+# This helps reduce excess logging in the CI.
+
+# If you are debugging a build / CI issue, you can get complete unsilenced logs
+# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):
+
+# export VERBOSE_LOGS=1
+
+# don't silence invocations containing these arguments
+NO_SILENCE=" --version "
+
+TOOL="cmake"
+
+. "$(dirname "$0")/quiet.sh"
diff --git a/framework/scripts/quiet/make b/framework/scripts/quiet/make
new file mode 100755
index 0000000..920e5b8
--- /dev/null
+++ b/framework/scripts/quiet/make
@@ -0,0 +1,19 @@
+#! /usr/bin/env bash
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# This swallows the output of the wrapped tool, unless there is an error.
+# This helps reduce excess logging in the CI.
+
+# If you are debugging a build / CI issue, you can get complete unsilenced logs
+# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):
+
+# export VERBOSE_LOGS=1
+
+# don't silence invocations containing these arguments
+NO_SILENCE=" --version | test "
+
+TOOL="make"
+
+. "$(dirname "$0")/quiet.sh"
diff --git a/framework/scripts/quiet/quiet.sh b/framework/scripts/quiet/quiet.sh
new file mode 100644
index 0000000..0f26184
--- /dev/null
+++ b/framework/scripts/quiet/quiet.sh
@@ -0,0 +1,79 @@
+# -*-mode: sh; sh-shell: bash -*-
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# This swallows the output of the wrapped tool, unless there is an error.
+# This helps reduce excess logging in the CI.
+
+# If you are debugging a build / CI issue, you can get complete unsilenced logs
+# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):
+#
+# VERBOSE_LOGS=1
+#
+# This script provides most of the functionality for the adjacent make and cmake
+# wrappers.
+#
+# It requires two variables to be set:
+#
+# TOOL       - the name of the tool that is being wrapped (with no path), e.g. "make"
+#
+# NO_SILENCE - a regex that describes the commandline arguments for which output will not
+#              be silenced, e.g. " --version | test ". In this example, "make lib test" will
+#              not be silent, but "make lib" will be.
+
+# Identify path to original tool. There is an edge-case here where the quiet wrapper is on the path via
+# a symlink or relative path, but "type -ap" yields the wrapper with its normalised path. We use
+# the -ef operator to compare paths, to avoid picking the wrapper in this case (to avoid infinitely
+# recursing).
+while IFS= read -r ORIGINAL_TOOL; do
+    if ! [[ $ORIGINAL_TOOL -ef "$0" ]]; then break; fi
+done < <(type -ap -- "$TOOL")
+
+print_quoted_args() {
+    # similar to printf '%q' "$@"
+    # but produce more human-readable results for common/simple cases like "a b"
+    for a in "$@"; do
+        # Get bash to quote the string
+        printf -v q '%q' "$a"
+        simple_pattern="^([-[:alnum:]_+./:@]+=)?([^']*)$"
+        if [[ "$a" != "$q" && $a =~ $simple_pattern ]]; then
+            # a requires some quoting (a != q), but has no single quotes, so we can
+            # simplify the quoted form - e.g.:
+            #   a b        -> 'a b'
+            #   CFLAGS=a b -> CFLAGS='a b'
+            q="${BASH_REMATCH[1]}'${BASH_REMATCH[2]}'"
+        fi
+        printf " %s" "$q"
+    done
+}
+
+if [[ ! " $* " =~ " --version " ]]; then
+    # Display the command being invoked - if it succeeds, this is all that will
+    # be displayed. Don't do this for invocations with --version, because
+    # this output is often parsed by scripts, so we don't want to modify it.
+    printf %s "${TOOL}"    1>&2
+    print_quoted_args "$@" 1>&2
+    echo                   1>&2
+fi
+
+if [[ " $@ " =~ $NO_SILENCE || -n "${VERBOSE_LOGS}" ]]; then
+    # Run the original command with no output suppression
+    exec "${ORIGINAL_TOOL}" "$@"
+else
+    # Run original command and capture output & exit status
+    TMPFILE=$(mktemp "quiet-${TOOL}.XXXXXX")
+    "${ORIGINAL_TOOL}" "$@" > "${TMPFILE}" 2>&1
+    EXIT_STATUS=$?
+
+    if [[ $EXIT_STATUS -ne 0 ]]; then
+        # On error, display the full output
+        cat "${TMPFILE}"
+    fi
+
+    # Remove tmpfile
+    rm "${TMPFILE}"
+
+    # Propagate the exit status
+    exit $EXIT_STATUS
+fi
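+
+# To enable the wrappers, put this directory early in the PATH, e.g.
+# (path as laid out in this repository):
+#
+#   export PATH="$PWD/framework/scripts/quiet:$PATH"
+#   make lib     # silenced unless it fails
+#   make test    # matches NO_SILENCE, so output is shown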
diff --git a/framework/scripts/recursion.pl b/framework/scripts/recursion.pl
new file mode 100755
index 0000000..379710f
--- /dev/null
+++ b/framework/scripts/recursion.pl
@@ -0,0 +1,47 @@
+#!/usr/bin/env perl
+
+# Find functions making recursive calls to themselves.
+# (Multiple recursion, where a() calls b() which calls a(), is not covered.)
+#
+# When the recursion depth might depend on data controlled by the attacker in
+# an unbounded way, those functions should use iteration instead.
+#
+# Typical usage: framework/scripts/recursion.pl library/*.c
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use warnings;
+use strict;
+
+use utf8;
+use open qw(:std utf8);
+
+# exclude functions that are ok:
+# - mpi_write_hlp: bounded by size of mbedtls_mpi, a compile-time constant
+# - x509_crt_verify_child: bounded by MBEDTLS_X509_MAX_INTERMEDIATE_CA
+my $known_ok = qr/mpi_write_hlp|x509_crt_verify_child/;
+
+my $cur_name;
+my $inside;
+my @funcs;
+
+die "Usage: $0 file.c [...]\n" unless @ARGV;
+
+while (<>)
+{
+    if( /^[^\/#{}\s]/ && ! /\[.*]/ ) {
+        chomp( $cur_name = $_ ) unless $inside;
+    } elsif( /^{/ && $cur_name ) {
+        $inside = 1;
+        $cur_name =~ s/.* ([^ ]*)\(.*/$1/;
+    } elsif( /^}/ && $inside ) {
+        undef $inside;
+        undef $cur_name;
+    } elsif( $inside && /\b\Q$cur_name\E\([^)]/ ) {
+        push @funcs, $cur_name unless /$known_ok/;
+    }
+}
+
+print "$_\n" for @funcs;
+exit @funcs;
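+
+# Illustrative example: given a source file containing
+#
+#     size_t depth(node *n)
+#     {
+#         return 1 + depth(n->child);
+#     }
+#
+# the script prints "depth" and exits with a nonzero status (the exit
+# code is the number of offending functions found).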
diff --git a/framework/scripts/sbom.cdx.json b/framework/scripts/sbom.cdx.json
new file mode 100644
index 0000000..2ef7c85
--- /dev/null
+++ b/framework/scripts/sbom.cdx.json
@@ -0,0 +1,48 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "version": 1,
+  "metadata": {
+    "authors": [
+      {
+        "name": "@VCS_SBOM_AUTHORS@"
+      }
+    ]
+  },
+  "components": [
+    {
+      "type": "library",
+      "bom-ref": "pkg:github/Mbed-TLS/mbedtls@@VCS_TAG@",
+      "cpe": "cpe:2.3:a:trustedfirmware:mbed_tls_framework:@VCS_TAG@:*:*:*:*:*:*:*",
+      "name": "mbedtls-framework",
+      "version": "@VCS_VERSION@",
+      "description": "Version-independent build and test framework for TF-PSA-Crypto and Mbed TLS",
+      "authors": [
+        {
+          "name": "@VCS_AUTHORS@"
+        }
+      ],
+      "supplier": {
+        "name": "Trusted Firmware"
+      },
+      "licenses": [
+        {
+          "license": {
+            "id": "Apache-2.0"
+          }
+        },
+        {
+          "license": {
+            "id": "GPL-2.0-or-later"
+          }
+        }
+      ],
+      "externalReferences": [
+        {
+          "type": "vcs",
+          "url": "https://github.com/Mbed-TLS/mbedtls-framework"
+        }
+      ]
+    }
+  ]
+}
diff --git a/framework/scripts/test_generate_test_code.py b/framework/scripts/test_generate_test_code.py
new file mode 100755
index 0000000..0523e98
--- /dev/null
+++ b/framework/scripts/test_generate_test_code.py
@@ -0,0 +1,1915 @@
+#!/usr/bin/env python3
+# Unit test for generate_test_code.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Unit tests for generate_test_code.py
+"""
+
+from io import StringIO
+from unittest import TestCase, main as unittest_main
+from unittest.mock import patch
+
+from generate_test_code import gen_dependencies, gen_dependencies_one_line
+from generate_test_code import gen_function_wrapper, gen_dispatch
+from generate_test_code import parse_until_pattern, GeneratorInputError
+from generate_test_code import parse_suite_dependencies
+from generate_test_code import parse_function_dependencies
+from generate_test_code import parse_function_arguments, parse_function_code
+from generate_test_code import parse_functions, END_HEADER_REGEX
+from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split
+from generate_test_code import parse_test_data, gen_dep_check
+from generate_test_code import gen_expression_check, write_dependencies
+from generate_test_code import write_parameters, gen_suite_dep_checks
+from generate_test_code import gen_from_test_data
+
+
+class GenDep(TestCase):
+    """
+    Test suite for gen_dependencies()
+    """
+
+    def test_dependencies_list(self):
+        """
+        Test that gen_dependencies() correctly creates guards for a given
+        dependency list.
+        :return:
+        """
+        dependencies = ['DEP1', 'DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_disabled_dependencies_list(self):
+        """
+        Test that gen_dependencies() correctly negates guards for disabled
+        ('!'-prefixed) dependencies.
+        :return:
+        """
+        dependencies = ['!DEP1', '!DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if !defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* !DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* !DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_mixed_dependencies_list(self):
+        """
+        Test that gen_dependencies() correctly handles a mix of enabled
+        and disabled dependencies.
+        :return:
+        """
+        dependencies = ['!DEP1', 'DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* !DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_empty_dependencies_list(self):
+        """
+        Test that gen_dependencies() generates nothing for an empty
+        dependency list.
+        :return:
+        """
+        dependencies = []
+        dep_start, dep_end = gen_dependencies(dependencies)
+        self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly')
+        self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly')
+
+    def test_large_dependencies_list(self):
+        """
+        Test that gen_dependencies() scales to a larger dependency list.
+        :return:
+        """
+        count = 10
+        dependencies = ['DEP%d' % i for i in range(count)]
+        dep_start, dep_end = gen_dependencies(dependencies)
+        self.assertEqual(len(dep_start.splitlines()), count,
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(len(dep_end.splitlines()), count,
+                         'Preprocessor generated incorrectly')
+
+
+class GenDepOneLine(TestCase):
+    """
+    Test Suite for testing gen_dependencies_one_line()
+    """
+
+    def test_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() creates a one-line guard for
+        a given dependency list.
+        :return:
+        """
+        dependencies = ['DEP1', 'DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_disabled_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() correctly negates disabled
+        ('!'-prefixed) dependencies.
+        :return:
+        """
+        dependencies = ['!DEP1', '!DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_mixed_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() correctly handles a mix of
+        enabled and disabled dependencies.
+        :return:
+        """
+        dependencies = ['!DEP1', 'DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_empty_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() generates nothing for an
+        empty dependency list.
+        :return:
+        """
+        dependencies = []
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly')
+
+    def test_large_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() scales to a larger
+        dependency list.
+        :return:
+        """
+        dependencies = ['DEP%d' % i for i in range(10)]
+        dep_str = gen_dependencies_one_line(dependencies)
+        expected = '#if ' + ' && '.join(['defined(%s)' %
+                                         x for x in dependencies])
+        self.assertEqual(dep_str, expected,
+                         'Preprocessor generated incorrectly')
+
+
+class GenFunctionWrapper(TestCase):
+    """
+    Test Suite for testing gen_function_wrapper()
+    """
+
+    def test_params_unpack(self):
+        """
+        Test that params are properly unpacked in the function call.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd'))
+        expected = '''
+static void test_a_wrapper( void ** params )
+{
+
+    test_a( a, b, c, d );
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_local(self):
+        """
+        Test that local variable declarations are emitted before the call.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a',
+                                    'int x = 1;', ('x', 'b', 'c', 'd'))
+        expected = '''
+static void test_a_wrapper( void ** params )
+{
+int x = 1;
+    test_a( x, b, c, d );
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_empty_params(self):
+        """
+        Test wrapper generation when the test function takes no parameters.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a', '', ())
+        expected = '''
+static void test_a_wrapper( void ** params )
+{
+    (void)params;
+
+    test_a(  );
+}
+'''
+        self.assertEqual(code, expected)
+
+
+class GenDispatch(TestCase):
+    """
+    Test suite for testing gen_dispatch()
+    """
+
+    def test_dispatch(self):
+        """
+        Test that dispatch table entry is generated correctly.
+        :return:
+        """
+        code = gen_dispatch('test_a', ['DEP1', 'DEP2'])
+        expected = '''
+#if defined(DEP1) && defined(DEP2)
+    test_a_wrapper,
+#else
+    NULL,
+#endif
+'''
+        self.assertEqual(code, expected)
+
+    def test_empty_dependencies(self):
+        """
+        Test empty dependency list.
+        :return:
+        """
+        code = gen_dispatch('test_a', [])
+        expected = '''
+    test_a_wrapper,
+'''
+        self.assertEqual(code, expected)
+
+
+class StringIOWrapper(StringIO):
+    """
+    File-like class to mock a file object in tests.
+    """
+    def __init__(self, file_name, data, line_no=0):
+        """
+        Init file handle.
+
+        :param file_name: Name reported as the mock file's name.
+        :param data: Initial content of the mock file.
+        :param line_no: Initial line number.
+        """
+        super(StringIOWrapper, self).__init__(data)
+        self.line_no = line_no
+        self.name = file_name
+
+    def next(self):
+        """
+        Iterator method. This delegates to the base class's __next__
+        method. Note that only readline() updates the line counter.
+
+        :return: Line read from file.
+        """
+        return super(StringIOWrapper, self).__next__()
+
+    def readline(self, _length=0):
+        """
+        Wrap the base class readline.
+
+        :param _length: Ignored; kept for interface compatibility.
+        :return:
+        """
+        line = super(StringIOWrapper, self).readline()
+        if line:  # readline() returns '' at EOF, never None
+            self.line_no += 1
+        return line
+
+
+class ParseUntilPattern(TestCase):
+    """
+    Test Suite for testing parse_until_pattern().
+    """
+
+    def test_suite_headers(self):
+        """
+        Test that suite headers are parsed correctly.
+
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+/* END_HEADER */
+'''
+        expected = '''#line 1 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data, line_no=0)
+        headers = parse_until_pattern(stream, END_HEADER_REGEX)
+        self.assertEqual(headers, expected)
+
+    def test_line_no(self):
+        """
+        Test that #line is set to correct line no. in source .function file.
+
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+/* END_HEADER */
+'''
+        offset_line_no = 5
+        expected = '''#line %d "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+''' % (offset_line_no + 1)
+        stream = StringIOWrapper('test_suite_ut.function', data,
+                                 offset_line_no)
+        headers = parse_until_pattern(stream, END_HEADER_REGEX)
+        self.assertEqual(headers, expected)
+
+    def test_no_end_header_comment(self):
+        """
+        Test that GeneratorInputError is raised when the end header comment
+        is missing.
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_until_pattern, stream,
+                          END_HEADER_REGEX)
+
+
+class ParseSuiteDependencies(TestCase):
+    """
+    Test Suite for testing parse_suite_dependencies().
+    """
+
+    def test_suite_dependencies(self):
+        """
+        Test that suite dependencies are parsed correctly.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_end_dep_comment(self):
+        """
+        Test that GeneratorInputError is raised when the end dependencies
+        comment is missing.
+        :return:
+        """
+        data = '''
+* depends_on:MBEDTLS_ECP_C
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_suite_dependencies,
+                          stream)
+
+    def test_dependencies_split(self):
+        """
+        Test that dependencies are split on ':' and stripped of
+        surrounding whitespace.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C:A:B:   C  : D :F : G: !H
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+
+class ParseFuncDependencies(TestCase):
+    """
+    Test Suite for testing parse_function_dependencies()
+    """
+
+    def test_function_dependencies(self):
+        """
+        Test that parse_function_dependencies() correctly parses function
+        dependencies.
+        :return:
+        """
+        line = '/* BEGIN_CASE ' \
+               'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
+        expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_dependencies(self):
+        """
+        Test that parse_function_dependencies() returns an empty list when
+        there are no dependencies.
+        :return:
+        """
+        line = '/* BEGIN_CASE */'
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, [])
+
+    def test_tolerance(self):
+        """
+        Test that parse_function_dependencies() tolerates stray whitespace
+        around dependency names.
+        :return:
+        """
+        line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/'
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F'])
+
+
+class ParseFuncSignature(TestCase):
+    """
+    Test Suite for parse_function_arguments().
+    """
+
+    def test_int_and_char_params(self):
+        """
+        Test int and char parameters parsing
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, int b, int result )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, ['char*', 'int', 'int'])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch,
+                         ['(char *) params[0]',
+                          '((mbedtls_test_argument_t *) params[1])->sint',
+                          '((mbedtls_test_argument_t *) params[2])->sint'])
+
+    def test_hex_params(self):
+        """
+        Test hex parameters parsing
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, data_t * h, int result )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, ['char*', 'hex', 'int'])
+        self.assertEqual(local,
+                         '    data_t data1 = {(uint8_t *) params[1], '
+                         '((mbedtls_test_argument_t *) params[2])->len};\n')
+        self.assertEqual(arg_dispatch, ['(char *) params[0]',
+                                        '&data1',
+                                        '((mbedtls_test_argument_t *) params[3])->sint'])
+
+    def test_unsupported_arg(self):
+        """
+        Test unsupported argument type
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, data_t * h, unknown_t result )'
+        self.assertRaises(ValueError, parse_function_arguments, line)
+
+    def test_empty_params(self):
+        """
+        Test no parameters (nothing between parentheses).
+        :return:
+        """
+        line = 'void entropy_threshold()'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_blank_params(self):
+        """
+        Test no parameters (space between parentheses).
+        :return:
+        """
+        line = 'void entropy_threshold( )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_void_params(self):
+        """
+        Test no parameters (void keyword).
+        :return:
+        """
+        line = 'void entropy_threshold(void)'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_void_space_params(self):
+        """
+        Test no parameters (void with spaces).
+        :return:
+        """
+        line = 'void entropy_threshold( void )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+
+class ParseFunctionCode(TestCase):
+    """
+    Test suite for testing parse_function_code()
+    """
+
+    def test_no_function(self):
+        """
+        Test no test function found.
+        :return:
+        """
+        data = '''
+No
+test
+function
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        err_msg = 'file: test_suite_ut.function - Test functions not found!'
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    def test_no_end_case_comment(self):
+        """
+        Test missing end case.
+        :return:
+        """
+        data = '''
+void test_func()
+{
+}
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        err_msg = r'file: test_suite_ut.function - '\
+                  'end case pattern .*? not found!'
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    @patch("generate_test_code.parse_function_arguments")
+    def test_function_called(self,
+                             parse_function_arguments_mock):
+        """
+        Test parse_function_code()
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        data = '''
+void test_func()
+{
+}
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_function_code,
+                          stream, [], [])
+        self.assertTrue(parse_function_arguments_mock.called)
+        parse_function_arguments_mock.assert_called_with('void test_func()\n')
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_return(self, parse_function_arguments_mock,
+                    gen_function_wrapper_mock,
+                    gen_dependencies_mock,
+                    gen_dispatch_mock):
+        """
+        Test generated code.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func()
+{
+    ba ba black sheep
+    have you any wool
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        name, arg, code, dispatch_code = parse_function_code(stream, [], [])
+
+        self.assertTrue(parse_function_arguments_mock.called)
+        parse_function_arguments_mock.assert_called_with('void func()\n')
+        gen_function_wrapper_mock.assert_called_with('test_func', '', [])
+        self.assertEqual(name, 'test_func')
+        self.assertEqual(arg, [])
+        expected = '''#line 1 "test_suite_ut.function"
+
+static void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    ;
+}
+'''
+        self.assertEqual(code, expected)
+        self.assertEqual(dispatch_code, "\n    test_func_wrapper,\n")
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_with_exit_label(self, parse_function_arguments_mock,
+                             gen_function_wrapper_mock,
+                             gen_dependencies_mock,
+                             gen_dispatch_mock):
+        """
+        Test when exit label is present.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+static void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_non_void_function(self):
+        """
+        Test invalid signature (non void).
+        :return:
+        """
+        data = 'int entropy_threshold( char * a, data_t * h, int result )'
+        err_msg = 'file: test_suite_ut.function - Test functions not found!'
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_function_name_on_newline(self, parse_function_arguments_mock,
+                                      gen_function_wrapper_mock,
+                                      gen_dependencies_mock,
+                                      gen_dispatch_mock):
+        """
+        Test with line break before the function name.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void
+
+
+func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+static void
+
+
+test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_case_starting_with_comment(self, parse_function_arguments_mock,
+                                        gen_function_wrapper_mock,
+                                        gen_dependencies_mock,
+                                        gen_dispatch_mock):
+        """
+        Test with comments before the function signature
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''/* comment */
+/* more
+ * comment */
+// this is\\
+still \\
+a comment
+void func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+
+
+
+
+
+static void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_comment_in_prototype(self, parse_function_arguments_mock,
+                                  gen_function_wrapper_mock,
+                                  gen_dependencies_mock,
+                                  gen_dispatch_mock):
+        """
+        Test with comments in the function prototype
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func( int x, // (line \\
+                     comment)
+           int y /* lone closing parenthesis) */ )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+static void test_func( int x,
+
+           int y                                 )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_line_comment_in_block_comment(self, parse_function_arguments_mock,
+                                           gen_function_wrapper_mock,
+                                           gen_dependencies_mock,
+                                           gen_dispatch_mock):
+        """
+        Test with line comment in block comment.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func( int x /* // */ )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+static void test_func( int x          )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_block_comment_in_line_comment(self, parse_function_arguments_mock,
+                                           gen_function_wrapper_mock,
+                                           gen_dependencies_mock,
+                                           gen_dispatch_mock):
+        """
+        Test with block comment in line comment.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+// /*
+void func( int x )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+
+static void test_func( int x )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+
+class ParseFunction(TestCase):
+    """
+    Test Suite for testing parse_functions()
+    """
+
+    @patch("generate_test_code.parse_until_pattern")
+    def test_begin_header(self, parse_until_pattern_mock):
+        """
+        Test that BEGIN_HEADER is detected and parse_until_pattern() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        parse_until_pattern_mock.side_effect = stop
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+/* END_HEADER */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_until_pattern")
+    def test_begin_helper(self, parse_until_pattern_mock):
+        """
+        Test that BEGIN_SUITE_HELPERS is detected and
+        parse_until_pattern() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        parse_until_pattern_mock.side_effect = stop
+        data = '''/* BEGIN_SUITE_HELPERS */
+static void print_hello_world()
+{
+    printf("Hello World!\n");
+}
+/* END_SUITE_HELPERS */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_until_pattern_mock.assert_called_with(stream,
+                                                    END_SUITE_HELPERS_REGEX)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_suite_dependencies")
+    def test_begin_dep(self, parse_suite_dependencies_mock):
+        """
+        Test that BEGIN_DEPENDENCIES is detected and
+        parse_suite_dependencies() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        parse_suite_dependencies_mock.side_effect = stop
+        data = '''/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_suite_dependencies_mock.assert_called_with(stream)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_function_dependencies")
+    def test_begin_function_dep(self, func_mock):
+        """
+        Test that BEGIN_CASE dependencies are detected and
+        parse_function_dependencies() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        func_mock.side_effect = stop
+
+        dependencies_str = '/* BEGIN_CASE ' \
+            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+        data = '''%sstatic void test_func()
+{
+}
+''' % dependencies_str
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        func_mock.assert_called_with(dependencies_str)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_function_code")
+    @patch("generate_test_code.parse_function_dependencies")
+    def test_return(self, func_mock1, func_mock2):
+        """
+        Test that BEGIN_CASE is detected and parse_function_code() is called.
+        :return:
+        """
+        func_mock1.return_value = []
+        in_func_code = '''static void test_func()
+{
+}
+'''
+        func_dispatch = '''
+    test_func_wrapper,
+'''
+        func_mock2.return_value = 'test_func', [],\
+            in_func_code, func_dispatch
+        dependencies_str = '/* BEGIN_CASE ' \
+            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+        data = '''%sstatic void test_func()
+{
+}
+''' % dependencies_str
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        suite_dependencies, dispatch_code, func_code, func_info = \
+            parse_functions(stream)
+        func_mock1.assert_called_with(dependencies_str)
+        func_mock2.assert_called_with(stream, [], [])
+        self.assertEqual(stream.line_no, 5)
+        self.assertEqual(suite_dependencies, [])
+        expected_dispatch_code = '''/* Function Id: 0 */
+
+    test_func_wrapper,
+'''
+        self.assertEqual(dispatch_code, expected_dispatch_code)
+        self.assertEqual(func_code, in_func_code)
+        self.assertEqual(func_info, {'test_func': (0, [])})
+
+    def test_parsing(self):
+        """
+        Test case parsing.
+        :return:
+        """
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func1()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func2()
+{
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        suite_dependencies, dispatch_code, func_code, func_info = \
+            parse_functions(stream)
+        self.assertEqual(stream.line_no, 23)
+        self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])
+
+        expected_dispatch_code = '''/* Function Id: 0 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+    test_func1_wrapper,
+#else
+    NULL,
+#endif
+/* Function Id: 1 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+    test_func2_wrapper,
+#else
+    NULL,
+#endif
+'''
+        self.assertEqual(dispatch_code, expected_dispatch_code)
+        expected_func_code = '''#if defined(MBEDTLS_ECP_C)
+#line 2 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 13 "test_suite_ut.function"
+static void test_func1(void)
+{
+exit:
+    ;
+}
+
+static void test_func1_wrapper( void ** params )
+{
+    (void)params;
+
+    test_func1(  );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 19 "test_suite_ut.function"
+static void test_func2(void)
+{
+exit:
+    ;
+}
+
+static void test_func2_wrapper( void ** params )
+{
+    (void)params;
+
+    test_func2(  );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#endif /* MBEDTLS_ECP_C */
+'''
+        self.assertEqual(func_code, expected_func_code)
+        self.assertEqual(func_info, {'test_func1': (0, []),
+                                     'test_func2': (1, [])})
+
+    def test_same_function_name(self):
+        """
+        Test name conflict.
+        :return:
+        """
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN     -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_functions, stream)
+
+
+class EscapedSplit(TestCase):
+    """
+    Test suite for testing escaped_split().
+    Note: since escaped_split() output is used to write back to the
+    intermediate data file, any escape characters in the input are
+    retained in the output.
+    """
+
+    def test_invalid_input(self):
+        """
+        Test when input split character is not a character.
+        :return:
+        """
+        self.assertRaises(ValueError, escaped_split, '', 'string')
+
+    def test_empty_string(self):
+        """
+        Test empty string input.
+        :return:
+        """
+        splits = escaped_split('', ':')
+        self.assertEqual(splits, [])
+
+    def test_no_escape(self):
+        """
+        Test with no escape character. The behaviour should be same as
+        str.split()
+        :return:
+        """
+        test_str = 'yahoo:google'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, test_str.split(':'))
+
+    def test_escaped_input(self):
+        """
+        Test input that has an escaped delimiter.
+        :return:
+        """
+        test_str = r'yahoo\:google:facebook'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\:google', 'facebook'])
+
+    def test_escaped_escape(self):
+        """
+        Test input that has an escaped escape character.
+        :return:
+        """
+        test_str = r'yahoo\\:google:facebook'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])
+
+    def test_all_at_once(self):
+        """
+        Test input that combines escaped delimiters and escaped escapes.
+        :return:
+        """
+        test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\\', r'google',
+                                  r'facebook\:instagram\\',
+                                  r'bbc\\', r'wikipedia'])
+
+
+class ParseTestData(TestCase):
+    """
+    Test suite for parse test data.
+    """
+
+    def test_parser(self):
+        """
+        Test that tests are parsed correctly from data file.
+        :return:
+        """
+        data = """
+Diffie-Hellman full exchange #1
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+Diffie-Hellman full exchange #3
+dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271"
+
+Diffie-Hellman selftest
+dhm_selftest:
+"""
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        # List of (line_no, name, function_name, dependencies, args)
+        tests = list(parse_test_data(stream))
+        test1, test2, test3, test4 = tests
+        self.assertEqual(test1[0], 3)
+        self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
+        self.assertEqual(test1[2], 'dhm_do_dhm')
+        self.assertEqual(test1[3], [])
+        self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
+
+        self.assertEqual(test2[0], 6)
+        self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
+        self.assertEqual(test2[2], 'dhm_do_dhm')
+        self.assertEqual(test2[3], [])
+        self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
+                                    '10', '"9345098304850938450983409622"'])
+
+        self.assertEqual(test3[0], 9)
+        self.assertEqual(test3[1], 'Diffie-Hellman full exchange #3')
+        self.assertEqual(test3[2], 'dhm_do_dhm')
+        self.assertEqual(test3[3], [])
+        self.assertEqual(test3[4], ['10',
+                                    '"9345098382739712938719287391879381271"',
+                                    '10',
+                                    '"9345098792137312973297123912791271"'])
+
+        self.assertEqual(test4[0], 12)
+        self.assertEqual(test4[1], 'Diffie-Hellman selftest')
+        self.assertEqual(test4[2], 'dhm_selftest')
+        self.assertEqual(test4[3], [])
+        self.assertEqual(test4[4], [])
+
+    def test_with_dependencies(self):
+        """
+        Test that tests with dependencies are parsed.
+        :return:
+        """
+        data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        # List of (line_no, name, function_name, dependencies, args)
+        tests = list(parse_test_data(stream))
+        test1, test2 = tests
+        self.assertEqual(test1[0], 4)
+        self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
+        self.assertEqual(test1[2], 'dhm_do_dhm')
+        self.assertEqual(test1[3], ['YAHOO'])
+        self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
+
+        self.assertEqual(test2[0], 7)
+        self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
+        self.assertEqual(test2[2], 'dhm_do_dhm')
+        self.assertEqual(test2[3], [])
+        self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
+                                    '10', '"9345098304850938450983409622"'])
+
+    def test_no_args(self):
+        """
+        Test that GeneratorInputError is raised when the test function
+        name and args line is missing.
+        :return:
+        """
+        data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        with self.assertRaises(GeneratorInputError):
+            for _ in parse_test_data(stream):
+                pass
+
+    def test_incomplete_data(self):
+        """
+        Test that GeneratorInputError is raised when the input ends
+        before the test function name and args line.
+        :return:
+        """
+        data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+"""
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        with self.assertRaises(GeneratorInputError):
+            for _ in parse_test_data(stream):
+                pass
+
+
+class GenDepCheck(TestCase):
+    """
+    Test suite for gen_dep_check(). It is assumed this function is
+    called with valid inputs.
+    """
+
+    def test_gen_dep_check(self):
+        """
+        Test that dependency check code is generated correctly.
+        :return:
+        """
+        expected = """
+        case 5:
+            {
+#if defined(YAHOO)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;"""
+        out = gen_dep_check(5, 'YAHOO')
+        self.assertEqual(out, expected)
+
+    def test_not_defined_dependency(self):
+        """
+        Test dependency with !.
+        :return:
+        """
+        expected = """
+        case 5:
+            {
+#if !defined(YAHOO)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;"""
+        out = gen_dep_check(5, '!YAHOO')
+        self.assertEqual(out, expected)
+
+    def test_empty_dependency(self):
+        """
+        Test invalid dependency input.
+        :return:
+        """
+        self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!')
+
+    def test_negative_dep_id(self):
+        """
+        Test invalid dependency input.
+        :return:
+        """
+        self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO')
+
+
+class GenExpCheck(TestCase):
+    """
+    Test suite for gen_expression_check(). It is assumed this function
+    is called with valid inputs.
+    """
+
+    def test_gen_exp_check(self):
+        """
+        Test that expression check code is generated correctly.
+        :return:
+        """
+        expected = """
+        case 5:
+            {
+                *out_value = YAHOO;
+            }
+            break;"""
+        out = gen_expression_check(5, 'YAHOO')
+        self.assertEqual(out, expected)
+
+    def test_invalid_expression(self):
+        """
+        Test invalid expression input.
+        :return:
+        """
+        self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')
+
+    def test_negative_exp_id(self):
+        """
+        Test invalid expression id.
+        :return:
+        """
+        self.assertRaises(GeneratorInputError, gen_expression_check,
+                          -1, 'YAHOO')
+
+
+class WriteDependencies(TestCase):
+    """
+    Test suite for testing write_dependencies.
+    """
+
+    def test_no_test_dependencies(self):
+        """
+        Test when test dependencies input is empty.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_dependencies = []
+        dep_check_code = write_dependencies(stream, [], unique_dependencies)
+        self.assertEqual(dep_check_code, '')
+        self.assertEqual(len(unique_dependencies), 0)
+        self.assertEqual(stream.getvalue(), '')
+
+    def test_unique_dep_ids(self):
+        """
+        Test that unique dependencies are assigned distinct sequential IDs.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_dependencies = []
+        dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
+                                            unique_dependencies)
+        expect_dep_check_code = '''
+        case 0:
+            {
+#if defined(DEP3)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;
+        case 1:
+            {
+#if defined(DEP2)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;
+        case 2:
+            {
+#if defined(DEP1)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;'''
+        self.assertEqual(dep_check_code, expect_dep_check_code)
+        self.assertEqual(len(unique_dependencies), 3)
+        self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')
+
+    def test_dep_id_repeat(self):
+        """
+        Test that repeated dependencies reuse previously assigned IDs.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_dependencies = []
+        dep_check_code = ''
+        dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
+                                             unique_dependencies)
+        dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
+                                             unique_dependencies)
+        dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
+                                             unique_dependencies)
+        expect_dep_check_code = '''
+        case 0:
+            {
+#if defined(DEP3)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;
+        case 1:
+            {
+#if defined(DEP2)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;
+        case 2:
+            {
+#if defined(DEP1)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;'''
+        self.assertEqual(dep_check_code, expect_dep_check_code)
+        self.assertEqual(len(unique_dependencies), 3)
+        self.assertEqual(stream.getvalue(),
+                         'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')
+
+
+class WriteParams(TestCase):
+    """
+    Test Suite for testing write_parameters().
+    """
+
+    def test_no_params(self):
+        """
+        Test with empty test_args
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_expressions = []
+        expression_code = write_parameters(stream, [], [], unique_expressions)
+        self.assertEqual(len(unique_expressions), 0)
+        self.assertEqual(expression_code, '')
+        self.assertEqual(stream.getvalue(), '\n')
+
+    def test_no_exp_param(self):
+        """
+        Test when there is no macro or expression in the params.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_expressions = []
+        expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
+                                                    '0'],
+                                           ['char*', 'hex', 'int'],
+                                           unique_expressions)
+        self.assertEqual(len(unique_expressions), 0)
+        self.assertEqual(expression_code, '')
+        self.assertEqual(stream.getvalue(),
+                         ':char*:"Yahoo":hex:"abcdef00":int:0\n')
+
+    def test_hex_format_int_param(self):
+        """
+        Test int parameter in hex format.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_expressions = []
+        expression_code = write_parameters(stream,
+                                           ['"Yahoo"', '"abcdef00"', '0xAA'],
+                                           ['char*', 'hex', 'int'],
+                                           unique_expressions)
+        self.assertEqual(len(unique_expressions), 0)
+        self.assertEqual(expression_code, '')
+        self.assertEqual(stream.getvalue(),
+                         ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')
+
+    def test_with_exp_param(self):
+        """
+        Test when there are macros or expressions in the params.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_expressions = []
+        expression_code = write_parameters(stream,
+                                           ['"Yahoo"', '"abcdef00"', '0',
+                                            'MACRO1', 'MACRO2', 'MACRO3'],
+                                           ['char*', 'hex', 'int',
+                                            'int', 'int', 'int'],
+                                           unique_expressions)
+        self.assertEqual(len(unique_expressions), 3)
+        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+        expected_expression_code = '''
+        case 0:
+            {
+                *out_value = MACRO1;
+            }
+            break;
+        case 1:
+            {
+                *out_value = MACRO2;
+            }
+            break;
+        case 2:
+            {
+                *out_value = MACRO3;
+            }
+            break;'''
+        self.assertEqual(expression_code, expected_expression_code)
+        self.assertEqual(stream.getvalue(),
+                         ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
+                         ':exp:2\n')
+
+    def test_with_repeat_calls(self):
+        """
+        Test when write_parameters() is called with the same macros or
+        expressions.
+        :return:
+        """
+        stream = StringIOWrapper('test_suite_ut.data', '')
+        unique_expressions = []
+        expression_code = ''
+        expression_code += write_parameters(stream,
+                                            ['"Yahoo"', 'MACRO1', 'MACRO2'],
+                                            ['char*', 'int', 'int'],
+                                            unique_expressions)
+        expression_code += write_parameters(stream,
+                                            ['"abcdef00"', 'MACRO2', 'MACRO3'],
+                                            ['hex', 'int', 'int'],
+                                            unique_expressions)
+        expression_code += write_parameters(stream,
+                                            ['0', 'MACRO3', 'MACRO1'],
+                                            ['int', 'int', 'int'],
+                                            unique_expressions)
+        self.assertEqual(len(unique_expressions), 3)
+        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+        expected_expression_code = '''
+        case 0:
+            {
+                *out_value = MACRO1;
+            }
+            break;
+        case 1:
+            {
+                *out_value = MACRO2;
+            }
+            break;
+        case 2:
+            {
+                *out_value = MACRO3;
+            }
+            break;'''
+        self.assertEqual(expression_code, expected_expression_code)
+        expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
+:hex:"abcdef00":exp:1:exp:2
+:int:0:exp:2:exp:0
+'''
+        self.assertEqual(stream.getvalue(), expected_data_file)
+
+
+class GenTestSuiteDependenciesChecks(TestCase):
+    """
+    Test suite for testing gen_suite_dep_checks()
+    """
+    def test_empty_suite_dependencies(self):
+        """
+        Test with empty suite_dependencies list.
+
+        :return:
+        """
+        dep_check_code, expression_code = \
+            gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
+        self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
+        self.assertEqual(expression_code, 'EXPRESSION_CODE')
+
+    def test_suite_dependencies(self):
+        """
+        Test with suite_dependencies list.
+
+        :return:
+        """
+        dep_check_code, expression_code = \
+            gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
+                                 'EXPRESSION_CODE')
+        expected_dep_check_code = '''
+#if defined(SUITE_DEP)
+DEP_CHECK_CODE
+#endif
+'''
+        expected_expression_code = '''
+#if defined(SUITE_DEP)
+EXPRESSION_CODE
+#endif
+'''
+        self.assertEqual(dep_check_code, expected_dep_check_code)
+        self.assertEqual(expression_code, expected_expression_code)
+
+    def test_no_dep_no_exp(self):
+        """
+        Test when there are no dependency and expression code.
+        :return:
+        """
+        dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
+        self.assertEqual(dep_check_code, '')
+        self.assertEqual(expression_code, '')
+
+
+class GenFromTestData(TestCase):
+    """
+    Test suite for gen_from_test_data()
+    """
+
+    @staticmethod
+    @patch("generate_test_code.write_dependencies")
+    @patch("generate_test_code.write_parameters")
+    @patch("generate_test_code.gen_suite_dep_checks")
+    def test_intermediate_data_file(func_mock1,
+                                    write_parameters_mock,
+                                    write_dependencies_mock):
+        """
+        Test that the helpers are called with the expected data when
+        writing the intermediate data file.
+        :return:
+        """
+        data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+        data_f = StringIOWrapper('test_suite_ut.data', data)
+        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+        func_info = {'test_func1': (1, ('int',))}
+        suite_dependencies = []
+        write_parameters_mock.side_effect = write_parameters
+        write_dependencies_mock.side_effect = write_dependencies
+        func_mock1.side_effect = gen_suite_dep_checks
+        gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
+        write_dependencies_mock.assert_called_with(out_data_f,
+                                                   ['DEP1'], ['DEP1'])
+        write_parameters_mock.assert_called_with(out_data_f, ['0'],
+                                                 ('int',), [])
+        expected_dep_check_code = '''
+        case 0:
+            {
+#if defined(DEP1)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;'''
+        func_mock1.assert_called_with(
+            suite_dependencies, expected_dep_check_code, '')
+
+    def test_function_not_found(self):
+        """
+        Test that GeneratorInputError is raised when the function info
+        is not found.
+        :return:
+        """
+        data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+        data_f = StringIOWrapper('test_suite_ut.data', data)
+        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+        func_info = {'test_func2': (1, ('int',))}
+        suite_dependencies = []
+        self.assertRaises(GeneratorInputError, gen_from_test_data,
+                          data_f, out_data_f, func_info, suite_dependencies)
+
+    def test_different_func_args(self):
+        """
+        Test that GeneratorInputError is raised when the number of
+        parameters and function arguments differ.
+        :return:
+        """
+        data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+        data_f = StringIOWrapper('test_suite_ut.data', data)
+        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+        func_info = {'test_func2': (1, ('int', 'hex'))}
+        suite_dependencies = []
+        self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
+                          out_data_f, func_info, suite_dependencies)
+
+    def test_output(self):
+        """
+        Test that intermediate data file is written with expected data.
+        :return:
+        """
+        data = '''
+My test 1
+depends_on:DEP1
+func1:0:0xfa:MACRO1:MACRO2
+
+My test 2
+depends_on:DEP1:DEP2
+func2:"yahoo":88:MACRO1
+'''
+        data_f = StringIOWrapper('test_suite_ut.data', data)
+        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+        func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
+                     'test_func2': (1, ('char*', 'int', 'int'))}
+        suite_dependencies = []
+        dep_check_code, expression_code = \
+            gen_from_test_data(data_f, out_data_f, func_info,
+                               suite_dependencies)
+        expected_dep_check_code = '''
+        case 0:
+            {
+#if defined(DEP1)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;
+        case 1:
+            {
+#if defined(DEP2)
+                ret = DEPENDENCY_SUPPORTED;
+#else
+                ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+            }
+            break;'''
+        expected_data = '''My test 1
+depends_on:0
+0:int:0:int:0xfa:exp:0:exp:1
+
+My test 2
+depends_on:0:1
+1:char*:"yahoo":int:88:exp:0
+
+'''
+        expected_expression_code = '''
+        case 0:
+            {
+                *out_value = MACRO1;
+            }
+            break;
+        case 1:
+            {
+                *out_value = MACRO2;
+            }
+            break;'''
+        self.assertEqual(dep_check_code, expected_dep_check_code)
+        self.assertEqual(out_data_f.getvalue(), expected_data)
+        self.assertEqual(expression_code, expected_expression_code)
+
+
+if __name__ == '__main__':
+    unittest_main()
diff --git a/framework/scripts/test_psa_compliance.py b/framework/scripts/test_psa_compliance.py
new file mode 100755
index 0000000..8053580
--- /dev/null
+++ b/framework/scripts/test_psa_compliance.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+"""Run the PSA Crypto API compliance test suite.
+Clone the repo and check out the commit specified by PSA_ARCH_TEST_REPO and PSA_ARCH_TEST_REF,
+then compile and run the test suite. The clone is stored at <repository root>/psa-arch-tests.
+Known defects in either the test suite or mbedtls / TF-PSA-Crypto - identified by their test
+number - are ignored, while unexpected failures AND successes are reported as errors, to help
+keep the list of known defects as up to date as possible.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+from typing import List
+from pathlib import Path
+
+from mbedtls_framework import build_tree
+
+# PSA Compliance tests we expect to fail due to known defects in Mbed TLS /
+# TF-PSA-Crypto (or the test suite).
+# The test numbers correspond to the numbers used by the console output of the test suite.
+# Test number 2xx corresponds to the files in the folder
+# psa-arch-tests/api-tests/dev_apis/crypto/test_c0xx
+EXPECTED_FAILURES = {} # type: dict
+
+PSA_ARCH_TESTS_REPO = 'https://github.com/ARM-software/psa-arch-tests.git'
+PSA_ARCH_TESTS_REF = 'v23.06_API1.5_ADAC_EAC'
+
+#pylint: disable=too-many-branches,too-many-statements,too-many-locals
+def main(library_build_dir: str):
+    root_dir = os.getcwd()
+    install_dir = Path(library_build_dir + "/install_dir").resolve()
+    tmp_env = os.environ
+    tmp_env['CC'] = 'gcc'
+    subprocess.check_call(['cmake', '.', '-GUnix Makefiles',
+                           '-B' + library_build_dir,
+                           '-DCMAKE_INSTALL_PREFIX=' + str(install_dir)],
+                          env=tmp_env)
+    subprocess.check_call(['cmake', '--build', library_build_dir, '--target', 'install'])
+
+    if build_tree.is_mbedtls_3_6():
+        libraries_to_link = [str(install_dir.joinpath("lib/libmbedcrypto.a"))]
+    else:
+        libraries_to_link = [str(install_dir.joinpath("lib/" + lib))
+                             for lib in ["libtfpsacrypto.a", "libbuiltin.a",
+                                         "libp256m.a", "libeverest.a"]]
+
+    psa_arch_tests_dir = 'psa-arch-tests'
+    os.makedirs(psa_arch_tests_dir, exist_ok=True)
+    try:
+        os.chdir(psa_arch_tests_dir)
+
+        # Reuse existing local clone
+        subprocess.check_call(['git', 'init'])
+        subprocess.check_call(['git', 'fetch', PSA_ARCH_TESTS_REPO, PSA_ARCH_TESTS_REF])
+        subprocess.check_call(['git', 'checkout', 'FETCH_HEAD'])
+
+        build_dir = 'api-tests/build'
+        try:
+            shutil.rmtree(build_dir)
+        except FileNotFoundError:
+            pass
+        os.mkdir(build_dir)
+        os.chdir(build_dir)
+
+        #pylint: disable=bad-continuation
+        subprocess.check_call([
+            'cmake', '..',
+                     '-GUnix Makefiles',
+                     '-DTARGET=tgt_dev_apis_stdc',
+                     '-DTOOLCHAIN=HOST_GCC',
+                     '-DSUITE=CRYPTO',
+                     '-DPSA_CRYPTO_LIB_FILENAME={}'.format(';'.join(libraries_to_link)),
+                     '-DPSA_INCLUDE_PATHS=' + str(install_dir.joinpath("include"))
+        ])
+
+        subprocess.check_call(['cmake', '--build', '.'])
+
+        proc = subprocess.Popen(['./psa-arch-tests-crypto'],
+                                bufsize=1, stdout=subprocess.PIPE, universal_newlines=True)
+
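+        # The suite's console output is parsed line by line: progress lines
+        # matching "TEST: <number>" track the current test number, and
+        # verdict lines matching "TEST RESULT: PASSED|FAILED" are classified
+        # against EXPECTED_FAILURES.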
+        test_re = re.compile(
+            '^TEST: (?P<test_num>[0-9]*)|'
+            '^TEST RESULT: (?P<test_result>FAILED|PASSED)'
+        )
+        test = -1
+        unexpected_successes = set(EXPECTED_FAILURES)
+        expected_failures = [] # type: List[int]
+        unexpected_failures = [] # type: List[int]
+        if proc.stdout is None:
+            return 1
+
+        for line in proc.stdout:
+            print(line, end='')
+            match = test_re.match(line)
+            if match is not None:
+                groupdict = match.groupdict()
+                test_num = groupdict['test_num']
+                if test_num is not None:
+                    test = int(test_num)
+                elif groupdict['test_result'] == 'FAILED':
+                    try:
+                        unexpected_successes.remove(test)
+                        expected_failures.append(test)
+                        print('Expected failure, ignoring')
+                    except KeyError:
+                        unexpected_failures.append(test)
+                        print('ERROR: Unexpected failure')
+                elif test in unexpected_successes:
+                    print('ERROR: Unexpected success')
+        proc.wait()
+
+        print()
+        print('***** test_psa_compliance.py report ******')
+        print()
+        print('Expected failures:', ', '.join(str(i) for i in expected_failures))
+        print('Unexpected failures:', ', '.join(str(i) for i in unexpected_failures))
+        print('Unexpected successes:', ', '.join(str(i) for i in sorted(unexpected_successes)))
+        print()
+        if unexpected_successes or unexpected_failures:
+            if unexpected_successes:
+                print('Unexpected successes encountered.')
+                print('Please remove the corresponding tests from '
+                      'EXPECTED_FAILURES in '
+                      'framework/scripts/test_psa_compliance.py')
+                print()
+            print('FAILED')
+            return 1
+        else:
+            print('SUCCESS')
+            return 0
+    finally:
+        os.chdir(root_dir)
+
+if __name__ == '__main__':
+    BUILD_DIR = 'out_of_source_build'
+
+    # pylint: disable=invalid-name
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--build-dir', nargs=1,
+                        help='path to Mbed TLS / TF-PSA-Crypto build directory')
+    args = parser.parse_args()
+
+    if args.build_dir is not None:
+        BUILD_DIR = args.build_dir[0]
+
+    sys.exit(main(BUILD_DIR))
diff --git a/framework/scripts/test_psa_constant_names.py b/framework/scripts/test_psa_constant_names.py
new file mode 100755
index 0000000..ad13110
--- /dev/null
+++ b/framework/scripts/test_psa_constant_names.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+"""Test the program psa_constant_names.
+Gather constant names from header files and test cases. Compile a C program
+to print out their numerical values, feed these numerical values to
+psa_constant_names, and check that the output is the original name.
+Return 0 if all test cases pass, 1 if the output was not always as expected,
+or 1 (with a Python backtrace) if there was an operational error.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+from collections import namedtuple
+import os
+import re
+import subprocess
+import sys
+from typing import Iterable, List, Optional, Tuple
+
+from mbedtls_framework import build_tree
+from mbedtls_framework import c_build_helper
+from mbedtls_framework.macro_collector import InputsForTest, PSAMacroEnumerator
+from mbedtls_framework import typing_util
+
+def gather_inputs(headers: Iterable[str],
+                  test_suites: Iterable[str],
+                  inputs_class=InputsForTest) -> PSAMacroEnumerator:
+    """Read the list of inputs to test psa_constant_names with."""
+    inputs = inputs_class()
+    for header in headers:
+        inputs.parse_header(header)
+    for test_cases in test_suites:
+        inputs.parse_test_cases(test_cases)
+    inputs.add_numerical_values()
+    inputs.gather_arguments()
+    return inputs
+
+def run_c(type_word: str,
+          expressions: Iterable[str],
+          include_path: Optional[str] = None,
+          keep_c: bool = False) -> List[str]:
+    """Generate and run a program to print out numerical values of C expressions."""
+    if type_word == 'status':
+        cast_to = 'long'
+        printf_format = '%ld'
+    else:
+        cast_to = 'unsigned long'
+        printf_format = '0x%08lx'
+    return c_build_helper.get_c_expression_values(
+        cast_to, printf_format,
+        expressions,
+        caller='test_psa_constant_names.py for {} values'.format(type_word),
+        file_label=type_word,
+        header='#include <psa/crypto.h>',
+        include_path=include_path,
+        keep_c=keep_c
+    )
+
+NORMALIZE_STRIP_RE = re.compile(r'\s+')
+def normalize(expr: str) -> str:
+    """Normalize the C expression so as not to care about trivial differences.
+
+    Currently "trivial differences" means whitespace.
+    """
+    return re.sub(NORMALIZE_STRIP_RE, '', expr)
+
+ALG_TRUNCATED_TO_SELF_RE = \
+    re.compile(r'PSA_ALG_AEAD_WITH_SHORTENED_TAG\('
+               r'PSA_ALG_(?:CCM|CHACHA20_POLY1305|GCM)'
+               r', *16\)\Z')
+
+def is_simplifiable(expr: str) -> bool:
+    """Determine whether an expression is simplifiable.
+
+    Simplifiable expressions can't be output in their input form, since
+    the output will be the simple form. Therefore they must be excluded
+    from testing.
+    """
+    if ALG_TRUNCATED_TO_SELF_RE.match(expr):
+        return True
+    return False
+
+def collect_values(inputs: InputsForTest,
+                   type_word: str,
+                   include_path: Optional[str] = None,
+                   keep_c: bool = False) -> Tuple[List[str], List[str]]:
+    """Generate expressions using known macro names and calculate their values.
+
+    Return a pair of lists (expressions, values) where values[i] is a
+    string representation of the integer value of expressions[i].
+    """
+    names = inputs.get_names(type_word)
+    expressions = sorted(expr
+                         for expr in inputs.generate_expressions(names)
+                         if not is_simplifiable(expr))
+    values = run_c(type_word, expressions,
+                   include_path=include_path, keep_c=keep_c)
+    return expressions, values
+
+class Tests:
+    """An object representing tests and their results."""
+
+    Error = namedtuple('Error',
+                       ['type', 'expression', 'value', 'output'])
+
+    def __init__(self, options) -> None:
+        self.options = options
+        self.count = 0
+        self.errors = [] #type: List[Tests.Error]
+
+    def run_one(self, inputs: InputsForTest, type_word: str) -> None:
+        """Test psa_constant_names for the specified type.
+
+        Run the program on the names for this type.
+        Use the inputs to figure out what arguments to pass to macros that
+        take arguments.
+        """
+        expressions, values = collect_values(inputs, type_word,
+                                             include_path=self.options.include,
+                                             keep_c=self.options.keep_c)
+        output_bytes = subprocess.check_output([self.options.program,
+                                                type_word] + values)
+        output = output_bytes.decode('ascii')
+        outputs = output.strip().split('\n')
+        self.count += len(expressions)
+        for expr, value, output in zip(expressions, values, outputs):
+            if self.options.show:
+                sys.stdout.write('{} {}\t{}\n'.format(type_word, value, output))
+            if normalize(expr) != normalize(output):
+                self.errors.append(self.Error(type=type_word,
+                                              expression=expr,
+                                              value=value,
+                                              output=output))
+
+    def run_all(self, inputs: InputsForTest) -> None:
+        """Run psa_constant_names on all the gathered inputs."""
+        for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
+                          'key_type', 'key_usage']:
+            self.run_one(inputs, type_word)
+
+    def report(self, out: typing_util.Writable) -> None:
+        """Describe each case where the output is not as expected.
+
+        Write the errors to ``out``.
+        Also write a total.
+        """
+        for error in self.errors:
+            out.write('For {} "{}", got "{}" (value: {})\n'
+                      .format(error.type, error.expression,
+                              error.output, error.value))
+        out.write('{} test cases'.format(self.count))
+        if self.errors:
+            out.write(', {} FAIL\n'.format(len(self.errors)))
+        else:
+            out.write(' PASS\n')
+
+HEADERS = ['psa/crypto.h', 'psa/crypto_extra.h', 'psa/crypto_values.h']
+
+if build_tree.is_mbedtls_3_6():
+    TEST_SUITES = ['tests/suites/test_suite_psa_crypto_metadata.data']
+else:
+    TEST_SUITES = ['tf-psa-crypto/tests/suites/test_suite_psa_crypto_metadata.data']
+
+def main():
+    parser = argparse.ArgumentParser(description=globals()['__doc__'])
+    if build_tree.is_mbedtls_3_6():
+        parser.add_argument('--include', '-I',
+                            action='append', default=['include'],
+                            help='Directory for header files')
+    else:
+        parser.add_argument('--include', '-I',
+                            action='append', default=['tf-psa-crypto/include',
+                                                      'tf-psa-crypto/drivers/builtin/include',
+                                                      'tf-psa-crypto/drivers/everest/include',
+                                                      'include'],
+                            help='Directory for header files')
+    parser.add_argument('--keep-c',
+                        action='store_true', dest='keep_c', default=False,
+                        help='Keep the intermediate C file')
+    parser.add_argument('--no-keep-c',
+                        action='store_false', dest='keep_c',
+                        help='Don\'t keep the intermediate C file (default)')
+    if build_tree.is_mbedtls_3_6():
+        parser.add_argument('--program',
+                            default='programs/psa/psa_constant_names',
+                            help='Program to test')
+    else:
+        parser.add_argument('--program',
+                            default='tf-psa-crypto/programs/psa/psa_constant_names',
+                            help='Program to test')
+    parser.add_argument('--show',
+                        action='store_true',
+                        help='Show tested values on stdout')
+    parser.add_argument('--no-show',
+                        action='store_false', dest='show',
+                        help='Don\'t show tested values (default)')
+    options = parser.parse_args()
+    headers = [os.path.join(options.include[0], h) for h in HEADERS]
+    inputs = gather_inputs(headers, TEST_SUITES)
+    tests = Tests(options)
+    tests.run_all(inputs)
+    tests.report(sys.stdout)
+    if tests.errors:
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
diff --git a/framework/scripts/translate_ciphers.py b/framework/scripts/translate_ciphers.py
new file mode 100755
index 0000000..5933537
--- /dev/null
+++ b/framework/scripts/translate_ciphers.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+
+# translate_ciphers.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Translate standard ciphersuite names to GnuTLS, OpenSSL and Mbed TLS standards.
+
+To test the translation functions run:
+python3 -m unittest translate_ciphers.py
+"""
+
+import re
+import argparse
+import unittest
+
+class TestTranslateCiphers(unittest.TestCase):
+    """
+    Ensure translate_ciphers.py translates and formats ciphersuite names
+    correctly
+    """
+    def test_translate_all_cipher_names(self):
+        """
+        Translate standard ciphersuite names to GnuTLS, OpenSSL and
+        Mbed TLS counterpart. Use only a small subset of ciphers
+        that exercise each step of the translation functions
+        """
+        ciphers = [
+            ("TLS_ECDHE_ECDSA_WITH_NULL_SHA",
+             "+ECDHE-ECDSA:+NULL:+SHA1",
+             "ECDHE-ECDSA-NULL-SHA",
+             "TLS-ECDHE-ECDSA-WITH-NULL-SHA"),
+            ("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+             "+ECDHE-ECDSA:+AES-128-GCM:+AEAD",
+             "ECDHE-ECDSA-AES128-GCM-SHA256",
+             "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256"),
+            ("TLS_RSA_WITH_AES_256_CBC_SHA",
+             "+RSA:+AES-256-CBC:+SHA1",
+             "AES256-SHA",
+             "TLS-RSA-WITH-AES-256-CBC-SHA"),
+            ("TLS_PSK_WITH_3DES_EDE_CBC_SHA",
+             "+PSK:+3DES-CBC:+SHA1",
+             "PSK-3DES-EDE-CBC-SHA",
+             "TLS-PSK-WITH-3DES-EDE-CBC-SHA"),
+            ("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+             None,
+             "ECDHE-ECDSA-CHACHA20-POLY1305",
+             "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256"),
+            ("TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
+             "+ECDHE-ECDSA:+AES-128-CCM:+AEAD",
+             None,
+             "TLS-ECDHE-ECDSA-WITH-AES-128-CCM"),
+            ("TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384",
+             None,
+             "ECDHE-ARIA256-GCM-SHA384",
+             "TLS-ECDHE-RSA-WITH-ARIA-256-GCM-SHA384"),
+        ]
+
+        for s, g_exp, o_exp, m_exp in ciphers:
+
+            if g_exp is not None:
+                g = translate_gnutls(s)
+                self.assertEqual(g, g_exp)
+
+            if o_exp is not None:
+                o = translate_ossl(s)
+                self.assertEqual(o, o_exp)
+
+            if m_exp is not None:
+                m = translate_mbedtls(s)
+                self.assertEqual(m, m_exp)
+
+def translate_gnutls(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return the GnuTLS naming convention
+    """
+
+    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
+    # naming convention
+    s_cipher = s_cipher.replace("_", "-")
+
+    s_cipher = re.sub(r'\ATLS-', '+', s_cipher)
+    s_cipher = s_cipher.replace("-WITH-", ":+")
+    s_cipher = s_cipher.replace("-EDE", "")
+
+    # SHA in Mbed TLS == SHA1 in GnuTLS:
+    # if the last 3 chars are SHA, append 1
+    if s_cipher[-3:] == "SHA":
+        s_cipher = s_cipher+"1"
+
+    # AEAD ciphersuites (CCM, CCM-8, GCM) end in ":+AEAD" instead of a
+    # hash: drop any "-SHAxyz" suffix after "GCM", then append ":+AEAD"
+    if "CCM" in s_cipher or "GCM" in s_cipher:
+        s_cipher = re.sub(r"GCM-SHA\d\d\d", "GCM", s_cipher)
+        s_cipher = s_cipher+":+AEAD"
+
+    # Replace the last "-" with ":+"
+    else:
+        index = s_cipher.rindex("-")
+        s_cipher = s_cipher[:index] + ":+" + s_cipher[index+1:]
+
+    return s_cipher
+
+def translate_ossl(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return the OpenSSL naming convention
+    """
+
+    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
+    # naming convention
+    s_cipher = s_cipher.replace("_", "-")
+
+    s_cipher = re.sub(r'^TLS-', '', s_cipher)
+    s_cipher = s_cipher.replace("-WITH", "")
+
+    # Remove the "-" from "ABC-xyz"
+    s_cipher = s_cipher.replace("AES-", "AES")
+    s_cipher = s_cipher.replace("CAMELLIA-", "CAMELLIA")
+    s_cipher = s_cipher.replace("ARIA-", "ARIA")
+
+    # Remove "RSA" if it is at the beginning
+    s_cipher = re.sub(r'^RSA-', r'', s_cipher)
+
+    # For all ciphersuites other than PSK:
+    if "PSK" not in s_cipher:
+        s_cipher = s_cipher.replace("-EDE", "")
+        s_cipher = s_cipher.replace("3DES-CBC", "DES-CBC3")
+
+        # Remove "CBC" if it is not prefixed by DES
+        s_cipher = re.sub(r'(?<!DES-)CBC-', r'', s_cipher)
+
+    # ECDHE-RSA-ARIA does not exist in OpenSSL
+    s_cipher = s_cipher.replace("ECDHE-RSA-ARIA", "ECDHE-ARIA")
+
+    # POLY1305 should not be followed by anything
+    if "POLY1305" in s_cipher:
+        index = s_cipher.rindex("POLY1305")
+        s_cipher = s_cipher[:index+8]
+
+    # If DES is being used, replace DHE with EDH
+    if "DES" in s_cipher and "DHE" in s_cipher and "ECDHE" not in s_cipher:
+        s_cipher = s_cipher.replace("DHE", "EDH")
+
+    return s_cipher
+
+def translate_mbedtls(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return Mbed TLS ciphersuite naming convention
+    """
+
+    # Replace "_" with "-"
+    s_cipher = s_cipher.replace("_", "-")
+
+    return s_cipher
+
+def format_ciphersuite_names(mode, names):
+    """Return "name=translation" pairs, space-separated.
+
+    mode selects the translator: "g" (GnuTLS), "o" (OpenSSL) or
+    "m" (Mbed TLS).
+    """
+    t = {"g": translate_gnutls,
+         "o": translate_ossl,
+         "m": translate_mbedtls
+        }[mode]
+    return " ".join(c + '=' + t(c) for c in names)
+
+def main(target, names):
+    print(format_ciphersuite_names(target, names))
+
+if __name__ == "__main__":
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument('target', metavar='TARGET', choices=['o', 'g', 'm'])
+    PARSER.add_argument('names', metavar='NAMES', nargs='+')
+    ARGS = PARSER.parse_args()
+    main(ARGS.target, ARGS.names)