aboutsummaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rwxr-xr-xtools/generate_dtb/generate_dtb.sh6
-rwxr-xr-xtools/generate_json/generate_json.sh107
-rwxr-xr-xtools/generate_test_list/generate_test_list.pl193
-rwxr-xr-xtools/generate_test_list/generate_test_list.py363
-rw-r--r--tools/generate_test_list/tests_list.c.tpl7
-rw-r--r--tools/generate_test_list/tests_list.h.tpl6
6 files changed, 468 insertions, 214 deletions
diff --git a/tools/generate_dtb/generate_dtb.sh b/tools/generate_dtb/generate_dtb.sh
index 564c2a05d..1e0797b06 100755
--- a/tools/generate_dtb/generate_dtb.sh
+++ b/tools/generate_dtb/generate_dtb.sh
@@ -1,7 +1,7 @@
#!/bin/bash
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -12,13 +12,14 @@
# $1 = image_name (lowercase)
# $2 = path/to/file.dts
# $3 = build/$PLAT/$BUILD_TYPE/
+# $4 = path to store the dtb generated by this script
ORIGINAL_DTS=$2
MAPFILE="$3/$1/$1.map"
EXTRA_DTS="$3/$1/$1_extra.dts"
COMBINED_DTS="$3/$1/$1_combined.dts"
PREPROCESSED_DTS="$3/$1/$1_preprocessed.dts"
-GENERATED_DTB="$3/$1.dtb"
+GENERATED_DTB=$4
# Look for the start and end of the sections that are only known in the elf file
# after compiling the partition.
@@ -43,7 +44,6 @@ cat "$ORIGINAL_DTS" > "$COMBINED_DTS"
INCLUDES="-I spm/cactus
-I spm/ivy
- -I spm/quark
-I spm/include
-I include/lib"
diff --git a/tools/generate_json/generate_json.sh b/tools/generate_json/generate_json.sh
index f46cf158a..58677004d 100755
--- a/tools/generate_json/generate_json.sh
+++ b/tools/generate_json/generate_json.sh
@@ -1,36 +1,107 @@
#!/bin/bash
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Generate a JSON file which will be fed to TF-A as SPM_LAYOUT_FILE to package
# Secure Partitions as part of FIP.
+# Note the script will append the partition to the existing layout file.
+# If you wish to only generate a layout file with this partition first run
+# "make realclean" to remove the existing file.
-# $1 = Secure Partition (cactus)
-# $2 = Platform built path
-# Output = $2/sp_layout.json
+# $1 = Platform built path
+# $2.. = List of Secure Partitions
+# Output = $1/sp_layout.json
-GENERATED_JSON=$2/sp_layout.json
+GENERATED_JSON=$1/sp_layout.json
+shift # Shift arguments 1
+
+PARTITION_ALREADY_PRESENT=false
+
+CACTUS_PRESENT=false
+IVY_PRESENT=false
+IVY_SHIM_PRESENT=false
+
+for target in "$@"; do
+ case $target in
+ cactus) CACTUS_PRESENT=true ;;
+ ivy) IVY_PRESENT=true ;;
+ ivy_shim) IVY_SHIM_PRESENT=true ;;
+ *) echo "Invalid target $target"; exit 1 ;;
+ esac
+done
+
+echo -e "{" > "$GENERATED_JSON"
# To demonstrate communication between SP's, two cactus S-EL1 instances used.
# To also test mapping of the RXTX region a third cactus S-EL1 instance is used.
# cactus-primary, cactus-secondary and cactus-tertiary have same binary but
# different partition manifests.
-if [ "$1" == "cactus" ]; then
- echo -e "{\n\t\"$1-primary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1.dts\",\n \
- \t\"owner\": \"SiP\"\n\t},\n\n\t\"$1-secondary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1-secondary.dts\",\n \
- \t\"owner\": \"Plat\"\n\t},\n\n\t\"$1-tertiary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1-tertiary.dts\" \n \
- }\n}" \
- > "$GENERATED_JSON"
+if [ $CACTUS_PRESENT == "true" ]; then
+ cat >> "$GENERATED_JSON" << EOF
+"cactus-primary" : {
+ "image": {
+ "file": "cactus.bin",
+ "offset":"0x2000"
+ },
+ "pm": {
+ "file": "cactus.dts",
+ "offset": "0x1000"
+ },
+ "physical-load-address": "0x7000000",
+ "owner": "SiP"
+},
+
+"cactus-secondary" : {
+ "image": "cactus.bin",
+ "pm": "cactus-secondary.dts",
+ "physical-load-address": "0x7100000",
+ "owner": "Plat"
+},
+
+"cactus-tertiary" : {
+ "image": "cactus.bin",
+ "pm": "cactus-tertiary.dts",
+ "physical-load-address": "0x7200000",
+ "owner": "Plat"
+EOF
+ PARTITION_ALREADY_PRESENT=true
+fi
+
+if [ $IVY_PRESENT == "true" ]; then
+ if [ $PARTITION_ALREADY_PRESENT == "true" ]; then
+ echo -ne "\t},\n\n" >> "$GENERATED_JSON"
+ fi
+
+ cat >> "$GENERATED_JSON" << EOF
+"ivy" : {
+ "image": "ivy.bin",
+ "pm": "ivy-sel0.dts",
+ "physical-load-address": "0x7600000",
+ "owner": "Plat"
+}
+EOF
+
+ PARTITION_ALREADY_PRESENT=true
+elif [ $IVY_SHIM_PRESENT == "true" ]; then
+ if [ $PARTITION_ALREADY_PRESENT == "true" ]; then
+ echo -ne "\t},\n\n" >> "$GENERATED_JSON"
+ fi
+cat >> "$GENERATED_JSON" << EOF
+"ivy" : {
+ "image": "ivy.bin",
+ "pm": "ivy-sel1.dts",
+ "physical-load-address": "0x7600000",
+ "owner": "Plat"
+}
+EOF
+
+ PARTITION_ALREADY_PRESENT=true
else
- echo -e "\nWarning: Only Cactus is supported as Secure Partition\n"
+ echo -ne "\t},\n" >> "$GENERATED_JSON"
fi
+
+echo -e "\n}" >> "$GENERATED_JSON"
diff --git a/tools/generate_test_list/generate_test_list.pl b/tools/generate_test_list/generate_test_list.pl
deleted file mode 100755
index 702a9837b..000000000
--- a/tools/generate_test_list/generate_test_list.pl
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env perl
-
-#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-#
-# Arg0: Name of the C file to generate.
-# Arg1: Name of the header file to generate.
-# Arg2: XML file that contains the list of test suites.
-# Arg3: Text file listing the files to skip. Takes precedence over Arg2 file.
-#
-
-my $TESTLIST_SRC_FILENAME = $ARGV[0];
-my $TESTLIST_HDR_FILENAME = $ARGV[1];
-my $XML_TEST_FILENAME = $ARGV[2];
-my $SKIPPED_LIST_FILENAME = $ARGV[3];
-
-use strict;
-use warnings;
-use File::Temp;
-use XML::LibXML;
-
-# Create the source & header files
-open FILE_SRC, ">", $TESTLIST_SRC_FILENAME or die $!;
-open FILE_HDR, ">", $TESTLIST_HDR_FILENAME or die $!;
-
-#
-# Open the test list
-#
-my $doc;
-my $testsuite_elem;
-my $failure_elem;
-
-if (-e $XML_TEST_FILENAME) {
- my $parser = XML::LibXML->new(expand_entities => 1);
- $doc = $parser->parse_file($XML_TEST_FILENAME);
- $parser->process_xincludes($doc);
-} else {
- exit 1
-}
-
-# We assume if there is a root then it is a 'testsuites' element
-my $root = $doc->documentElement();
-my @all_testcases = $root->findnodes("//testcase");
-my @all_testsuites = $root->findnodes("//testsuite");
-
-
-# Check the validity of the XML file:
-# - A testsuite name must be unique.
-# - A testsuite name must not contain a '/' character.
-# - All test cases belonging to a given testsuite must have unique names.
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- if ($testsuite_name =~ /\//) {
- print "ERROR: $XML_TEST_FILENAME: Invalid test suite name '$testsuite_name'.\n";
- print "ERROR: $XML_TEST_FILENAME: Test suite names can't include a '/' character.\n";
- exit 1;
- }
- my @testsuites = $root->findnodes("//testsuite[\@name='$testsuite_name']");
- if (@testsuites != 1) {
- print "ERROR: $XML_TEST_FILENAME: Can't have 2 test suites named '$testsuite_name'.\n";
- exit 1;
- }
-
- my @testcases_of_testsuite = $testsuite->findnodes("testcase");
- for my $testcase (@testcases_of_testsuite) {
- my $testcase_name = $testcase->getAttribute('name');
- my @testcases = $testsuite->findnodes("testcase[\@name='$testcase_name']");
- if (@testcases != 1) {
- print "ERROR: $XML_TEST_FILENAME: Can't have 2 tests named '$testsuite_name/$testcase_name'.\n";
- exit 1;
- }
- }
-}
-
-#
-# Get the list of tests to skip.
-# For each test to skip, find it in the XML tree and remove its node.
-#
-if (($SKIPPED_LIST_FILENAME) && (open SKIPPED_FILE, "<", $SKIPPED_LIST_FILENAME)) {
- my @lines = <SKIPPED_FILE>;
- close $SKIPPED_LIST_FILENAME;
-
- # Remove the newlines from the end of each line.
- chomp @lines;
-
- my $line_no = 0;
- my $testsuite_name;
- my $testcase_name;
- my $index = 0;
-
- for my $line (@lines) {
- ++$line_no;
-
- # Skip empty lines.
- if ($line =~ /^ *$/) { next; }
- # Skip comments.
- if ($line =~ /^#/) { next; }
-
- ($testsuite_name, $testcase_name) = split('/', $line);
-
- my @testsuites = $root->findnodes("//testsuite[\@name=\"$testsuite_name\"]");
- if (!@testsuites) {
- print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test suite '$testsuite_name' doesn't exist or has already been deleted.\n";
- next;
- }
-
- if (!defined $testcase_name) {
- print "INFO: Testsuite '$testsuite_name' will be skipped.\n";
- $testsuites[0]->unbindNode();
- next;
- }
-
- my @testcases = $testsuites[0]->findnodes("testcase[\@name=\"$testcase_name\"]");
- if (!@testcases) {
- print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test case '$testsuite_name/$testcase_name' doesn't exist or has already been deleted.\n";
- next;
- }
-
- print "INFO: Testcase '$testsuite_name/$testcase_name' will be skipped.\n";
- $testcases[0]->unbindNode();
- }
-}
-
-
-@all_testcases = $root->findnodes("//testcase");
-
-#
-# Generate the test function prototypes
-#
-my $testcase_count = 0;
-
-print FILE_SRC "#include \"tftf.h\"\n\n";
-
-for my $testcase (@all_testcases) {
- my $testcase_function = $testcase->getAttribute('function');
- $testcase_count++;
- print FILE_SRC "test_result_t $testcase_function(void);\n";
-}
-
-#
-# Generate the header file.
-#
-print FILE_HDR "#ifndef __TEST_LIST_H__\n";
-print FILE_HDR "#define __TEST_LIST_H__\n\n";
-print FILE_HDR "#define TESTCASE_RESULT_COUNT $testcase_count\n\n";
-print FILE_HDR "#endif\n";
-
-#
-# Generate the lists of testcases
-#
-my $testsuite_index = 0;
-my $testcase_index = 0;
-@all_testsuites = $root->findnodes("//testsuite");
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- my @testcases = $testsuite->findnodes("//testsuite[\@name='$testsuite_name']//testcase");
-
- print FILE_SRC "\nconst test_case_t testcases_${testsuite_index}[] = {\n";
-
- for my $testcase (@testcases) {
- my $testcase_name = $testcase->getAttribute('name');
- my $testcase_description = $testcase->getAttribute('description');
- my $testcase_function = $testcase->getAttribute('function');
-
- if (!defined($testcase_description)) { $testcase_description = ""; }
-
- print FILE_SRC " { $testcase_index, \"$testcase_name\", \"$testcase_description\", $testcase_function },\n";
-
- $testcase_index++;
- }
- print FILE_SRC " { 0, NULL, NULL, NULL }\n";
- print FILE_SRC "};\n\n";
- $testsuite_index++;
-}
-
-#
-# Generate the lists of testsuites
-#
-$testsuite_index = 0;
-print FILE_SRC "const test_suite_t testsuites[] = {\n";
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- my $testsuite_description = $testsuite->getAttribute('description');
- print FILE_SRC " { \"$testsuite_name\", \"$testsuite_description\", testcases_${testsuite_index} },\n";
- $testsuite_index++;
-}
-print FILE_SRC " { NULL, NULL, NULL }\n";
-print FILE_SRC "};\n";
-
diff --git a/tools/generate_test_list/generate_test_list.py b/tools/generate_test_list/generate_test_list.py
new file mode 100755
index 000000000..1fbe8ce7e
--- /dev/null
+++ b/tools/generate_test_list/generate_test_list.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2023 Google LLC. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+"""Generates the same output as generate_test_list.pl, but using python.
+
+Takes an xml file describing a list of testsuites as well as a skip list file
+and outputs a src and header file that refers to those tests.
+"""
+
+# This script was linted and formatted using the following commands:
+# autoflake -ir --remove-all-unused-imports --expand-star-imports \
+# --remove-duplicate-keys --remove-unused-variables tools/generate_test_list/
+# isort tools/generate_test_list/
+# black tools/generate_test_list/ --line-length 100
+# flake8 tools/generate_test_list/ --max-line-length 100
+
+import argparse
+import os.path
+import urllib.parse
+import xml.etree.ElementInclude
+import xml.parsers.expat
+from dataclasses import dataclass
+from typing import Dict, List
+from xml.etree.ElementTree import Element, TreeBuilder
+
+TESTS_LIST_H_TPL_FILENAME = "tests_list.h.tpl"
+TESTCASE_COUNT_TEMPLATE = "{{testcase_count}}"
+
+TESTS_LIST_C_TPL_FILENAME = "tests_list.c.tpl"
+FUNCTION_PROTOTYPES_TEMPLATE = "{{function_prototypes}}"
+TESTCASE_LISTS_TEMPLATE = "{{testcase_lists}}"
+TESTSUITES_LIST_TEMPLATE = "{{testsuites_list}}"
+
+XINCLUDE_INCLUDE = "xi:include"
+
+MAX_EXPANSION_DEPTH = 5
+
+# Intermediate representation classes.
+
+
+@dataclass
+class TestCase:
+ """Class representing a single TFTF test case."""
+
+ name: str
+ function: str
+ description: str = ""
+
+
+@dataclass
+class TestSuite:
+ """Class representing a single TFTF test suite."""
+
+ name: str
+ description: str
+ testcases: List[TestCase]
+
+
+def find_element_with_name_or_return_none(iterable, name: str):
+ """Looks through iterable for an element whose 'name' field matches name."""
+ return next(filter(lambda x: x.name == name, iterable), None)
+
+
+def parse_testsuites_element_into_ir(root: Element) -> List[TestSuite]:
+ """Given the root of a parsed XML file, construct TestSuite objects."""
+ testsuite_xml_elements = root.findall(".//testsuite")
+
+ testsuites = []
+ # Parse into IR
+ for testsuite in testsuite_xml_elements:
+ testcases = []
+ for testcase in testsuite.findall("testcase"):
+ testcases += [
+ TestCase(
+ testcase.get("name"),
+ testcase.get("function"),
+ testcase.get("description", default=""),
+ )
+ ]
+ testsuites += [TestSuite(testsuite.get("name"), testsuite.get("description"), testcases)]
+
+ return testsuites
+
+
+# In order to keep this script standalone (meaning no libraries outside of the
+# standard library), we have to do our own assembling of the XML Elements. This
+# is necessary because python doesn't give us a nice way to support external
+# entity expansion. As such we have to use the low level expat parser and build
+# the tree using TreeBuilder.
+
+
+def parse_xml_no_xinclude_expansion(filename: str) -> Element:
+ """Parse filename into an ElementTree.Element, following external entities."""
+ xml_dir_root = os.path.dirname(filename)
+ with open(filename) as fobj:
+ xml_contents = fobj.read()
+
+ parser = xml.parsers.expat.ParserCreate()
+ parser.SetParamEntityParsing(xml.parsers.expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
+
+ global treebuilder
+ treebuilder = TreeBuilder()
+ global expansion_depth
+ expansion_depth = 0
+
+ def start_element_handler(name: str, attributes):
+ # ElementInclude.include requires that the XInclude namespace is expanded.
+ if name == "xi:include":
+ name = "{http://www.w3.org/2001/XInclude}include"
+ treebuilder.start(name, attributes)
+
+ def end_element_handler(name: str):
+ treebuilder.end(name)
+
+ def external_entity_ref_handler(context, base, systemId, publicId):
+ global expansion_depth
+
+ external_entity_parser = parser.ExternalEntityParserCreate(context, "utf-8")
+ assign_all_parser_callbacks(external_entity_parser)
+ with open(os.path.join(xml_dir_root, systemId)) as fobj:
+ sub_xml_contents = fobj.read()
+ expansion_depth += 1
+ if expansion_depth > MAX_EXPANSION_DEPTH:
+ raise ValueError("Max entity expansion depth reached")
+
+ external_entity_parser.Parse(sub_xml_contents, True)
+ expansion_depth -= 1
+ return 1
+
+ def assign_all_parser_callbacks(p):
+ p.StartElementHandler = start_element_handler
+ p.EndElementHandler = end_element_handler
+ p.ExternalEntityRefHandler = external_entity_ref_handler
+
+ assign_all_parser_callbacks(parser)
+ parser.Parse(xml_contents, True)
+
+ return treebuilder.close()
+
+
+# Older versions of python3 don't support ElementInclude.include's base_url
+# kwarg. This callable class works around this.
+# base_url allows XInclude paths relative to the toplevel XML file to be used.
+class ElementIncludeLoaderAdapter:
+ """Adapts between ElementInclude's loader interface and our XML parser."""
+
+ def __init__(self, base_url: str):
+ self.base_url = base_url
+
+ def __call__(self, href: str, parse: str):
+ if parse != "xml":
+ raise ValueError("'parse' must be 'xml'")
+
+ return parse_xml_no_xinclude_expansion(urllib.parse.urljoin(self.base_url, href))
+
+
+def parse_testsuites_from_file(filename: str) -> List[TestSuite]:
+ """Given an XML file, parse the contents into a List[TestSuite]."""
+ root = parse_xml_no_xinclude_expansion(filename)
+
+ base_url = os.path.abspath(filename)
+ loader = ElementIncludeLoaderAdapter(base_url)
+ xml.etree.ElementInclude.include(root, loader=loader)
+
+ if root.tag == "testsuites":
+ testsuites_xml_elements = [root]
+ elif root.tag == "document":
+ testsuites_xml_elements = root.findall("testsuites")
+ else:
+        raise ValueError(f"Unexpected root tag '{root.tag}' in {filename}")
+
+ testsuites = []
+
+ for testsuites_xml_element in testsuites_xml_elements:
+ testsuites += parse_testsuites_element_into_ir(testsuites_xml_element)
+
+ return testsuites
+
+
+def check_validity_of_names(testsuites: List[TestSuite]):
+ """Checks that all testsuite and testcase names are valid."""
+ testsuite_name_set = set()
+ for ts in testsuites:
+ if "/" in ts.name:
+ raise ValueError(f"ERROR: {args.xml_test_filename}: Invalid test suite name {ts.name}")
+
+ if ts.name in testsuite_name_set:
+ raise ValueError(
+ f"ERROR: {args.xml_test_filename}: Can't have 2 test suites named " f"{ts.name}"
+ )
+
+ testsuite_name_set.add(ts.name)
+
+ testcase_name_set = set()
+ for tc in ts.testcases:
+ if tc.name in testcase_name_set:
+ raise ValueError(
+ f"ERROR: {args.xml_test_filename}: Can't have 2 tests named " f"{tc.name}"
+ )
+
+ testcase_name_set.add(tc.name)
+
+
+def remove_skipped_tests(testsuites: List[TestSuite], skip_tests_filename: str):
+ """Remove skipped tests from testsuites based on skip_tests_filename."""
+ with open(skip_tests_filename) as skipped_file:
+ skipped_file_lines = skipped_file.readlines()
+ for i, l in enumerate(skipped_file_lines):
+ line = l.strip()
+
+ # Skip empty lines and comments
+ if not line or line[0] == "#":
+ continue
+
+ testsuite_name, sep, testcase_name = line.partition("/")
+
+ testsuite = find_element_with_name_or_return_none(testsuites, testsuite_name)
+
+ if not testsuite:
+ print(
+ f"WARNING: {skip_tests_filename}:{i + 1}: Test suite "
+ f"'{testsuite_name}' doesn't exist or has already been deleted."
+ )
+ continue
+
+ if not testcase_name:
+ print(f"INFO: Test suite '{testsuite_name}' will be skipped")
+ testsuites = list(filter(lambda x: x.name != testsuite_name, testsuites))
+ continue
+
+ testcase = find_element_with_name_or_return_none(testsuite.testcases, testcase_name)
+ if not testcase:
+ print(
+ f"WARNING: {skip_tests_filename}:{i + 1}: Test case "
+ f"'{testsuite_name}/{testcase_name} doesn't exist or has already "
+ "been deleted"
+ )
+ continue
+
+ print(f"INFO: Test case '{testsuite_name}/{testcase_name}' will be skipped.")
+ testsuite.testcases.remove(testcase)
+
+ return testsuites
+
+
+def generate_function_prototypes(testcases: List[TestCase]):
+ """Generates function prototypes for the provided list of testcases."""
+ return [f"test_result_t {t.function}(void);" for t in testcases]
+
+
+def generate_testcase_lists(testsuites: List[TestSuite]):
+ """Generates the lists that enumerate the individual testcases in each testsuite."""
+ testcase_lists_contents = []
+ testcase_index = 0
+ for i, testsuite in enumerate(testsuites):
+ testcase_lists_contents += [f"\nconst test_case_t testcases_{i}[] = {{"]
+ for testcase in testsuite.testcases:
+ testcase_lists_contents += [
+ f' {{ {testcase_index}, "{testcase.name}", '
+ f'"{testcase.description}", {testcase.function} }},'
+ ]
+ testcase_index += 1
+ testcase_lists_contents += [" { 0, NULL, NULL, NULL }"]
+ testcase_lists_contents += ["};\n"]
+
+ return testcase_lists_contents
+
+
+def generate_testsuite_lists(testsuites: List[TestSuite]):
+ """Generates the list of testsuites."""
+ testsuites_list_contents = []
+ testsuites_list_contents += ["const test_suite_t testsuites[] = {"]
+ for i, testsuite in enumerate(testsuites):
+ testsuites_list_contents += [
+ f' {{ "{testsuite.name}", "{testsuite.description}", testcases_{i} }},'
+ ]
+ testsuites_list_contents += [" { NULL, NULL, NULL }"]
+ testsuites_list_contents += ["};"]
+ return testsuites_list_contents
+
+
+def generate_file_from_template(
+ template_filename: str, output_filename: str, template: Dict[str, str]
+):
+ """Given a template file, generate an output file based on template dictionary."""
+ with open(template_filename) as template_fobj:
+ template_contents = template_fobj.read()
+
+ output_contents = template_contents
+ for to_find, to_replace in template.items():
+ output_contents = output_contents.replace(to_find, to_replace)
+
+ with open(output_filename, "w") as output_fobj:
+ output_fobj.write(output_contents)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "testlist_src_filename",
+ type=str,
+ help="Output source filename",
+ )
+ parser.add_argument(
+ "testlist_hdr_filename",
+ type=str,
+ help="Output header filename",
+ )
+ parser.add_argument("xml_test_filename", type=str, help="Input xml filename")
+ parser.add_argument(
+ "--plat-skip-file",
+ type=str,
+ help="Filename containing tests to skip for this platform",
+ dest="plat_skipped_list_filename",
+ required=False,
+ )
+ parser.add_argument(
+ "--arch-skip-file",
+ type=str,
+ help="Filename containing tests to skip for this architecture",
+ dest="arch_skipped_list_filename",
+ required=False,
+ )
+ args = parser.parse_args()
+
+ testsuites = parse_testsuites_from_file(args.xml_test_filename)
+
+ check_validity_of_names(testsuites)
+
+ if args.plat_skipped_list_filename:
+ testsuites = remove_skipped_tests(testsuites, args.plat_skipped_list_filename)
+
+ if args.arch_skipped_list_filename:
+ testsuites = remove_skipped_tests(testsuites, args.arch_skipped_list_filename)
+
+ # Flatten all testcases
+ combined_testcases = [tc for ts in testsuites for tc in ts.testcases]
+
+ # Generate header file
+ generate_file_from_template(
+ os.path.join(os.path.dirname(__file__), TESTS_LIST_H_TPL_FILENAME),
+ args.testlist_hdr_filename,
+ {TESTCASE_COUNT_TEMPLATE: str(len(combined_testcases))},
+ )
+
+ # Generate the source file
+ all_function_prototypes = generate_function_prototypes(combined_testcases)
+ testcase_lists_contents = generate_testcase_lists(testsuites)
+ testsuites_list_contents = generate_testsuite_lists(testsuites)
+
+ generate_file_from_template(
+ os.path.join(os.path.dirname(__file__), TESTS_LIST_C_TPL_FILENAME),
+ args.testlist_src_filename,
+ {
+ FUNCTION_PROTOTYPES_TEMPLATE: "\n".join(all_function_prototypes),
+ TESTCASE_LISTS_TEMPLATE: "\n".join(testcase_lists_contents),
+ TESTSUITES_LIST_TEMPLATE: "\n".join(testsuites_list_contents),
+ },
+ )
diff --git a/tools/generate_test_list/tests_list.c.tpl b/tools/generate_test_list/tests_list.c.tpl
new file mode 100644
index 000000000..115cb715a
--- /dev/null
+++ b/tools/generate_test_list/tests_list.c.tpl
@@ -0,0 +1,7 @@
+#include "tftf.h"
+
+{{function_prototypes}}
+
+{{testcase_lists}}
+
+{{testsuites_list}}
diff --git a/tools/generate_test_list/tests_list.h.tpl b/tools/generate_test_list/tests_list.h.tpl
new file mode 100644
index 000000000..963b6985f
--- /dev/null
+++ b/tools/generate_test_list/tests_list.h.tpl
@@ -0,0 +1,6 @@
+#ifndef __TEST_LIST_H__
+#define __TEST_LIST_H__
+
+#define TESTCASE_RESULT_COUNT {{testcase_count}}
+
+#endif // __TEST_LIST_H__