repo: run codespell

Run codespell and replace known misspelling.

Signed-off-by: Nicola Mazzucato <nicola.mazzucato@arm.com>
Change-Id: I6bfcf67b27745c98059a433c929af637f9a02d8e
diff --git a/build_helper/build_helper.py b/build_helper/build_helper.py
index 90ea832..3075fe7 100755
--- a/build_helper/build_helper.py
+++ b/build_helper/build_helper.py
@@ -89,7 +89,7 @@
                   user_args.config)
             sys.exit(1)
     else:
-        print("Error: Configuration not specificed")
+        print("Error: Configuration not specified")
         sys.exit(1)
 
     # Build everything
diff --git a/build_helper/build_helper_config_maps.py b/build_helper/build_helper_config_maps.py
index 0d7aec1..15e2428 100644
--- a/build_helper/build_helper_config_maps.py
+++ b/build_helper/build_helper_config_maps.py
@@ -86,7 +86,7 @@
     "FPON"         : ("-DCONFIG_TFM_ENABLE_FP=ON "
                       "-DTEST_S_FPU=ON -DTEST_NS_FPU=ON "),
     "LZOFF"        : "-DCONFIG_TFM_LAZY_STACKING=OFF ",
-    # Partiton
+    # Partition
     "PSOFF"        : "-DTFM_PARTITION_PROTECTED_STORAGE=OFF ",
     "PSCLEAR"      : "-DPS_ENCRYPTION=OFF -DPS_ROLLBACK_PROTECTION=OFF",
     "PSLIMIT"      : "-DPS_AES_KEY_USAGE_LIMIT=1000000",
diff --git a/build_helper/build_helper_configs.py b/build_helper/build_helper_configs.py
index 81c35b5..1660675 100755
--- a/build_helper/build_helper_configs.py
+++ b/build_helper/build_helper_configs.py
@@ -1644,7 +1644,7 @@
 if __name__ == '__main__':
     import os
 
-    # Default behavior is to export refference config when called
+    # Default behavior is to export reference config when called
     _dir = os.getcwd()
     from utils import save_json
     for _cname, _cfg in _builtin_configs.items():
diff --git a/eclair/external_sources.ecl b/eclair/external_sources.ecl
index f717cdc..3d52b38 100644
--- a/eclair/external_sources.ecl
+++ b/eclair/external_sources.ecl
@@ -29,5 +29,5 @@
 # in production release.
 -file_tag+={external, "^tf-m-tests/.*$"}
 
-# Ignore vendor platform specific soure code
+# Ignore vendor platform specific source code
 -file_tag+={external, "^trusted-firmware-m/platform/ext/target/.*$"}
diff --git a/jenkins/verify.py b/jenkins/verify.py
index 191dfd2..d58f434 100755
--- a/jenkins/verify.py
+++ b/jenkins/verify.py
@@ -99,7 +99,7 @@
         default=os.getenv("GERRIT_PATCHSET_REVISION"),
     )
     PARSER.add_argument(
-        "--verify-name", help="Name to give the job verifcation message."
+        "--verify-name", help="Name to give the job verification message."
     )
     PARSER.add_argument(
         "--user", help="Username to authenticate as.", default=os.getenv("VERIFY_USER")
diff --git a/lava_helper/lava_submit_jobs.py b/lava_helper/lava_submit_jobs.py
index 3a12d68..ddf349c 100755
--- a/lava_helper/lava_submit_jobs.py
+++ b/lava_helper/lava_submit_jobs.py
@@ -38,7 +38,7 @@
 
 
 def submit_lava_jobs(user_args, job_dir=""):
-    """ Submit a job to LAVA backend, block untill it is completed, and
+    """ Submit a job to LAVA backend, block until it is completed, and
     fetch the results files if successful. If not, calls sys exit with 1
     return code """
 
diff --git a/report_parser/report_parser.py b/report_parser/report_parser.py
index 876416a..e340c27 100644
--- a/report_parser/report_parser.py
+++ b/report_parser/report_parser.py
@@ -3,10 +3,10 @@
 """ report_parser.py:
 
     Report parser parses openci json reports and conveys the invormation in a
-    one or more standard formats (To be implememented)
+    one or more standard formats (To be implemented)
 
     After all information is captured it validates the success/failure status
-    and can change the script exit code for intergration with standard CI
+    and can change the script exit code for integration with standard CI
     executors.
     """
 
@@ -100,7 +100,7 @@
         print(err_msg)
         raise Exception(err_msg)
 
-    # Create a dataset for the entires of known data format
+    # Create a dataset for the entries of known data format
     known_data = {n: {} for n in
                   set(known_content_types).intersection(set(cpaths.keys()))}
 
@@ -248,7 +248,7 @@
 
         sever = E.pop("severity")
 
-        # Sort it based on serverity
+        # Sort it based on severity
         try:
             out_data["report"][sever].append(E)
         except KeyError:
@@ -328,7 +328,7 @@
 
 
 def jenkins_mdt_collect(out_f):
-    """ Collects Jenkins enviroment information and stores
+    """ Collects Jenkins environment information and stores
      it in a key value list """
 
     # Jenkins environment parameters are always valid
@@ -350,7 +350,7 @@
                            "GERRIT_CHANGE_OWNER_EMAIL",
                            "GERRIT_PATCHSET_UPLOADER_EMAIL"]
 
-    # Find as mamny of the variables in environent
+    # Find as many of the variables in environment
     el = set(os.environ).intersection(set(jenkins_env_keys +
                                           gerrit_trigger_keys))
     # Format it in key:value pairs
@@ -456,7 +456,7 @@
     in_rep = load_json(user_args.report)
     report_eval = None
 
-    # Extract the required condition for evalutation to pass
+    # Extract the required condition for evaluation to pass
     pass_key, pass_val = split_keys(user_args.set_pass)
 
     print("Evaluation will succeed if \"%s\" is \"%s\"" % (pass_key,
@@ -485,7 +485,7 @@
             except Exception as e:
                 report_eval = False
 
-        # If every singel field need to be succesfful, invert the check and
+        # If every single field needs to be successful, invert the check and
         # look for those who are not
         elif user_args.allpass:
             try:
@@ -509,7 +509,7 @@
 def main(user_args):
     """ Main logic """
 
-    # Metadat Collect Mode
+    # Metadata Collect Mode
     if user_args.collect:
         metadata_collect(user_args)
         return
@@ -542,7 +542,7 @@
                         dest="allpass",
                         action="store_true",
                         help="When set and a list is provided, all entries"
-                        "must be succefull for evaluation to pass")
+                        "must be successful for evaluation to pass")
     parser.add_argument("-o", "--one-field-must-pass",
                         dest="onepass",
                         action="store",
@@ -570,7 +570,7 @@
                         nargs='*',
                         help=("Pass a space separated list of paths in the"
                               "following format: -p mbedtls=/yourpath/"
-                              "fpv=/another/path .Used in conjuction with -n"))
+                              "fpv=/another/path .Used in conjunction with -n"))
     parser.add_argument("-g", "--git-info",
                         dest="git_info",
                         action="store",
@@ -594,7 +594,7 @@
     parser.add_argument("-j", "--jenkins-info",
                         dest="jenkins_info",
                         action="store_true",
-                        help="Extract jenkings and gerrit trigger enviroment "
+                        help="Extract jenkins and gerrit trigger environment "
                              "information fr. Requires --colect directive."
                              " Optional parameter --output-file ")
     parser.add_argument("-l", "--collate-report",
@@ -615,7 +615,7 @@
                              "following format: -l report_key_0=value_0"
                              " report_key_1=value_0. Filter will remote all"
                              "entries of the original report but the ones"
-                             "mathing the key:value pairs defined and print it"
+                             "matching the key:value pairs defined and print it"
                              "to stdout.Optional parameter --output-file")
     return parser.parse_args()
 
diff --git a/script/static-checks/check-include-order.py b/script/static-checks/check-include-order.py
index 4b0d19d..04bc72f 100755
--- a/script/static-checks/check-include-order.py
+++ b/script/static-checks/check-include-order.py
@@ -128,8 +128,8 @@
     of the lines variable, marked with [1], is intentional and must come
     after the yield. That's because we must yield the (name, lines) tuple
     after we have found the name of the next section but before we assign the
-    name and start collecting lines. Further, [2] is required to yeild the
-    last block as there will not be a block start delimeter at the end of
+    name and start collecting lines. Further, [2] is required to yield the
+    last block as there will not be a block start delimiter at the end of
     the stream.
     """
     lines = []
diff --git a/tfm_ci_pylib/lava_rpc_connector.py b/tfm_ci_pylib/lava_rpc_connector.py
index e6c391e..283fd25 100644
--- a/tfm_ci_pylib/lava_rpc_connector.py
+++ b/tfm_ci_pylib/lava_rpc_connector.py
@@ -69,7 +69,7 @@
         super(LAVA_RPC_connector, self).__init__(server_addr)
 
     def _rpc_cmd_raw(self, cmd, params=None):
-        """ Run a remote comand and return the result. There is no constrain
+        """ Run a remote command and return the result. There is no constraint
         check on the syntax of the command. """
 
         cmd = "self.%s(%s)" % (cmd, params if params else "")
@@ -196,7 +196,7 @@
         return self.scheduler.job_state(job_id)["job_state"]
 
     def cancel_job(self, job_id):
-        """ Cancell job with id=job_id. Returns True if successfull """
+        """ Cancel job with id=job_id. Returns True if successful """
 
         return self.scheduler.jobs.cancel(job_id)
 
diff --git a/tfm_ci_pylib/structured_task.py b/tfm_ci_pylib/structured_task.py
index 5e44481..80119d9 100644
--- a/tfm_ci_pylib/structured_task.py
+++ b/tfm_ci_pylib/structured_task.py
@@ -2,7 +2,7 @@
 
 """ structured_task.py:
 
-    A generic abstraction class for executing a task with prerequesites and
+    A generic abstraction class for executing a task with prerequisites and
     post execution action """
 
 from __future__ import print_function
@@ -62,7 +62,7 @@
 
     @abc.abstractmethod
     def pre_exec(self, eval_ret):
-        """ Tasks that set-up execution enviroment """
+        """ Tasks that set-up execution environment """
 
     @abc.abstractmethod
     def task_exec(self):
diff --git a/tfm_ci_pylib/tfm_build_manager.py b/tfm_ci_pylib/tfm_build_manager.py
index 7ebf157..cbc4ecd 100644
--- a/tfm_ci_pylib/tfm_build_manager.py
+++ b/tfm_ci_pylib/tfm_build_manager.py
@@ -2,7 +2,7 @@
 
 """ tfm_build_manager.py:
 
-    Controlling class managing multiple build configruations for tfm """
+    Controlling class managing multiple build configurations for tfm """
 
 from __future__ import print_function
 from json import tool
@@ -157,7 +157,7 @@
         """ """
 
     def override_tbm_cfg_params(self, config, override_keys, **params):
-        """ Using a dictionay as input, for each key defined in
+        """ Using a dictionary as input, for each key defined in
         override_keys it will replace the config[key] entries with
         the key=value parameters provided """
 
@@ -272,7 +272,7 @@
                               img_sizes=self._tbm_img_sizes,
                               relative_paths=self._tbm_relative_paths))
         # When a seed pool is provided iterate through the entries
-        # and update platform spefific parameters
+        # and update platform specific parameters
         elif len(self._tbm_build_cfg):
             print("\r\n_tbm_build_cfg %s\r\n tbm_common_cfg %s\r\n" \
              % (self._tbm_build_cfg, self.tbm_common_cfg))
@@ -452,7 +452,7 @@
             return False
 
     def post_exec(self, eval_ret):
-        """ Generate a report and fail the script if build == unsuccessfull"""
+        """ Generate a report and fail the script if build == unsuccessful"""
 
         self.print_summary()
         if not eval_ret:
@@ -510,14 +510,14 @@
         abs_code_dir = os.path.abspath(os.path.expanduser(abs_code_dir))
         static_cfg["codebase_root_dir"] = abs_code_dir
 
-        # seed_params is an optional field. Do not proccess if it is missing
+        # seed_params is an optional field. Do not process if it is missing
         if "seed_params" in cfg:
             comb_cfg = cfg["seed_params"]
-            # Generate a list of all possible confugration combinations
+            # Generate a list of all possible configuration combinations
             ret_cfg = TFM_Build_Manager.generate_config_list(comb_cfg,
                                                              static_cfg)
 
-            # valid is an optional field. Do not proccess if it is missing
+            # valid is an optional field. Do not process if it is missing
             if "valid" in cfg:
                 # Valid configurations(Need to build)
                 valid_cfg = cfg["valid"]
@@ -527,7 +527,7 @@
                     static_cfg,
                     valid_cfg))
 
-            # invalid is an optional field. Do not proccess if it is missing
+            # invalid is an optional field. Do not process if it is missing
             if "invalid" in cfg:
                 # Invalid configurations(Do not build)
                 invalid_cfg = cfg["invalid"]
@@ -545,7 +545,7 @@
             self.simple_config = True
         return ret_cfg, static_cfg
 
-    # ----- Override bellow methods when subclassing for other projects ----- #
+    # ----- Override below methods when subclassing for other projects ----- #
 
     def print_summary(self):
         """ Print an comprehensive list of the build jobs with their status """
@@ -588,7 +588,7 @@
                   " Please check config" % static_config["config_type"])
 
         ret_cfg = {}
-        # Notify the user for the rejected configuations
+        # Notify the user for the rejected configurations
         for i in config_list:
             # Convert named tuples to string in a brief format
             config_param = []
@@ -626,7 +626,7 @@
                     if n in seed_config.keys()]
             sorted_default_lst = [seed_config[k] for k in tags]
 
-            # If tags are not alligned with optional list entries quit
+            # If tags are not aligned with optional list entries quit
             if len(tags) != len(optional_list[0]):
                 print(len(tags), len(optional_list[0]))
                 print("Error, tags should be assigned to each "
@@ -634,7 +634,7 @@
                 return []
 
             # Replace wildcard ( "*") entries with every
-            # inluded in cfg variant
+            # included in cfg variant
             for k in optional_list:
                 # Pad the omitted values with wildcard char *
                 res_list = list(k) + ["*"] * (5 - len(k))
@@ -653,7 +653,7 @@
                 # Append the configuration to the existing ones
                 optional_cfg = dict(optional_cfg, **op_cfg)
 
-            # Notify the user for the optional configuations
+            # Notify the user for the optional configurations
             for i in optional_cfg.keys():
                 print("Generating optional config %s" % i)
         else:
diff --git a/tfm_ci_pylib/tfm_builder.py b/tfm_ci_pylib/tfm_builder.py
index 9ca6c59..25ce850 100644
--- a/tfm_ci_pylib/tfm_builder.py
+++ b/tfm_ci_pylib/tfm_builder.py
@@ -31,15 +31,15 @@
     """ Wrap around tfm cmake system and spawn a thread to build the project.
     """
     def __init__(self,
-                 name,      # Proccess name
+                 name,      # Process name
                  work_dir,  # Current working directory(ie logs)
                  cfg_dict,  # Input config dictionary of the following form
                             # input_dict = {"PROJ_CONFIG": "ConfigRegression",
                             #               "TARGET_PLATFORM": "MUSCA_A",
                             #               "COMPILER": "ARMCLANG",
                             #               "CMAKE_BUILD_TYPE": "Debug"}
-                 build_threads=4,   # Number of CPU thrads used in build
-                 silent=False,      # Silence stdout ouptut
+                 build_threads=4,   # Number of CPU threads used in build
+                 silent=False,      # Silence stdout output
                  img_sizes=False,   # Use arm-none-eabi-size for size info
                  relative_paths=False):  # Store relative paths in report
 
@@ -186,7 +186,7 @@
     def task_exec(self):
         """ Main tasks """
 
-        # Mark proccess running as status
+        # Mark process running as status
         self.set_status(-1)
         print("builder _tfb_cfg %s" % self._tfb_cfg)
 
@@ -215,7 +215,7 @@
         rep = {"build_cmd": "%s" % ",".join(build_cmds)}
         self.stash("Build Report", rep)
 
-        # Calll cmake to configure the project
+        # Call cmake to configure the project
         for build_cmd in build_cmds:
             # if a -j parameter is passed as user argument
             user_set_threads_match = threads_no_rex.findall(build_cmd)
@@ -272,7 +272,7 @@
 
         rep["artefacts"] = artefacts
 
-        # Proccess the artifacts into file structures
+        # Process the artifacts into file structures
         art_files = {}
         for art_item in artefacts:
             art_f = {"pl_source": 1,
diff --git a/tfm_ci_pylib/utils.py b/tfm_ci_pylib/utils.py
index 3ef5a48..373b99f 100755
--- a/tfm_ci_pylib/utils.py
+++ b/tfm_ci_pylib/utils.py
@@ -57,7 +57,7 @@
 
 def print_test_dict(data_dict,
                     pad_space=80,
-                    identation=5,
+                    indentation=5,
                     titl="Summary",
                     pad_char="*"):
 
@@ -65,8 +65,8 @@
     {"TEST NAME": "RESULT"} used in CI systems. It will also return
     the string which is printing """
 
-    # Calculate pad space bewteen variables x, y t achieve alignment on y
-    # taking into consideration a maximum aligment boundary p and
+    # Calculate pad space between variables x, y to achieve alignment on y
+    # taking into consideration a maximum alignment boundary p and
     # possible indentation i
     def flex_pad(x, y, p, i):
         return " " * (p - i * 2 - len(x) - len(y)) + "-> "
@@ -75,15 +75,15 @@
     tests = [k + flex_pad(k,
                           v,
                           pad_space,
-                          identation) + v for k, v in data_dict.items()]
+                          indentation) + v for k, v in data_dict.items()]
 
-    # Add the identation
-    tests = map(lambda x: " " * identation + x, tests)
+    # Add the indentation
+    tests = map(lambda x: " " * indentation + x, tests)
 
     # Convert to string
     tests = "\n".join(tests)
 
-    # Calcuate the top header padding ceiling any rounding errors
+    # Calculate the top header padding ceiling any rounding errors
     hdr_pad = (pad_space - len(titl) - 3) / 2
 
     if detect_python3():
@@ -104,7 +104,7 @@
 
 
 def print_test(t_name=None, t_list=None, status="failed", tname="Tests"):
-    """ Print a list of tests in a stuctured ascii table format """
+    """ Print a list of tests in a structured ascii table format """
 
     gfx_line1 = "=" * 80
     gfx_line2 = "\t" + "-" * 70
@@ -127,9 +127,9 @@
          summary=True):
 
     """ Using input of a test_lst and a test results dictionary in the format
-    of test_name: resut key-value pairs, test() method will verify that Every
+    of test_name: result key-value pairs, test() method will verify that Every
     single method in the test_list has been tested and passed. Pass and Failed,
-    status tests can be overriden and error_on_failed flag, exits the script
+    status tests can be overridden and error_on_failed flag, exits the script
     with failure if a single test fails or is not detected. Returns a json
     containing status and fields for each test passed/failed/missing, if error
     on failed is not set.
@@ -146,14 +146,14 @@
     # Calculate the difference of the two sets to find missing tests
     t_report["missing"] = list(set(test_list) - set(test_dict.keys()))
 
-    # Sor the items into the apropriate lists (failed or passed)
+    # Sort the items into the appropriate lists (failed or passed)
     # based on their status.
     for k, v in test_dict.items():
         # print(k, v)
         key = "passed" if v in pass_text else "failed"
         t_report[key] += [k]
 
-    # For the test to pass every singe test in test_list needs to be present
+    # For the test to pass every single test in test_list needs to be present
     # and be in the passed list
     if len(test_list) == len(t_report["passed"]):
         t_report["success"] = True
@@ -195,7 +195,7 @@
 
 
 def sort_dict(config_dict, sort_order_list=None):
-    """ Create a fixed order disctionary out of a config dataset """
+    """ Create a fixed order dictionary out of a config dataset """
 
     if sort_order_list:
         ret = OrderedDict([(k, config_dict[k]) for k in sort_order_list])
@@ -232,7 +232,7 @@
 
 def subprocess_log(cmd, log_f, prefix=None, append=False, silent=False):
     """ Run a command as subproccess an log the output to stdout and fileself.
-    If prefix is spefified it will be added as the first line in file """
+    If prefix is specified it will be added as the first line in file """
 
     with open(log_f, 'a' if append else "w") as F:
         if prefix:
@@ -255,7 +255,7 @@
 
 def run_proccess(cmd):
     """ Run a command as subproccess an log the output to stdout and file.
-    If prefix is spefified it will be added as the first line in file """
+    If prefix is specified it will be added as the first line in file """
 
     pcss = Popen(cmd,
                  stdout=PIPE,
@@ -268,7 +268,7 @@
 
 def get_pid_status(pid):
     """ Read the procfc in Linux machines to determine a proccess's statusself.
-    Returns status if proccess exists or None if it does not """
+    Returns status if process exists or None if it does not """
 
     try:
         with open("/proc/%s/status" % pid, "r") as F:
@@ -281,7 +281,7 @@
 
 def check_pid_status(pid, status_list):
     """ Check a proccess's status againist a provided lists and return True
-    if the proccess exists and has a status included in the list. (Linux) """
+    if the process exists and has a status included in the list. (Linux) """
 
     pid_status = get_pid_status(pid)
 
@@ -316,7 +316,7 @@
 
 def gen_cfg_combinations(name, categories, *args):
     """ Create a list of named tuples of `name`, with elements defined in a
-    space separated string `categories` and equal ammount of lists for said
+    space separated string `categories` and equal amount of lists for said
     categories provided as arguments. Order of arguments should match the
     order of the categories lists """
 
@@ -346,7 +346,7 @@
 
 def arm_non_eabi_size(filename):
     """ Run arm-non-eabi-size command and parse the output using regex. Will
-    return a tuple with the formated data as well as the raw output of the
+    return a tuple with the formatted data as well as the raw output of the
     command """
 
     size_info_rex = re.compile(r'^\s+(?P<text>[0-9]+)\s+(?P<data>[0-9]+)\s+'
@@ -368,7 +368,7 @@
 
 def fromelf(filename):
     """ Run fromelf command and parse the output using regex. Will
-    return a tuple with the formated data as well as the raw output of the
+    return a tuple with the formatted data as well as the raw output of the
     command """
 
     size_info_rex = re.compile(r'^\s+(?P<Code>[0-9]+)\s+(?P<data>[0-9]+)\s+'
@@ -476,7 +476,7 @@
 
     remote = proc_res[2]
     # Internal Gerrit specific code
-    # Intended for converting the git remote to a more usuable url
+    # Intended for converting the git remote to a more usable url
     known_remotes = ["https://gerrit.oss.arm.com",
                      "http://gerrit.mirror.oss.arm.com"]
 
diff --git a/utils/util_cmake.sh b/utils/util_cmake.sh
index c6ade28..c479c2d 100644
--- a/utils/util_cmake.sh
+++ b/utils/util_cmake.sh
@@ -22,7 +22,7 @@
 ##@returns path in windows format
 ##
 ##This function converts MSYS and cygwin paths to windows like path. Can be used
-##to print paths in error message which can be used withouth conversion. This
+##to print paths in error message which can be used without conversion. This
 ##way for example you can get "clickable" path in Eclipse error window.
 ##
 ##Usage:
@@ -44,7 +44,7 @@
 	then
 		#sed:
 		# 1. match /cygdrive/c/ like paths and convert to the c:/ format
-		# 2. if 1 did not match conver /c/ path to c:/ format
+		# 2. if 1 did not match, convert /c/ path to c:/ format
 		path=`builtin echo "$path"|sed "s/\/cygdrive\/\([a-zA-Z]\)\//\1:\//;tx;s/\/\([a-zA-Z]\)\//\1:\//;:x"`
 	fi
 	builtin echo "$path"
@@ -58,7 +58,7 @@
 ##This function converts a path to absolute full path. The function will return
 ##execution environment specific path (/cygdrive/ under Cygwin c:/ under MSys
 ##and /foo/bar under Linux).
-##The patch to conver may or may not contain a file name.
+##The path to convert may or may not contain a file name.
 ##
 ##Usage:
 ##  Assuming current directory is <i>c:/somedir1/somedir2</i>
@@ -113,7 +113,7 @@
 ##
 ##This function will generate the name for a build directory. The generated name
 ##follow the pattern "<build_base_dir>/build-<build_config_name>".
-##The generted path will be absolute.
+##The generated path will be absolute.
 ##
 ##Usage:
 ##  Assuming CMakeList.txt file is in /foo/bar directory.
@@ -147,7 +147,7 @@
 ##  Assuming CMakeList.txt file is in /foo/bar directory.
 ##  command | result
 ##  --------|-------
-## generate_project "/foo/bar" "/tmp/build" "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug"| Generate makefiles under /tmp/buid/build-test_build_st32 for project /foo/bar/CMakeLists.txt
+## generate_project "/foo/bar" "/tmp/build" "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug"| Generate makefiles under /tmp/build/build-test_build_st32 for project /foo/bar/CMakeLists.txt
 ##
 #This iis needed for doxygen for now.
 #!void generate_project(string dir, string build_base_dir, string build_config_name, string cmake_params){};