Improve readability of the script

Signed-off-by: Pengyu Lv <pengyu.lv@arm.com>
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 0baba1b..4d13676 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -64,7 +64,7 @@
     available = check_test_cases.collect_available_test_cases()
     for suite_case in available:
         hits = 0
-        for _comp, comp_outcomes in outcomes.items():
+        for comp_outcomes in outcomes.values():
             if suite_case in comp_outcomes["successes"] or \
                suite_case in comp_outcomes["failures"]:
                 hits += 1
@@ -96,8 +96,8 @@
 def analyze_driver_vs_reference(results: Results, outcomes,
                                 component_ref, component_driver,
                                 ignored_suites, ignored_tests=None):
-    """Check that all tests passed in the reference component are also
-    passed in the corresponding driver component.
+    """Check that all tests that pass in the reference component also
+    pass in the corresponding driver component.
     Skip:
     - full test suites provided in ignored_suites list
     - only some specific test inside a test suite, for which the corresponding
@@ -144,7 +144,7 @@
 An outcome collection is a dictionary presentation of the outcome file:
 ```
 outcomes = {
-    "<config>": {
+    "<component>": {
         "successes": frozenset(["<suite_case>", ... ]),
         "failures": frozenset(["<suite_case>", ...])
     }
@@ -156,19 +156,19 @@
     outcomes = {}
     with open(outcome_file, 'r', encoding='utf-8') as input_file:
         for line in input_file:
-            (_platform, config, suite, case, result, _cause) = line.split(';')
+            (_platform, component, suite, case, result, _cause) = line.split(';')
             suite_case = ';'.join([suite, case])
-            if config not in outcomes:
-                outcomes[config] = {"successes":[], "failures":[]}
+            if component not in outcomes:
+                outcomes[component] = {"successes":[], "failures":[]}
             if result == 'PASS':
-                outcomes[config]['successes'].append(suite_case)
+                outcomes[component]['successes'].append(suite_case)
             elif result == 'FAIL':
-                outcomes[config]['failures'].append(suite_case)
+                outcomes[component]['failures'].append(suite_case)
 
     # Convert `list` to `frozenset` to improve search performance
-    for config in outcomes:
-        outcomes[config]['successes'] = frozenset(outcomes[config]['successes'])
-        outcomes[config]['failures'] = frozenset(outcomes[config]['failures'])
+    for component in outcomes:
+        outcomes[component]['successes'] = frozenset(outcomes[component]['successes'])
+        outcomes[component]['failures'] = frozenset(outcomes[component]['failures'])
 
     return outcomes
 
@@ -489,9 +489,9 @@
 
         KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
 
-        # If the outcome file already exists, we assume that the user wants to
-        # perform the comparison.
-        # Share the contents among tasks to improve performance.
+        # If the outcome file exists, parse it once and share the result
+        # among tasks to improve performance.
+        # Otherwise, it will be generated by do_analyze_driver_vs_reference.
         if os.path.exists(options.outcomes):
             main_results.info("Read outcome file from {}.", options.outcomes)
             outcomes_or_file = read_outcome_file(options.outcomes)