Sync scripts with Arm internal CI

This patch syncs the utility scripts and the scripts in the script
directory with the Arm internal CI.

Where a path update is required,
the changes have been commented out.

Signed-off-by: Zelalem <zelalem.aweke@arm.com>
Change-Id: Ifa4bd805e345184d1378e8423e5f878a2fbfbcd4
diff --git a/script/parse_lava_job.py b/script/parse_lava_job.py
index 9e331e3..aa85ca3 100755
--- a/script/parse_lava_job.py
+++ b/script/parse_lava_job.py
@@ -32,6 +32,53 @@
     print()
     sys.exit(0)
 
+def scmi_parse_phase(results, case, special_case):
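+    # Count pass/fail results for the LAVA test definition named by 'case'.
+    # Failures whose test case name matches 'special_case' are known false
+    # failures: they are counted separately and do not fail the job.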
+    pass_count = 0
+    fail_count = 0
+    false_fail_count = 0
+
+    for phase in results:
+        if phase["metadata"]["definition"] == case:
+            if phase["metadata"]["result"] == "pass":
+                pass_count += 1
+            else:
+                if special_case != "" and phase["metadata"]["case"] == special_case:
+                    false_fail_count += 1
+                else:
+                    fail_count += 1
+
+    print(case)
+    print("pass_count " + str(pass_count))
+    print("fail_count " + str(fail_count))
+    if special_case != "":
+        print("false_fail_count " + str(false_fail_count))
+    if fail_count > 0:
+        report_job_failure()
+
+def parse_scp_scmi_results():
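+    #
+    # Parse the SCMI test results suite by suite, using the module-level
+    # 'results' loaded from the LAVA results file.
+    #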
+    #
+    # All protocols but sensor
+    #
+    scmi_parse_phase(results, "scp-scmi-non-sensor-protocol", "")
+
+    #
+    # Protocol sensor, not reading_get
+    #
+    scmi_parse_phase(results, "scp-scmi-sensor-protocol", "")
+
+    #
+    # Protocol sensor, only reading_get
+    # In this case, we know that the reading from the VBIG sensor will fail
+    # because the big cluster is OFF. Thus we simply discard that false failure.
+    #
+    JUNO_PVT_SENSOR_VOLT_BIG = "1"
+    scmi_parse_phase(results, "scp-scmi-sensor-protocol-get", JUNO_PVT_SENSOR_VOLT_BIG)
+
+    #
+    # Parse the final overall results
+    # We already know the false failures, so discard them
+    #
+    scmi_parse_phase(results, "scp-scmi", "sensor_reading_get_sync_allsensorid_")
 
 def parse_cmd_line():
     parser = argparse.ArgumentParser(description="Parse results from LAVA. "
@@ -48,7 +95,7 @@
 args = parse_cmd_line()
 
 with open(args.file) as fd:
-    results = yaml.load(fd)
+    results = yaml.safe_load(fd)
 
     # Iterate through results. Find the element whose name is "job" in the
     # "lava" suite. It contains the result of the overall LAVA run.
@@ -66,11 +113,24 @@
     if args.payload_type == "linux":
         report_job_success()
 
-    # If we've run TFTF tests instead, then do some further parsing.
+    # If we've run TFTF or SCMI tests instead, then do some further parsing.
+    elif args.payload_type == "tftf":
+        session = "TFTF"
+        suite = "tftf"
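+        # Fall through to the generic session parsing below.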
+    elif args.payload_type == "scp_tests_scmi":
+        session = "SCMI"
+        suite = "scp-scmi"
+        parse_scp_scmi_results()
+
+        print("All tests passed.")
+        report_job_success()
+    else:
+        raise Exception("Payload not defined")
+
     # First make sure the test session finished.
     for phase in filter(lambda p: p["name"] == "lava-test-monitor", results):
         if phase["result"] != "pass":
-            print("TFTF test session failed. Did it time out?")
+            print(session + " test session failed. Did it time out?")
             report_job_failure()
         break
     else:
@@ -79,7 +139,7 @@
     # Then count the number of tests that failed/skipped.
     test_failures = 0
     test_skips = 0
-    for phase in filter(lambda p: p["suite"] == "tftf", results):
+    for phase in filter(lambda p: p["suite"] == suite, results):
         metadata = phase["metadata"]
         testcase_name = metadata["case"]
         testcase_result = metadata["result"]