#!/usr/bin/env python3

""" report_parser.py:

    Report parser parses OpenCI json reports and conveys the information in
    one or more standard formats (to be implemented).

    After all information is captured it validates the success/failure status
    and can change the script exit code for integration with standard CI
    executors.
    """

from __future__ import print_function

__copyright__ = """
/*
 * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
 """
__author__ = "Minos Galanakis"
__email__ = "minos.galanakis@linaro.org"
__project__ = "Trusted Firmware-M Open CI"
__status__ = "stable"
__version__ = "1.1"


import os
import re
import sys
import json
import argparse
from pprint import pprint

try:
    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read
except ImportError:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(dir_path, "../"))

    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read

def split_keys(joint_arg, sep="="):
    """ Split a joined argument at the separator and return the parts as a
    list with surrounding whitespace removed """
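
    # Illustrative examples (hypothetical inputs):
    #   split_keys("status = Success")  ->  ["status", "Success"]
    #   split_keys("success = True")    ->  ["success", True]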

    keys = joint_arg.split(sep)

    # Remove surrounding whitespace
    keys = list(map(str.strip, keys))
    # Convert the literal words True/False (any case) to booleans
    keys = list(map(lambda x:
                    x.lower() == "true" if x.lower() in ["true", "false"]
                    else x,
                    keys))
    return keys


def dependencies_mdt_collect(path_list,
                             out_f=None,
                             expected_paths=["mbedcrypto",
                                             "cmsis",
                                             "checkpatch"]):
    """ Collect dependencies checkout metadata. It creates a json report
    which can optionally be exported to a file """
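
    # Illustrative input (hypothetical paths): each entry of path_list is a
    # "name=path" pair, e.g.
    #   ["mbedcrypto=checkout/mbedcrypto",
    #    "cmsis=checkout/cmsis",
    #    "checkpatch=checkout/checkpatch"]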

    cpaths = {k: v for k, v in [n.split("=") for n in path_list]}
    cwd = os.path.abspath(os.getcwd())

    # Create an empty dataset
    data = {n: {} for n in set(expected_paths).union(set(cpaths.keys()))}

    # Identify passed paths that are not required/expected
    extra_data = {n: {} for n in
                  set(cpaths.keys()).difference(set(expected_paths))}

    # Perform a basic sanity check
    if not set(data.keys()).issubset(set(cpaths.keys())):
        err_msg = "Error locating required paths.\nNeeded: %s\nHas: %s" % (
            ",".join(data.keys()), ",".join(cpaths.keys())
        )
        print(err_msg)
        raise Exception(err_msg)

    for d in list_subdirs(cpaths["mbedcrypto"]):
        print("mbed-crypto dir: ", d)
        # If the checkout directory name contains a git reference, convert
        # it to the short format
        d = convert_git_ref_path(d)

        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["mbedcrypto"][tag] = git_info

    for d in list_subdirs(cpaths["cmsis"]):
        print("CMSIS subdir: ", d)
        d = convert_git_ref_path(d)
        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["cmsis"][tag] = git_info

    if "fastmodel" in cpaths:
        for d in list_subdirs(cpaths["fastmodel"]):
            print("Fastmodel subdir:", d)
            json_info = load_json(os.path.join(d, "version.info"))
            json_info["dir"] = os.path.relpath(d, cwd)

            tag = json_info["version"]
            # Absolute paths will not work in Jenkins since it changes the
            # workspace directory between stages; convert to a relative path
            data["fastmodel"][tag] = json_info

    for d in list_subdirs(cpaths["checkpatch"]):
        print("Checkpatch subdir:", d)

        with open(os.path.join(d, "version.info"), "r") as F:
            url = F.readline().strip()

        git_info = get_remote_git_info(url)
        d = convert_git_ref_path(d)
        git_info['dir'] = d
        tag = os.path.split(git_info["dir"])[-1].split("_")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["checkpatch"][tag] = git_info

    if "fpga" in cpaths:
        for d in os.listdir(cpaths["fpga"]):
            print("FPGA imagefile:", d)
            if ".tar.gz" in d:
                name = d.split(".tar.gz")[0]
                platform, subsys, ver = name.split("_")
                data["fpga"][name] = {"platform": platform,
                                      "subsys": subsys,
                                      "version": ver,
                                      "recovery": os.path.join(
                                          cpaths["fpga"], d)}

    # Attempt to detect what the unexpected paths contain
    for e_path in extra_data.keys():
        for d in list_subdirs(cpaths[e_path]):
            print("%s subdir: %s" % (e_path, d))
            # If it contains a version.info file
            if os.path.isfile(os.path.join(d, "version.info")):
                json_info = load_json(os.path.join(d, "version.info"))
                json_info["dir"] = os.path.relpath(d, cwd)

                tag = json_info["version"]
                # Absolute paths will not work in Jenkins since it changes
                # the workspace directory between stages; convert to rel-path
                extra_data[e_path][tag] = json_info
            # If it contains git information
            elif os.path.exists(os.path.join(d, ".git")):
                d = convert_git_ref_path(d)

                git_info = get_local_git_info(d)
                tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

                # Absolute paths will not work in Jenkins since it changes
                # the workspace directory between stages; convert to rel-path
                git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
                extra_data[e_path][tag] = git_info
            # Do not break the flow if detection fails
            else:
                print("Error determining contents of directory: %s/%s for "
                      "indexing purposes" % (e_path, d))
                # Fall back to the directory name as the tag; `tag` could
                # otherwise be unset (or stale from a previous iteration)
                tag = os.path.split(d)[-1]
                extra_data[e_path][tag] = {"info": "N.A"}

    # Add the extra paths to the expected ones
    for k, v in extra_data.items():
        data[k] = v
    if out_f:
        print("Exporting metadata to", out_f)
        save_json(out_f, data)
    else:
        pprint(data)


def cppcheck_mdt_collect(file_list, out_f=None):
    """ XML parse multiple cppcheck output files and create a json report """

    xml_files = list(map(os.path.abspath, file_list))

    dict_data = []
    version = None
    for xf in xml_files:
        data = xml_read(xf)

        version = data["results"]["cppcheck"]["@version"]
        # If nothing is found the errors dictionary will be a NoneType object
        if data["results"]["errors"] is not None:
            # Use json to flatten the ordered dict
            str_data = json.dumps(data["results"]["errors"]["error"])
            # Remove the @ prefix that cppcheck adds to attribute names
            str_data = str_data.replace("@", '')

            # Convert to dict again (xml to json will have added an array)
            _dt = json.loads(str_data)

            if isinstance(_dt, list):
                dict_data += _dt
            # If only one error is found it will be given as a single item
            elif isinstance(_dt, dict):
                dict_data += [_dt]
            else:
                print("Ignoring cpp entry %s of type %s" % (_dt, type(_dt)))

    out_data = {"_metadata_": {"cppcheck-version": version},
                "report": {}}

    for E in dict_data:

        sever = E.pop("severity")

        # Sort it based on severity
        try:
            out_data["report"][sever].append(E)
        except KeyError:
            out_data["report"][sever] = [E]

    _errors = 0
    for msg_sever, msg_sever_entries in out_data["report"].items():
        out_data["_metadata_"][msg_sever] = str(len(msg_sever_entries))
        if msg_sever == "error":
            _errors = len(msg_sever_entries)

    out_data["_metadata_"]["success"] = True if not int(_errors) else False

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)


def checkpatch_mdt_collect(file_name, out_f=None):
    """ Regex-parse a checkpatch output file and create a report """

    out_data = {"_metadata_": {"errors": 0,
                               "warnings": 0,
                               "lines": 0,
                               "success": True},
                "report": {}
                }
    with open(file_name, "r") as F:
        cpatch_data = F.read().strip()

    # checkpatch will not report anything when no issues are found
    if len(cpatch_data):
        stat_rex = re.compile(r'^total: (\d+) errors, '
                              r'(\d+) warnings, (\d+) lines',
                              re.MULTILINE)
        line_rex = re.compile(r'([\S]+:)\s([\S]+:)\s([\S ]+)\n', re.MULTILINE)
        ewl = stat_rex.search(cpatch_data)
        try:
            _errors, _warnings, _lines = ewl.groups()
        except Exception as E:
            print("Exception parsing checkpatch file.", E)
            # If there is text but not in a known format, return -1 and fail
            # the job
            _errors = _warnings = _lines = "-1"

        out_data["_metadata_"] = {"errors": _errors,
                                  "warnings": _warnings,
                                  "lines": _lines,
                                  "success": True if not int(_errors)
                                  else False}

        checkpatch_entries = line_rex.findall(cpatch_data)

        for en in checkpatch_entries:
            _file, _line, _ = en[0].split(":")
            try:
                _type, _subtype, _ = en[1].split(":")
            except Exception as e:
                print("WARNING: Ignoring malformed checkpatch line: %s" %
                      "".join(en))
                continue
            _msg = en[2]

            E = {"id": _subtype,
                 "verbose": _subtype,
                 "msg": _msg,
                 "location": {"file": _file, "line": _line}
                 }
            try:
                out_data["report"][_type.lower()].append(E)
            except KeyError:
                out_data["report"][_type.lower()] = [E]

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)


def jenkins_mdt_collect(out_f):
    """ Collect Jenkins environment information and store
    it in a key-value list """
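
    # Illustrative output (hypothetical values):
    #   {"BUILD_ID": "42", "JOB_BASE_NAME": "tf-m-build",
    #    "GERRIT_CHANGE_NUMBER": "1234"}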

    # Jenkins environment parameters are always valid
    jenkins_env_keys = ["BUILD_ID",
                        "BUILD_URL",
                        "JOB_BASE_NAME",
                        "GERRIT_URL",
                        "GERRIT_PROJECT"]
    # The following Gerrit parameters only exist when
    # a job is triggered by a web hook
    gerrit_trigger_keys = ["GERRIT_CHANGE_NUMBER",
                           "GERRIT_CHANGE_SUBJECT",
                           "GERRIT_CHANGE_ID",
                           "GERRIT_PATCHSET_REVISION",
                           "GERRIT_PATCHSET_NUMBER",
                           "GERRIT_REFSPEC",
                           "GERRIT_CHANGE_URL",
                           "GERRIT_BRANCH",
                           "GERRIT_CHANGE_OWNER_EMAIL",
                           "GERRIT_PATCHSET_UPLOADER_EMAIL"]

    # Find as many of the variables as possible in the environment
    el = set(os.environ).intersection(set(jenkins_env_keys +
                                          gerrit_trigger_keys))
    # Format it in key:value pairs
    out_data = {n: os.environ[n] for n in el}
    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)


def metadata_collect(user_args):
    """ Logic for information collection during different stages of
    the build """
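
    # Dispatches on the command-line flags, e.g. (illustrative invocations):
    #   report_parser.py -c -d -p mbedcrypto=... cmsis=... checkpatch=...
    #   report_parser.py -c -g path/to/repo -f git_info.json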

    if user_args.dependencies_checkout and user_args.content_paths:
        dependencies_mdt_collect(user_args.content_paths,
                                 user_args.out_f)
    elif user_args.git_info:
        git_info = get_local_git_info(os.path.abspath(user_args.git_info))

        if user_args.out_f:
            save_json(user_args.out_f, git_info)
        else:
            pprint(git_info)
    elif user_args.cppcheck_files:
        cppcheck_mdt_collect(user_args.cppcheck_files, user_args.out_f)
    elif user_args.checkpatch_file:
        checkpatch_mdt_collect(user_args.checkpatch_file, user_args.out_f)
    elif user_args.jenkins_info:
        jenkins_mdt_collect(user_args.out_f)
    else:
        print("Invalid metadata collection arguments")
        print(user_args)
        sys.exit(1)


def collate_report(key_file_list, output_f=None, stdout=True):
    """ Join different types of json-formatted reports into one """

    out_data = {"_metadata_": {}, "report": {}}
    for kf in key_file_list:
        try:
            key, fl = kf.split("=")
            data = load_json(fl)
            # If data is a standard report (_metadata_/report), parse it
            if ("_metadata_" in data.keys() and "report" in data.keys()):
                out_data["_metadata_"][key] = data["_metadata_"]
                out_data["report"][key] = data["report"]
            # Else treat it as a raw information-passing dataset
            else:
                try:
                    out_data["info"][key] = data
                except KeyError as E:
                    out_data["info"] = {key: data}
        except Exception as E:
            print("Exception parsing argument", kf, E)
            continue
    if output_f:
        save_json(output_f, out_data)
    elif stdout:
        pprint(out_data)
    return out_data


def filter_report(key_value_list, input_f, output_f):
    """ Generate a subset of the data contained in input_f by selecting
    only the values defined in key_value_list """
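
    # Illustrative invocation (hypothetical keys/values):
    #   filter_report(["report=build_1,build_2"],
    #                 "full_report.json", "subset.json")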

    try:
        rep_data = load_json(input_f)
    except Exception as E:
        print("Exception parsing ", input_f, E)
        sys.exit(1)

    out_data = {}
    for kf in key_value_list:
        try:
            tag, value = kf.split("=")
            # If multiple values are selected
            if "," in value:
                out_data[tag] = {}
                for v in value.split(","):
                    data = rep_data[tag][v]
                    out_data[tag][v] = data
            else:
                data = rep_data[tag][value]
                out_data[tag] = {value: data}
        except Exception as E:
            print("Could not extract data-set for k: %s v: %s" % (tag, value))
            print(E)
            continue
    if output_f:
        save_json(output_f, out_data)
    else:
        pprint(out_data)


def parse_report(user_args):
    """ Parse a report and attempt to determine if it is overall successful
    or not. It will set the script's exit code accordingly """
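
    # Illustrative report shapes this function accepts (hypothetical data):
    #   {"_metadata_": {"status": "Success"}, "report": {...}}   # dict form
    #   [{"status": "Success"}, {"status": "Failed"}]            # list form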

    # Parse mode
    in_rep = load_json(user_args.report)
    report_eval = None

    # Extract the required condition for the evaluation to pass
    pass_key, pass_val = split_keys(user_args.set_pass)

    print("Evaluation will succeed if \"%s\" is \"%s\"" % (pass_key,
                                                           pass_val))
    try:
        report_eval = in_rep["_metadata_"][pass_key] == pass_val
        print("Evaluating detected '%s' field in _metadata_. " % pass_key)
    except Exception as E:
        pass

    if report_eval is None:
        if isinstance(in_rep, dict):
            # If the report contains an overall success field in its
            # metadata, do not parse the items
            in_rep = in_rep["report"]
            ev_list = in_rep.values()
        elif isinstance(in_rep, list):
            ev_list = in_rep
        else:
            print("Invalid data type: %s" % type(in_rep))
            return

        if user_args.onepass:
            try:
                report_eval = in_rep[user_args.onepass][pass_key] == pass_val
            except Exception as e:
                report_eval = False

        # If every single field needs to be successful, invert the check and
        # look for the entries which are not
        elif user_args.allpass:
            try:
                failed = [x for x in ev_list if x[pass_key] != pass_val]
                report_eval = not failed
            except Exception as e:
                print(e)
                report_eval = False
        else:
            print("Evaluation condition not set. Please use -a or -o. Launch "
                  "help (-h) for more information")

    print("Evaluation %s" % ("passed" if report_eval else "failed"))
    if user_args.eif:
        print("Setting script exit status")
        sys.exit(0 if report_eval else 1)


def main(user_args):
    """ Main logic """

    # Metadata collect mode
    if user_args.collect:
        metadata_collect(user_args)
        return
    elif user_args.filter_report:
        filter_report(user_args.filter_report,
                      user_args.report,
                      user_args.out_f)
    elif user_args.collate_report:
        collate_report(user_args.collate_report, user_args.out_f)
    else:
        parse_report(user_args)


def get_cmd_args():
    """ Parse command line arguments """

    # Parse command line arguments to override config
    parser = argparse.ArgumentParser(description="TFM Report Parser.")
    parser.add_argument("-e", "--error_if_failed",
                        dest="eif",
                        action="store_true",
                        help="If set, the evaluation result will change "
                             "the script exit code")
    parser.add_argument("-s", "--set-success-field",
                        dest="set_pass",
                        default="status = Success",
                        action="store",
                        help="Set the key which the script will use to "
                             "assert success/failure")
    parser.add_argument("-a", "--all-fields-must-pass",
                        dest="allpass",
                        action="store_true",
                        help="When set and a list is provided, all entries "
                             "must be successful for evaluation to pass")
    parser.add_argument("-o", "--one-field-must-pass",
                        dest="onepass",
                        action="store",
                        help="Only the user defined field must pass")
    parser.add_argument("-r", "--report",
                        dest="report",
                        action="store",
                        help="JSON file containing input report")
    parser.add_argument("-c", "--collect",
                        dest="collect",
                        action="store_true",
                        help="When set, the parser will attempt to collect "
                             "information and produce a report")
    parser.add_argument("-d", "--dependencies-checkout",
                        dest="dependencies_checkout",
                        action="store_true",
                        help="Collect information from a dependencies "
                             "checkout job")
    parser.add_argument("-f", "--output-file",
                        dest="out_f",
                        action="store",
                        help="Output file to store captured information")
    parser.add_argument('-p', '--content-paths',
                        dest="content_paths",
                        nargs='*',
                        help=("Pass a space separated list of paths in the "
                              "following format: -p mbedcrypto=/yourpath/ "
                              "fpga=/another/path. Used in conjunction "
                              "with -d"))
    parser.add_argument("-g", "--git-info",
                        dest="git_info",
                        action="store",
                        help="Extract git information from the given path. "
                             "Requires the --collect directive. Optional "
                             "parameter --output-file")
    parser.add_argument("-x", "--cpp-check-xml",
                        dest="cppcheck_files",
                        nargs='*',
                        action="store",
                        help="Extract cppcheck static analysis information "
                             "from output files, provided as a space "
                             "separated list. Requires the --collect "
                             "directive. Optional parameter --output-file")
    parser.add_argument("-z", "--checkpatch-parse-f",
                        dest="checkpatch_file",
                        action="store",
                        help="Extract checkpatch static analysis information "
                             "from an output file. Requires the --collect "
                             "directive. Optional parameter --output-file")
    parser.add_argument("-j", "--jenkins-info",
                        dest="jenkins_info",
                        action="store_true",
                        help="Extract Jenkins and Gerrit trigger environment "
                             "information. Requires the --collect directive. "
                             "Optional parameter --output-file")
    parser.add_argument("-l", "--collate-report",
                        dest="collate_report",
                        action="store",
                        nargs='*',
                        help="Pass a space separated list of key-value pairs "
                             "in the following format: -l "
                             "report_key_0=report_file_0 "
                             "report_key_1=report_file_1. Collate will "
                             "generate a joint dataset and print it to "
                             "stdout. Optional parameter --output-file")
    parser.add_argument("-t", "--filter-report",
                        dest="filter_report",
                        action="store",
                        nargs='*',
                        help="Requires the --report parameter for the input "
                             "file. Pass a space separated list of key-value "
                             "pairs in the following format: -t "
                             "report_key_0=value_0 report_key_1=value_1. "
                             "Filter will remove all entries of the original "
                             "report except the ones matching the key:value "
                             "pairs defined, and print the result to stdout. "
                             "Optional parameter --output-file")
    return parser.parse_args()


if __name__ == "__main__":
    main(get_cmd_args())
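
# Example invocations (illustrative; see get_cmd_args above for the full
# flag reference):
#   ./report_parser.py -r report.json -s "status=Success" -a -e
#   ./report_parser.py -c -j -f jenkins_env.json
#   ./report_parser.py -l build=build_report.json scan=cppcheck_report.json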