#!/usr/bin/env python3

""" report_parser.py:

    Report parser parses openci json reports and conveys the information in
    one or more standard formats (to be implemented).

    After all information is captured it validates the success/failure status
    and can change the script exit code for integration with standard CI
    executors.
    """

from __future__ import print_function

__copyright__ = """
/*
 * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
 """
__author__ = "Minos Galanakis"
__email__ = "minos.galanakis@linaro.org"
__project__ = "Trusted Firmware-M Open CI"
__status__ = "stable"
__version__ = "1.1"


import os
import re
import sys
import json
import argparse
from pprint import pprint

try:
    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read
except ImportError:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(dir_path, "../"))

    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read


def split_keys(joint_arg, sep="="):
    """ Split a joint argument on a separator and return the parts as a
    list with whitespace removed. Values of "true"/"false" are converted
    to booleans """

    keys = joint_arg.split(sep)

    # Remove whitespace
    keys = list(map(str.strip, keys))
    # If a key contains the word True/False convert it to a boolean
    keys = [{"true": True, "false": False}.get(x.lower(), x) for x in keys]
    return keys
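# Example behaviour of split_keys (illustrative):
#   split_keys("status = Success") -> ["status", "Success"]
#   split_keys("success = True")   -> ["success", True]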


def dependencies_mdt_collect(path_list,
                             out_f=None,
                             expected_paths=["mbedcrypto",
                                             "cmsis",
                                             "checkpatch"]):
    """ Collect dependencies checkout metadata. It creates a json report
    which can be optionally exported to a file """

    cpaths = {k: v for k, v in [n.split("=") for n in path_list]}
    cwd = os.path.abspath(os.getcwd())

    # Create an empty dataset
    data = {n: {} for n in set(expected_paths).union(set(cpaths.keys()))}

    # Perform basic sanity check
    if not set(data.keys()).issubset(set(cpaths.keys())):
        err_msg = "Error locating required paths.\nNeeded: %s\nHas: %s" % (
            ",".join(data.keys()), ",".join(cpaths.keys())
        )
        print(err_msg)
        raise Exception(err_msg)
    for d in list_subdirs(cpaths["mbedcrypto"]):
        print("mbed-crypto dir: ", d)
        # If the checkout directory name contains a git reference, convert
        # it to the short format
        d = convert_git_ref_path(d)

        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

        # Absolute paths will not work in jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["mbedcrypto"][tag] = git_info

    for d in list_subdirs(cpaths["cmsis"]):
        print("CMSIS subdir: ", d)
        d = convert_git_ref_path(d)
        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1]

        # Absolute paths will not work in jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["cmsis"][tag] = git_info
    if "fastmodel" in cpaths:
        for d in list_subdirs(cpaths["fastmodel"]):
            print("Fastmodel subdir:", d)
            json_info = load_json(os.path.join(d, "version.info"))
            # Absolute paths will not work in jenkins since it changes the
            # workspace directory between stages; convert to a relative path
            json_info["dir"] = os.path.relpath(d, cwd)

            tag = json_info["version"]
            data["fastmodel"][tag] = json_info

    for d in list_subdirs(cpaths["checkpatch"]):
        print("Checkpatch subdir:", d)

        with open(os.path.join(d, "version.info"), "r") as F:
            url = F.readline().strip()

        git_info = get_remote_git_info(url)
        d = convert_git_ref_path(d)
        git_info['dir'] = d
        tag = os.path.split(git_info["dir"])[-1].split("_")[-1]

        # Absolute paths will not work in jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["checkpatch"][tag] = git_info
    if "fpga" in cpaths:
        for d in os.listdir(cpaths["fpga"]):
            print("FPGA imagefile:", d)
            if ".tar.gz" in d:
                name = d.split(".tar.gz")[0]
                platform, subsys, ver = name.split("_")
                data["fpga"][name] = {"platform": platform,
                                      "subsys": subsys,
                                      "version": ver,
                                      "recovery": os.path.join(
                                          cpaths["fpga"], d)}

    if out_f:
        print("Exporting metadata to", out_f)
        save_json(out_f, data)
    else:
        pprint(data)

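# Illustrative call into dependencies_mdt_collect (paths are hypothetical):
#   dependencies_mdt_collect(["mbedcrypto=./deps/mbedcrypto",
#                             "cmsis=./deps/cmsis",
#                             "checkpatch=./deps/checkpatch"],
#                            out_f="dependencies_mdt.json")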

def cppcheck_mdt_collect(file_list, out_f=None):
    """ XML parse multiple cppcheck output files and create a json report """

    xml_files = list(map(os.path.abspath, file_list))

    dict_data = []
    version = None
    for xf in xml_files:
        data = xml_read(xf)

        version = data["results"]["cppcheck"]["@version"]
        # If nothing is found the errors dictionary will be a NoneType object
        if data["results"]["errors"] is not None:
            # Use json to flatten the ordered dict
            str_data = json.dumps(data["results"]["errors"]["error"])
            # Remove the @ prefix that cppcheck adds to attribute fields
            str_data = str_data.replace("@", '')

            # Convert to dict again (xml to json will have added an array)
            _dt = json.loads(str_data)

            if isinstance(_dt, list):
                dict_data += _dt
            # If only one error is found it will be given as a single item
            elif isinstance(_dt, dict):
                dict_data += [_dt]
            else:
                print("Ignoring cpp entry %s of type %s" % (_dt, type(_dt)))

    out_data = {"_metadata_": {"cppcheck-version": version},
                "report": {}}

    for E in dict_data:

        sever = E.pop("severity")

        # Group the entries by severity
        try:
            out_data["report"][sever].append(E)
        except KeyError:
            out_data["report"][sever] = [E]

    _errors = 0
    for msg_sever, msg_sever_entries in out_data["report"].items():
        out_data["_metadata_"][msg_sever] = str(len(msg_sever_entries))
        if msg_sever == "error":
            _errors = len(msg_sever_entries)

    out_data["_metadata_"]["success"] = not _errors

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

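# Illustrative shape of the cppcheck report produced above (values are
# hypothetical):
#   {"_metadata_": {"cppcheck-version": "1.87", "error": "2",
#                   "success": False},
#    "report": {"error": [{"id": "nullPointer", "msg": "...", ...}]}}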

def checkpatch_mdt_collect(file_name, out_f=None):
    """ Regex parse a checkpatch output file and create a report """

    out_data = {"_metadata_": {"errors": 0,
                               "warnings": 0,
                               "lines": 0,
                               "success": True},
                "report": {}
                }
    with open(file_name, "r") as F:
        cpatch_data = F.read().strip()

    # checkpatch will not report anything when no issues are found
    if len(cpatch_data):
        stat_rex = re.compile(r'^total: (\d+) errors, '
                              r'(\d+) warnings, (\d+) lines',
                              re.MULTILINE)
        line_rex = re.compile(r'([\S]+:)\s([\S]+:)\s([\S ]+)\n', re.MULTILINE)
        ewl = stat_rex.search(cpatch_data)
        try:
            _errors, _warnings, _lines = ewl.groups()
        except Exception as E:
            print("Exception parsing checkpatch file.", E)
            # If there is text but not in a known format, return -1 and fail
            # the job
            _errors = _warnings = _lines = "-1"
        checkpatch_entries = line_rex.findall(cpatch_data)

        # Set the metadata once, outside the entry loop, so it is captured
        # even when no individual entries are matched
        out_data["_metadata_"] = {"errors": _errors,
                                  "warnings": _warnings,
                                  "lines": _lines,
                                  "success": int(_errors) == 0}

        for en in checkpatch_entries:
            _file, _line, _ = en[0].split(":")
            try:
                _type, _subtype, _ = en[1].split(":")
            except Exception as e:
                print("WARNING: Ignoring malformed checkpatch line: %s" %
                      "".join(en))
                continue
            _msg = en[2]

            E = {"id": _subtype,
                 "verbose": _subtype,
                 "msg": _msg,
                 "location": {"file": _file, "line": _line}
                 }
            try:
                out_data["report"][_type.lower()].append(E)
            except KeyError:
                out_data["report"][_type.lower()] = [E]

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

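# Illustrative checkpatch output matched by the regexes above (hypothetical):
#   my_driver.c:42: ERROR:CODE_INDENT: code indent should use tabs
#   total: 1 errors, 0 warnings, 120 lines checked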

def jenkins_mdt_collect(out_f):
    """ Collects Jenkins environment information and stores
    it in a key value list """

    # Jenkins environment parameters are always valid
    jenkins_env_keys = ["BUILD_ID",
                        "BUILD_URL",
                        "JOB_BASE_NAME",
                        "GERRIT_URL",
                        "GERRIT_PROJECT"]
    # The following Gerrit parameters only exist when
    # a job is triggered by a web hook
    gerrit_trigger_keys = ["GERRIT_CHANGE_NUMBER",
                           "GERRIT_CHANGE_SUBJECT",
                           "GERRIT_CHANGE_ID",
                           "GERRIT_PATCHSET_REVISION",
                           "GERRIT_PATCHSET_NUMBER",
                           "GERRIT_REFSPEC",
                           "GERRIT_CHANGE_URL",
                           "GERRIT_BRANCH",
                           "GERRIT_CHANGE_OWNER_EMAIL",
                           "GERRIT_PATCHSET_UPLOADER_EMAIL"]

    # Find as many of the variables as possible in the environment
    el = set(os.environ).intersection(set(jenkins_env_keys +
                                          gerrit_trigger_keys))
    # Format it in key:value pairs
    out_data = {n: os.environ[n] for n in el}
    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

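# Illustrative output for a Gerrit-triggered job (values are hypothetical):
#   {"BUILD_ID": "42", "JOB_BASE_NAME": "tf-m-build",
#    "GERRIT_BRANCH": "master", ...}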

def metadata_collect(user_args):
    """ Logic for information collection during different stages of
    the build """

    if user_args.dependencies_checkout and user_args.content_paths:
        dependencies_mdt_collect(user_args.content_paths,
                                 user_args.out_f)
    elif user_args.git_info:
        git_info = get_local_git_info(os.path.abspath(user_args.git_info))

        if user_args.out_f:
            save_json(user_args.out_f, git_info)
        else:
            pprint(git_info)
    elif user_args.cppcheck_files:
        cppcheck_mdt_collect(user_args.cppcheck_files, user_args.out_f)
    elif user_args.checkpatch_file:
        checkpatch_mdt_collect(user_args.checkpatch_file, user_args.out_f)
    elif user_args.jenkins_info:
        jenkins_mdt_collect(user_args.out_f)
    else:
        print("Invalid Metadata collection arguments")
        print(user_args)
        sys.exit(1)

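# Illustrative dependencies-collection invocation dispatched above (paths
# are hypothetical):
#   ./report_parser.py -c -d -p mbedcrypto=./deps/mbedcrypto \
#       cmsis=./deps/cmsis checkpatch=./deps/checkpatch -f deps_mdt.json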
def collate_report(key_file_list, output_f=None, stdout=True):
    """ Join different types of json formatted reports into one """

    out_data = {"_metadata_": {}, "report": {}}
    for kf in key_file_list:
        try:
            key, fl = kf.split("=")
            data = load_json(fl)
            # If data is a standard report (has metadata and report) parse it
            if ("_metadata_" in data.keys() and "report" in data.keys()):
                out_data["_metadata_"][key] = data["_metadata_"]
                out_data["report"][key] = data["report"]
            # Else treat it as a raw information passing dataset
            else:
                try:
                    out_data["info"][key] = data
                except KeyError as E:
                    out_data["info"] = {key: data}
        except Exception as E:
            print("Exception parsing argument", kf, E)
            continue
    if output_f:
        save_json(output_f, out_data)
    elif stdout:
        pprint(out_data)
    return out_data

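# Illustrative call (file names are hypothetical):
#   collate_report(["build=build_report.json", "git=git_info.json"],
#                  output_f="joint_report.json")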

def filter_report(key_value_list, input_f, output_f):
    """ Generates a subset of the data contained in
    input_f, by selecting only the values defined in key_value list """

    try:
        rep_data = load_json(input_f)
    except Exception as E:
        print("Exception parsing ", input_f, E)
        sys.exit(1)

    out_data = {}
    for kf in key_value_list:
        try:
            tag, value = kf.split("=")
            # If multiple values are selected
            if "," in value:
                out_data[tag] = {}
                for v in value.split(","):
                    data = rep_data[tag][v]
                    out_data[tag][v] = data
            else:
                data = rep_data[tag][value]
                out_data[tag] = {value: data}
        except Exception as E:
            print("Could not extract data-set for k: %s v: %s" % (tag, value))
            print(E)
            continue
    if output_f:
        save_json(output_f, out_data)
    else:
        pprint(out_data)

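# Illustrative filter call (keys and files are hypothetical):
#   filter_report(["report=build,style"], "joint_report.json", "subset.json")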

def parse_report(user_args):
    """ Parse a report and attempt to determine if it is overall successful
    or not. It will set the script's exit code accordingly """

    # Parse Mode
    in_rep = load_json(user_args.report)
    report_eval = None

    # Extract the required condition for the evaluation to pass
    pass_key, pass_val = split_keys(user_args.set_pass)

    print("Evaluation will succeed if \"%s\" is \"%s\"" % (pass_key,
                                                           pass_val))
    try:
        report_eval = in_rep["_metadata_"][pass_key] == pass_val
        print("Evaluating detected '%s' field in _metadata_. " % pass_key)
    except Exception as E:
        pass

    if report_eval is None:
        if isinstance(in_rep, dict):
            # The report does not contain an overall success field in its
            # metadata, so evaluate the individual report entries
            in_rep = in_rep["report"]
            ev_list = in_rep.values()
        elif isinstance(in_rep, list):
            ev_list = in_rep
        else:
            print("Invalid data type: %s" % type(in_rep))
            return

        if user_args.onepass:
            try:
                report_eval = in_rep[user_args.onepass][pass_key] == pass_val
            except Exception as e:
                report_eval = False

        # If every single field needs to be successful, evaluate all the
        # entries and fail if any of them does not match
        elif user_args.allpass:
            try:
                report_eval = all(x[pass_key] == pass_val for x in ev_list)
            except Exception as e:
                print(e)
                report_eval = False
        else:
            print("Evaluation condition not set. Please use -a or -o. Launch "
                  "help (-h) for more information")

    print("Evaluation %s" % ("passed" if report_eval else "failed"))
    if user_args.eif:
        print("Setting script exit status")
        sys.exit(0 if report_eval else 1)

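# Illustrative input for parse_report (shape is hypothetical). With the
# default -s "status = Success" the evaluation passes and, when -e is set,
# the script exits with status 0:
#   {"_metadata_": {"status": "Success"}, "report": {...}}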

def main(user_args):
    """ Main logic """

    # Metadata Collect Mode
    if user_args.collect:
        metadata_collect(user_args)
        return
    elif user_args.filter_report:
        filter_report(user_args.filter_report,
                      user_args.report,
                      user_args.out_f)
    elif user_args.collate_report:
        collate_report(user_args.collate_report, user_args.out_f)
    else:
        parse_report(user_args)


def get_cmd_args():
    """ Parse command line arguments """

    # Parse command line arguments to override config
    parser = argparse.ArgumentParser(description="TFM Report Parser.")
    parser.add_argument("-e", "--error_if_failed",
                        dest="eif",
                        action="store_true",
                        help="If set will change the script exit code")
    parser.add_argument("-s", "--set-success-field",
                        dest="set_pass",
                        default="status = Success",
                        action="store",
                        help="Set the key which the script will use to "
                             "assert success/failure")
    parser.add_argument("-a", "--all-fields-must-pass",
                        dest="allpass",
                        action="store_true",
                        help="When set and a list is provided, all entries "
                             "must be successful for evaluation to pass")
    parser.add_argument("-o", "--one-field-must-pass",
                        dest="onepass",
                        action="store",
                        help="Only the user defined field must pass")
    parser.add_argument("-r", "--report",
                        dest="report",
                        action="store",
                        help="JSON file containing input report")
    parser.add_argument("-c", "--collect",
                        dest="collect",
                        action="store_true",
                        help="When set, the parser will attempt to collect "
                             "information and produce a report")
    parser.add_argument("-d", "--dependencies-checkout",
                        dest="dependencies_checkout",
                        action="store_true",
                        help="Collect information from a dependencies "
                             "checkout job")
    parser.add_argument("-f", "--output-file",
                        dest="out_f",
                        action="store",
                        help="Output file to store captured information")
    parser.add_argument('-p', '--content-paths',
                        dest="content_paths",
                        nargs='*',
                        help=("Pass a space separated list of paths in the "
                              "following format: -p mbedtls=/yourpath/ "
                              "fpv=/another/path. Used in conjunction "
                              "with -d"))
    parser.add_argument("-g", "--git-info",
                        dest="git_info",
                        action="store",
                        help="Extract git information from given path. "
                             "Requires --collect directive. Optional "
                             "parameter --output-file")
    parser.add_argument("-x", "--cpp-check-xml",
                        dest="cppcheck_files",
                        nargs='*',
                        action="store",
                        help="Extract cppcheck static analysis information "
                             "from output files, provided as a space "
                             "separated list. Requires --collect directive. "
                             "Optional parameter --output-file")
    parser.add_argument("-z", "--checkpatch-parse-f",
                        dest="checkpatch_file",
                        action="store",
                        help="Extract checkpatch static analysis information "
                             "from an output file. Requires --collect "
                             "directive. Optional parameter --output-file")
    parser.add_argument("-j", "--jenkins-info",
                        dest="jenkins_info",
                        action="store_true",
                        help="Extract jenkins and gerrit trigger environment "
                             "information. Requires --collect directive. "
                             "Optional parameter --output-file")
    parser.add_argument("-l", "--collate-report",
                        dest="collate_report",
                        action="store",
                        nargs='*',
                        help="Pass a space separated list of key-value pairs "
                             "in the following format: -l "
                             "report_key_0=report_file_0 "
                             "report_key_1=report_file_1. Collate will "
                             "generate a joint dataset and print it to "
                             "stdout. Optional parameter --output-file")
    parser.add_argument("-t", "--filter-report",
                        dest="filter_report",
                        action="store",
                        nargs='*',
                        help="Requires --report parameter for input file. "
                             "Pass a space separated list of key-value pairs "
                             "in the following format: -t "
                             "report_key_0=value_0 report_key_1=value_1. "
                             "Filter will remove all entries of the original "
                             "report except the ones matching the key:value "
                             "pairs defined, and print the result to stdout. "
                             "Optional parameter --output-file")
    return parser.parse_args()


if __name__ == "__main__":
    main(get_cmd_args())