#!/usr/bin/env python3
#
# Copyright (c) 2019, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

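# Filter a Coverity-style JSON defect report: drop issues triaged as "Ignore"
# or raised by an excluded MISRA checker, classify the remaining ones by MISRA
# category, print them (optionally also dumping them to a JSON file), and exit
# with a non-zero status when any defect remains.
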
import argparse
import json
import re
import shutil
import sys


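# Checkers whose findings are always filtered out of the report, regardless of
# the command-line options.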
_rule_exclusions = [
    "MISRA C-2012 Rule 2.4",
    "MISRA C-2012 Rule 2.5",
    "MISRA C-2012 Rule 2.7",
    "MISRA C-2012 Rule 5.1",
    "MISRA C-2012 Rule 5.8",
    "MISRA C-2012 Rule 8.6",
    "MISRA C-2012 Rule 8.7",
    "MISRA C-2012 Rule 11.4",
    "MISRA C-2012 Rule 11.5",
    "MISRA C-2012 Rule 15.1",
    "MISRA C-2012 Rule 15.5",
    "MISRA C-2012 Rule 15.6",
    "MISRA C-2012 Rule 16.1",
    "MISRA C-2012 Rule 16.3",
    "MISRA C-2012 Rule 17.1",
    "MISRA C-2012 Rule 21.6",
    "MISRA C-2012 Directive 4.6",
    "MISRA C-2012 Directive 4.8",
    "MISRA C-2012 Directive 4.9"
]

# The following classification of rules and directives includes 'MISRA C:2012
# Amendment 1'

# Directives
_dir_required = set(["1.1", "2.1", "3.1", "4.1", "4.3", "4.7", "4.10", "4.11",
    "4.12", "4.14"])

_dir_advisory = set(["4.2", "4.4", "4.5", "4.6", "4.8", "4.9", "4.13"])

# Rules
_rule_mandatory = set(["9.1", "9.2", "9.3", "12.5", "13.6", "17.3", "17.4",
    "17.6", "19.1", "21.13", "21.17", "21.18", "21.19", "21.20", "22.2", "22.5",
    "22.6"])

_rule_required = set(["1.1", "1.3", "2.1", "2.2", "3.1", "3.2", "4.1", "5.1",
    "5.2", "5.3", "5.4", "5.5", "5.6", "5.7", "5.8", "6.1", "6.2", "7.1", "7.2",
    "7.3", "7.4", "8.1", "8.2", "8.3", "8.4", "8.5", "8.6", "8.7", "8.8",
    "8.10", "8.12", "8.14", "9.2", "9.3", "9.4", "9.5", "10.1", "10.2", "10.3",
    "10.4", "10.6", "10.7", "10.8", "11.1", "11.2", "11.3", "11.6", "11.7",
    "11.8", "11.9", "12.2", "13.1", "13.2", "13.5", "14.1", "14.2", "14.3",
    "14.4", "15.2", "15.3", "15.6", "15.7", "16.1", "16.2", "16.3", "16.4",
    "16.5", "16.6", "16.7", "17.1", "17.2", "17.7", "18.1", "18.2", "18.3",
    "18.6", "18.7", "18.8", "20.3", "20.4", "20.6", "20.7", "20.8", "20.9",
    "20.11", "20.12", "20.13", "20.14", "21.1", "21.2", "21.3", "21.4", "21.5",
    "21.6", "21.7", "21.8", "21.9", "21.10", "21.11", "21.14", "21.15", "21.16",
    "22.1", "22.3", "22.4", "22.7", "22.8", "22.9", "22.10"])

_rule_advisory = set(["1.2", "2.3", "2.4", "2.5", "2.6", "2.7", "4.2", "5.9",
    "8.9", "8.11", "8.13", "10.5", "11.4", "11.5", "12.1", "12.3", "12.4",
    "13.3", "13.4", "15.1", "15.4", "15.5", "17.5", "17.8", "18.4", "18.5",
    "19.2", "20.1", "20.2", "20.5", "20.10", "21.12"])


_checker_lookup = {
    "Directive": {
        "required": _dir_required,
        "advisory": _dir_advisory
    },
    "Rule": {
        "mandatory": _rule_mandatory,
        "required": _rule_required,
        "advisory": _rule_advisory
    }
}

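# e.g. for a checker string such as "MISRA C-2012 Rule 10.3", 'kind' captures
# "Rule" and 'number' captures "10.3".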
_checker_re = re.compile(r"""(?P<kind>\w+) (?P<number>[\d\.]+)$""")


def _classify_checker(checker):
    match = _checker_re.search(checker)
    if match:
        kind, number = match.group("kind"), match.group("number")
        for classification, class_set in _checker_lookup[kind].items():
            if number in class_set:
                return classification

    return "unknown"


# Return a copy of the original issue description. Update the file path to
# strip the leading '/', and also insert the CID.
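# For example (hypothetical values), an occurrence with file "/bl1/bl1_main.c",
# mainEventLineNumber 45 and checker "MISRA C-2012 Rule 10.3" yields:
#   {"cid": 123, "file": "bl1/bl1_main.c", "line": 45,
#    "checker": "MISRA C-2012 Rule 10.3", "classification": "required",
#    "description": <the occurrence's mainEventDescription>}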
def _new_issue(cid, orig_issue):
    checker = orig_issue["checker"]
    classification = _classify_checker(checker)

    return {
        "cid": cid,
        "file": orig_issue["file"].lstrip("/"),
        "line": orig_issue["mainEventLineNumber"],
        "checker": checker,
        "classification": classification,
        "description": orig_issue["mainEventDescription"]
    }


def _cls_string(issue):
    cls = issue["classification"]

    return " (" + cls + ")" if cls != "unknown" else ""


# Given an issue, make a string formed of file name, line number, checker, and
# the CID. This could be used as a dictionary key to identify unique defects
# across the scan. Convert integers to zero-padded strings for proper sorting.
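# For example (hypothetical values), file "lib/foo.c", line 42, checker
# "MISRA C-2012 Rule 10.3" and CID 7 give the key
# "lib/foo.c00042MISRA C-2012 Rule 10.300007".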
def make_key(i):
    return (i["file"] + str(i["line"]).zfill(5) + i["checker"] +
            str(i["cid"]).zfill(5))


# Iterate through all issues that are not ignored. Unless show_all is set, only
# issues that are not in the comparison snapshot are returned.
def iter_issues(path, show_all=False):
    with open(path, encoding="utf-8") as fd:
        report = json.load(fd)

    # Unconditional filter
    filters = [lambda i: ((i["triage"]["action"] != "Ignore") and
        (i["occurrences"][0]["checker"] not in _rule_exclusions))]

    # Whether we need diffs only
    if not show_all:
        # Pick only issues that are not present in comparison snapshot
        filters.append(lambda i: not i["presentInComparisonSnapshot"])

    # Pick issue when all filters are true
    filter_func = lambda i: all([f(i) for f in filters])

    # Top-level is a group of issues, all sharing a common CID
    for issue_group in filter(filter_func, report["issueInfo"]):
        # Pick up individual occurrence of the CID
        for occurrence in issue_group["occurrences"]:
            yield _new_issue(issue_group["cid"], occurrence)


# Format issue (returned from iter_issues()) as text.
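# For example (hypothetical values), it produces a line such as:
#   lib/foo.c:42:[MISRA C-2012 Rule 10.3 (required)]<7> Essential type mismatch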
def format_issue(issue):
    return ("{file}:{line}:[{checker}{cls}]<{cid}> {description}").format_map(
        dict(issue, cls=_cls_string(issue)))


# Format issue (returned from iter_issues()) as HTML table row.
def format_issue_html(issue):
    cls = _cls_string(issue)
    cov_class = "cov-" + issue["classification"]

    return """\
<tr class="{cov_class}">
<td class="cov-file">{file}</td>
<td class="cov-line">{line}</td>
<td class="cov-checker">{checker}{cls}</td>
<td class="cov-cid">{cid}</td>
<td class="cov-description">{description}</td>
</tr>""".format_map(dict(issue, cls=cls, cov_class=cov_class))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--all", default=False, dest="show_all",
        action="store_const", const=True, help="List all issues")
    parser.add_argument("--output",
        help="File to output filtered defects to in JSON")
    parser.add_argument("json_report")

    opts = parser.parse_args()

    issues = []
    for issue in sorted(iter_issues(opts.json_report, opts.show_all),
            key=lambda i: make_key(i)):
        print(format_issue(issue))
        issues.append(issue)

    if opts.output:
        # Dump selected issues
        with open(opts.output, "wt") as fd:
            fd.write(json.dumps(issues))

    sys.exit(int(len(issues) > 0))