Skip to content

Commit

Permalink
[cmd] Change exit codes
Browse files Browse the repository at this point in the history
It is not possible to compute exit code 2 at "CodeChecker analyze"
command efficiently taking suppression into account. So according to the
new exit codes we indicate only the success and failure of analysis by
zero and non-zero exit codes respectively.

Moreover "diff" and "parse" commands have an exit status depending on
whether there is a resulting report.
  • Loading branch information
bruntib authored and csordasmarton committed Apr 6, 2021
1 parent cb72424 commit 57b302f
Show file tree
Hide file tree
Showing 16 changed files with 108 additions and 119 deletions.
50 changes: 4 additions & 46 deletions analyzer/codechecker_analyzer/cmd/analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,9 +106,8 @@ def get_argparser_ctor_args():
Exit status
------------------------------------------------
0 - Successful analysis and no new reports
0 - Successful analysis
1 - CodeChecker error
2 - At least one report emitted by an analyzer and there is no analyzer failure
3 - Analysis of at least one translation unit failed
128+signum - Terminating on a fatal signal whose number is signum
Expand Down Expand Up @@ -791,36 +790,6 @@ def __get_result_source_files(metadata):
return result_src_files


def __have_new_report(plist_timestamps: dict, output_path: str) -> bool:
    """
    Return True if the analysis emitted at least one new report.

    This is a lightweight implementation of checking whether new reports are
    found during analysis. The function checks if a new plist file contains
    "check_name" string. There is no plist parsing or "CodeChecker parse" run,
    but we say that a report was emitted if any checker writes anything to a
    plist file. "New plist file" means that it is either created during the
    analysis or its timestamp is greater than before analysis.

    plist_timestamps -- A dict storing file names and timestamps before
                        analysis.
    output_path -- Path of the output directory containing plist files.
    """
    for f in os.listdir(output_path):
        # Only analyzer result (.plist) files are of interest.
        if not f.endswith('.plist'):
            continue

        abs_f = os.path.join(output_path, f)
        # A plist file counts as "new" if it was not present in the
        # pre-analysis snapshot, or its mtime is newer than the one
        # recorded before the analysis (i.e. it was rewritten).
        if f not in plist_timestamps or \
                os.path.getmtime(abs_f) > plist_timestamps[f]:
            with open(abs_f, encoding='utf-8', errors='ignore') as plist:
                # 'check_name' is a tag value in .plist files which is followed
                # by a checker name. In case there is no such tag in the .plist
                # file, then no checker found anything.
                if 'check_name' in plist.read():
                    return True

    return False


def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
Expand Down Expand Up @@ -936,13 +905,6 @@ def main(args):
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)

plist_timestamps = {}
for f in os.listdir(args.output_path):
if not f.endswith('.plist'):
continue
plist_timestamps[f] = os.path.getmtime(
os.path.join(args.output_path, f))

# TODO: I'm not sure that this directory should be created here.
fixit_dir = os.path.join(args.output_path, 'fixit')
if not os.path.exists(fixit_dir):
Expand Down Expand Up @@ -1070,17 +1032,13 @@ def main(args):
pass

# Generally exit status is set by sys.exit() call in CodeChecker. However,
# exit codes 2 and 3 have a special meaning in case of an analysis:
# 2 is returned in case of analyzer report emitted, 3 in case of analyzer
# failed.
# "CodeChecker analyze" is special in the sense that in can be invoked
# exit code 3 has a special meaning: it returns when the underlying
# analyzer tool fails.
# "CodeChecker analyze" is special in the sense that it can be invoked
# either top-level or through "CodeChecker check". In the latter case
# "CodeChecker check" should have the same exit status. Calling sys.exit()
# at this specific point is not an option, because the remaining statements
# of "CodeChecker check" after the analysis wouldn't execute.
for analyzer_data in metadata_tool['analyzers'].values():
if analyzer_data['analyzer_statistics']['failed'] != 0:
return 3

if __have_new_report(plist_timestamps, args.output_path):
return 2
9 changes: 8 additions & 1 deletion analyzer/codechecker_analyzer/cmd/parse.py
Original file line number Diff line number Diff line change
Expand Up @@ -388,8 +388,12 @@ def get_argparser_ctor_args():
generating gerrit output.
Default: {}
Exit status
------------------------------------------------
0 - No report
1 - CodeChecker error
2 - At least one report emitted by an analyzer
""".format(os.path.join(package_root, 'config', 'checker_severity_map.json')),

# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Print analysis summary and results in a human-readable "
Expand Down Expand Up @@ -898,3 +902,6 @@ def skip_html_report_data_handler(report_hash, source_file, report_line,
"reports!", changed_files)

os.chdir(original_cwd)

if report_count != 0:
sys.exit(2)
16 changes: 8 additions & 8 deletions analyzer/tests/functional/analyze/test_analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ def test_compiler_info_files(self):

# THEN
errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

info_File = os.path.join(reports_dir, 'compiler_info.json')
self.assertEqual(os.path.exists(info_File), True)
Expand Down Expand Up @@ -276,7 +276,7 @@ def test_capture_analysis_output(self):
print(out)
print(err)
errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

# We expect the success stderr file in the success directory.
success_files = os.listdir(success_dir)
Expand Down Expand Up @@ -600,7 +600,7 @@ def test_compile_uniqueing(self):
process.communicate()

errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))

self.unique_json_helper(unique_json, True, False, True)
Expand All @@ -619,7 +619,7 @@ def test_compile_uniqueing(self):
process.communicate()

errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))

self.unique_json_helper(unique_json, False, True, True)
Expand Down Expand Up @@ -676,7 +676,7 @@ def test_compile_uniqueing(self):
process.communicate()

errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, True, True, True)

Expand Down Expand Up @@ -712,7 +712,7 @@ def test_invalid_enabled_checker_name(self):
self.assertTrue("non-existing-checker-name" in out)

errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

def test_disable_all_warnings(self):
"""Test disabling warnings as checker groups."""
Expand Down Expand Up @@ -778,7 +778,7 @@ def test_invalid_disabled_checker_name(self):
self.assertTrue("non-existing-checker-name" in out)

errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

def test_multiple_invalid_checker_names(self):
"""Warn in case of multiple invalid checker names."""
Expand Down Expand Up @@ -819,7 +819,7 @@ def test_multiple_invalid_checker_names(self):

errcode = process.returncode

self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

def test_makefile_generation(self):
""" Test makefile generation. """
Expand Down
4 changes: 2 additions & 2 deletions analyzer/tests/functional/fixit/test_fixit.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def test_fixit_list(self):

# THEN
errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

fixit_dir = os.path.join(self.report_dir, 'fixit')
self.assertTrue(os.path.isdir(fixit_dir))
Expand Down Expand Up @@ -194,7 +194,7 @@ def test_fixit_file_modification(self):

# THEN
errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

fixit_dir = os.path.join(self.report_dir, 'fixit')
self.assertTrue(os.path.isdir(fixit_dir))
Expand Down
2 changes: 1 addition & 1 deletion analyzer/tests/functional/skip/test_skip.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ def test_analyze_only_header(self):
print(out)
print(err)
errcode = process.returncode
self.assertEqual(errcode, 2)
self.assertEqual(errcode, 0)

# Check if file is skipped.
report_dir_files = os.listdir(self.report_dir)
Expand Down
2 changes: 1 addition & 1 deletion analyzer/tests/functional/suppress/test_suppress_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ def test_source_suppress_export(self):
ret = call_cmd(extract_cmd,
self._test_project_path,
env.test_env(self._test_workspace))
self.assertEqual(ret, 0, "Failed to generate suppress file.")
self.assertEqual(ret, 2, "Failed to generate suppress file.")

with open(generated_file, 'r',
encoding='utf-8', errors='ignore') as generated:
Expand Down
9 changes: 7 additions & 2 deletions docs/analyzer/user_guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -465,9 +465,8 @@ https://github.com/Ericsson/codechecker/blob/master/docs/analyzer/report_identif
Exit status
------------------------------------------------
0 - Successful analysis and no new reports
0 - Successful analysis
1 - CodeChecker error
2 - At least one report emitted by an analyzer and there is no analyzer failure
3 - Analysis of at least one translation unit failed
128+signum - Terminating on a fatal signal whose number is signum
Expand Down Expand Up @@ -1606,6 +1605,12 @@ Environment variables
CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
generating gerrit output.
Default: <package>/config/checker_severity_map.json
Exit status
------------------------------------------------
0 - No report
1 - CodeChecker error
2 - At least one report emitted by an analyzer
```
</details>

Expand Down
6 changes: 6 additions & 0 deletions docs/web/user_guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -1294,6 +1294,12 @@ environment variables:
CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
generating gerrit output.
Exit status
------------------------------------------------
0 - No difference between baseline and newrun
1 - CodeChecker error
2 - There is at least one report difference between baseline and newrun
Example scenario: Compare multiple analysis runs
------------------------------------------------
Compare two runs and show results that didn't exist in the 'run1' but appear in
Expand Down
6 changes: 6 additions & 0 deletions web/client/codechecker_client/cmd/cmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -1296,6 +1296,12 @@ def add_arguments_to_parser(parser):
CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
generating gerrit output.
Exit status
------------------------------------------------
0 - No difference between baseline and newrun
1 - CodeChecker error
2 - There is at least one report difference between baseline and newrun
Example scenario: Compare multiple analysis runs
------------------------------------------------
Compare two runs and show results that didn't exist in the 'run1' but appear in
Expand Down
3 changes: 3 additions & 0 deletions web/client/codechecker_client/cmd_line_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -1384,6 +1384,9 @@ def print_reports(client,
', '.join(newname_run_names),
', '.join(matching_new_run_names))

if len(reports) != 0:
sys.exit(2)


def handle_list_result_types(args):
# If the given output format is not 'table', redirect logger's output to
Expand Down
35 changes: 15 additions & 20 deletions web/tests/functional/diff_local/test_diff_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def test_resolved_json(self):
core.CallAndMessage checker was disabled in the new run, those
reports should be listed as resolved.
"""
resolved_results = get_diff_results(
resolved_results, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--resolved', 'json')
print(resolved_results)

Expand All @@ -73,7 +73,7 @@ def test_new_json(self):
core.StackAddressEscape checker was enabled in the new run, those
reports should be listed as new.
"""
new_results = get_diff_results(
new_results, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--new', 'json')
print(new_results)

Expand All @@ -86,14 +86,9 @@ def test_non_existent_reports_directory(self):
Displays detailed information about base and new directories when
any of them does not exist.
"""
error_output = ''
return_code = 0
try:
get_diff_results([self.base_reports], ['unexistent-dir-name'],
'--new')
except subprocess.CalledProcessError as process_error:
return_code = process_error.returncode
error_output = process_error.stderr
_, error_output, return_code = get_diff_results(
[self.base_reports], ['unexistent-dir-name'], '--new',
extra_args=['--url', f"localhost:{env.get_free_port()}/Default"])

self.assertEqual(return_code, 1,
"Exit code should be 1 if directory does not exist.")
Expand All @@ -106,7 +101,7 @@ def test_filter_severity_low_json(self):
core.StackAddressEscape checker was enabled in the new run, those
reports should be listed as new.
"""
low_severity_res = get_diff_results(
low_severity_res, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--new', 'json',
['--severity', 'low'])
print(low_severity_res)
Expand All @@ -119,18 +114,18 @@ def test_filter_severity_high_json(self):
core.StackAddressEscape checker (high severity) was enabled
in the new run, those reports should be listed.
"""
high_severity_res = get_diff_results(
high_severity_res, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--new', 'json',
['--severity', 'high'])
self.assertEqual((len(high_severity_res)), 4)
self.assertEqual(len(high_severity_res), 4)

def test_filter_severity_high_text(self):
"""Get the high severity new reports.
core.StackAddressEscape checker (high severity) was enabled
in the new run, those reports should be listed.
"""
out = get_diff_results(
out, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--new', None,
['--severity', 'high'])
print(out)
Expand All @@ -139,7 +134,7 @@ def test_filter_severity_high_text(self):

def test_filter_severity_high_low_text(self):
"""Get the high and low severity unresolved reports."""
out = get_diff_results(
out, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--unresolved', None,
['--severity', 'high', 'low'])
self.assertEqual(len(re.findall(r'\[HIGH\]', out)), 18)
Expand All @@ -149,7 +144,7 @@ def test_filter_severity_high_low_text(self):
"available in the json output.")
def test_filter_severity_high_low_json(self):
"""Get the high and low severity unresolved reports in json."""
high_low_unresolved_results = get_diff_results(
high_low_unresolved_results, _, _ = get_diff_results(
[self.base_reports], [self.new_reports], '--unresolved', 'json',
['--severity', 'high', 'low'])
print(high_low_unresolved_results)
Expand All @@ -158,7 +153,7 @@ def test_filter_severity_high_low_json(self):

def test_multiple_dir(self):
""" Get unresolved reports from muliple local directories. """
unresolved_results = get_diff_results(
unresolved_results, _, _ = get_diff_results(
[self.base_reports, self.new_reports],
[self.new_reports, self.base_reports],
'--unresolved', 'json',
Expand Down Expand Up @@ -260,15 +255,15 @@ def test_suppress_reports(self):
codechecker.log_and_analyze(cfg, self._test_dir)

# Run the diff command and check the results.
res = get_diff_results(
res, _, _ = get_diff_results(
[report_dir_base], [report_dir_new], '--new', 'json')
print(res)
self.assertEqual(len(res), 2)

res = get_diff_results(
res, _, _ = get_diff_results(
[report_dir_base], [report_dir_new], '--unresolved', 'json')
self.assertEqual(len(res), 1)

res = get_diff_results(
res, _, _ = get_diff_results(
[report_dir_base], [report_dir_new], '--resolved', 'json')
self.assertEqual(len(res), 2)
Loading

0 comments on commit 57b302f

Please sign in to comment.