From a2663321899cd0df406dbde28c97b358f95ff1ca Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Fri, 24 Mar 2023 08:20:18 +0100 Subject: [PATCH 1/3] test: improve analyze_outcomes.py script Allow the script to also execute the tests needed for the following analysis. It doesn't affect the previous usage of this script: - if the output file is already present, then only the analysis is performed - if the outcome file does not exist, then tests are also executed before doing the analysis Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 33 ++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 800b744ca..c954b7dad 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -10,6 +10,8 @@ import argparse import sys import traceback import re +import subprocess +import os import check_test_cases @@ -51,6 +53,25 @@ class TestCaseOutcomes: """ return len(self.successes) + len(self.failures) +def execute_reference_driver_tests(ref_component, driver_component, outcome_file): + """Run the tests that will fullfill the outcome file used for the following + coverage analysis""" + # If the outcome file already exists, we assume that the user wants to + # perform the comparison analysis again without repeating the tests. + if os.path.exists(outcome_file): + Results.log("Outcome file (" + outcome_file + ") already exists. 
" + \ + "Tests will be skipped.") + return + + shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ + " " + ref_component + " " + driver_component + print("Running: " + shell_command) + ret_val = subprocess.run(shell_command.split(), check=False).returncode + + if ret_val != 0: + Results.log("Error: failed to run reference/driver components") + sys.exit(ret_val) + def analyze_coverage(results, outcomes): """Check that all available test cases are executed at least once.""" available = check_test_cases.collect_available_test_cases() @@ -137,6 +158,9 @@ def do_analyze_coverage(outcome_file, args): def do_analyze_driver_vs_reference(outcome_file, args): """Perform driver vs reference analyze.""" + execute_reference_driver_tests(args['component_ref'], \ + args['component_driver'], outcome_file) + ignored_suites = ['test_suite_' + x for x in args['ignored_suites']] outcomes = read_outcome_file(outcome_file) @@ -152,9 +176,12 @@ TASKS = { 'test_function': do_analyze_coverage, 'args': {} }, - # How to use analyze_driver_vs_reference_xxx locally: - # 1. tests/scripts/all.sh --outcome-file "$PWD/out.csv" - # 2. tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx + # There are 2 options to use analyze_driver_vs_reference_xxx locally: + # 1. Run tests and then analysis: + # - tests/scripts/all.sh --outcome-file "$PWD/out.csv" + # - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx + # 2. 
Let this script run both automatically: # - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx 'analyze_driver_vs_reference_hash': { 'test_function': do_analyze_driver_vs_reference, 'args': { From 22992a04f1913489fcf703ee553d93cdee519d80 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Wed, 29 Mar 2023 11:15:28 +0200 Subject: [PATCH 2/3] Fix function description Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index c954b7dad..7af910f16 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -54,7 +54,8 @@ class TestCaseOutcomes: return len(self.successes) + len(self.failures) def execute_reference_driver_tests(ref_component, driver_component, outcome_file): - """Run the tests that will fullfill the outcome file used for the following + """Run the tests specified in ref_component and driver_component. Results + are stored in the outcome_file and they will be used for the following coverage analysis""" + # If the outcome file already exists, we assume that the user wants to + # perform the comparison analysis again without repeating the tests. 
From f109c66d73c672a54558b98a554199d61a26bfe4 Mon Sep 17 00:00:00 2001 From: Valerio Setti Date: Wed, 29 Mar 2023 11:15:44 +0200 Subject: [PATCH 3/3] Use proper log function Signed-off-by: Valerio Setti --- tests/scripts/analyze_outcomes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py index 7af910f16..ca113a4b0 100755 --- a/tests/scripts/analyze_outcomes.py +++ b/tests/scripts/analyze_outcomes.py @@ -66,7 +66,7 @@ def execute_reference_driver_tests(ref_component, driver_component, outcome_file shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \ " " + ref_component + " " + driver_component - print("Running: " + shell_command) + Results.log("Running: " + shell_command) ret_val = subprocess.run(shell_command.split(), check=False).returncode if ret_val != 0: