Run tests for ref_vs_driver outside task function

Signed-off-by: Pengyu Lv <pengyu.lv@arm.com>
Pengyu Lv 2023-11-28 15:30:03 +08:00
parent 18908ec276
commit 20e3ca391e

@@ -50,11 +50,7 @@ def execute_reference_driver_tests(results: Results, ref_component, driver_compo
     """Run the tests specified in ref_component and driver_component. Results
     are stored in the output_file and they will be used for the following
     coverage analysis"""
-    # If the outcome file already exists, we assume that the user wants to
-    # perform the comparison analysis again without repeating the tests.
-    if os.path.exists(outcome_file):
-        results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file)
-        return
     results.new_section("Test {} and {}", ref_component, driver_component)
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
@@ -188,27 +184,18 @@ suite_case = "<suite>;<case>"
     return outcomes

-def do_analyze_coverage(results: Results, outcomes_or_file, args):
+def do_analyze_coverage(results: Results, outcomes, args):
     """Perform coverage analysis."""
     results.new_section("Analyze coverage")
-    outcomes = read_outcome_file(outcomes_or_file) \
-               if isinstance(outcomes_or_file, str) else outcomes_or_file
     analyze_outcomes(results, outcomes, args)

-def do_analyze_driver_vs_reference(results: Results, outcomes_or_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcomes, args):
     """Perform driver vs reference analysis."""
     results.new_section("Analyze driver {} vs reference {}",
                         args['component_driver'], args['component_ref'])
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
-    if isinstance(outcomes_or_file, str):
-        execute_reference_driver_tests(results, args['component_ref'], \
-                                       args['component_driver'], outcomes_or_file)
-        outcomes = read_outcome_file(outcomes_or_file)
-    else:
-        outcomes = outcomes_or_file
     analyze_driver_vs_reference(results, outcomes,
                                 args['component_ref'], args['component_driver'],
                                 ignored_suites, args['ignored_tests'])
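
With the isinstance() branching removed, every task function has one shape: it takes the already-parsed outcomes and never touches the filesystem. A hedged sketch of that calling convention; Results, Outcomes and the sample task below are simplified stand-ins, not the script's real definitions:

from typing import Callable, Dict, List

Outcomes = Dict[str, List[str]]  # placeholder for the parsed outcome data

class Results:
    """Minimal stand-in for the script's Results class."""
    def new_section(self, fmt: str, *args: object) -> None:
        print("=== " + fmt.format(*args))

def do_analyze_coverage(results: Results, outcomes: Outcomes, args: dict) -> None:
    # Assumes `outcomes` is already parsed; no read_outcome_file() here.
    results.new_section("Analyze coverage")

# Every task can now be driven through one uniform signature.
TaskFunction = Callable[[Results, Outcomes, dict], None]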
@@ -507,17 +494,29 @@ def main():
         # If the outcome file exists, parse it once and share the result
         # among tasks to improve performance.
-        # Otherwise, it will be generated by do_analyze_driver_vs_reference.
-        if os.path.exists(options.outcomes):
-            main_results.info("Read outcome file from {}.", options.outcomes)
-            outcomes_or_file = read_outcome_file(options.outcomes)
-        else:
-            outcomes_or_file = options.outcomes
+        # Otherwise, it will be generated by execute_reference_driver_tests.
+        if not os.path.exists(options.outcomes):
+            if len(tasks_list) > 1:
+                sys.stderr.write("multiple tasks found, please provide a valid outcomes file.\n")
+                sys.exit(2)
+            task_name = tasks_list[0]
+            task = KNOWN_TASKS[task_name]
+            if task['test_function'] != do_analyze_driver_vs_reference:
+                sys.stderr.write("please provide a valid outcomes file for {}.\n".format(task_name))
+                sys.exit(2)
+            execute_reference_driver_tests(main_results,
+                                           task['args']['component_ref'],
+                                           task['args']['component_driver'],
+                                           options.outcomes)
+        outcomes = read_outcome_file(options.outcomes)

         for task in tasks_list:
             test_function = KNOWN_TASKS[task]['test_function']
             test_args = KNOWN_TASKS[task]['args']
-            test_function(main_results, outcomes_or_file, test_args)
+            test_function(main_results, outcomes, test_args)

         main_results.info("Overall results: {} warnings and {} errors",
                           main_results.warning_count, main_results.error_count)
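
The net effect in main(): the outcome file is generated at most once, before the task loop, and only when the single requested task is the driver-vs-reference comparison; all tasks then share one parsed result. A self-contained sketch of that pre-flight check (the task table entry and component names below are hypothetical placeholders):

import os
import sys
from typing import Dict, List

def do_analyze_driver_vs_reference(results, outcomes, args):
    """Stand-in for the real task function."""

KNOWN_TASKS: Dict[str, dict] = {
    'analyze_driver_vs_reference_example': {        # hypothetical entry
        'test_function': do_analyze_driver_vs_reference,
        'args': {'component_ref': 'ref_component',  # placeholder components
                 'component_driver': 'driver_component'},
    },
}

def ensure_outcome_file(outcome_path: str, tasks_list: List[str]) -> None:
    """Generate the outcome file only for a lone driver-vs-reference task."""
    if os.path.exists(outcome_path):
        return
    if len(tasks_list) > 1:
        sys.stderr.write("multiple tasks found, please provide a valid outcomes file.\n")
        sys.exit(2)
    task = KNOWN_TASKS[tasks_list[0]]
    if task['test_function'] is not do_analyze_driver_vs_reference:
        sys.stderr.write("please provide a valid outcomes file for {}.\n".format(tasks_list[0]))
        sys.exit(2)
    # Here the real script calls execute_reference_driver_tests() to
    # produce outcome_path, which is then parsed once and shared.

Failing early keeps a missing outcomes file from triggering a long test run on behalf of tasks that could never produce it.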