From 68f8dbbb60ebaba2b614eb952807e0dcfd3de08d Mon Sep 17 00:00:00 2001
From: Marek Marecki
Date: Mon, 18 Dec 2023 00:26:30 +0100
Subject: [PATCH] Gather stats from the test run and cache them

The "stats" are just lists of cases tagged with "ok", "fail", etc. This
simple feature allows me to use

    ]$ make test TESTS=@fail

to rerun only the failed tests.
---
 new/tests/suite.py | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/new/tests/suite.py b/new/tests/suite.py
index 2d30f9ece..61d5efa46 100755
--- a/new/tests/suite.py
+++ b/new/tests/suite.py
@@ -820,6 +820,8 @@ def test_case_impl_checks(
         if r_exit == -6 and check_kind == "abort":
             pass
         elif r_exit != 0:
+            case_log.write(f"test program crashed\n")
+            case_log.write(f"]$ gdb --args ./build/bin/vm {base_path}.elf\n")
             return (
                 Status.Normal,
                 False,
@@ -998,6 +1000,7 @@ def test_case_impl(case_log, case_name, test_program, errors):
     check_kind = None
     try:
         check_kind = detect_check_kind(test_program)
+        case_log.write(f"using check kind: {check_kind}\n\n")
     except No_check_file_for:
         return (
             Status.Normal,
@@ -1023,6 +1026,8 @@
     test_relocatable = f"{base_path}.o"
     test_executable = f"{base_path}.elf"
 
+    case_log.write("First run\n")
+
     asm = lambda out_reloc, in_asm: test_case_impl_asm(case_log, out_reloc, in_asm)
 
     # Some tests (usually for the linker) have their source split over several
@@ -1130,6 +1135,8 @@
     if SKIP_DISASSEMBLER_TESTS:
         return make_good_report()
 
+    case_log.write("Second run\n")
+
     # SECOND RUN
     #
     # The second run is not strictly necessary, as the result produced MUST be
@@ -1268,7 +1275,8 @@ def prepare_dependencies(cases_dir):
 
 
 def main(args):
-    CASES_DIR = os.environ.get("VIUA_VM_TEST_CASES_DIR", "./tests/asm")
+    DEFAULT_CASES_DIR = "./tests/asm"
+    CASES_DIR = os.environ.get("VIUA_VM_TEST_CASES_DIR", DEFAULT_CASES_DIR)
     raw_cases = glob.glob(f"{CASES_DIR}/*.asm")
     cases = [
         (
@@ -1278,9 +1286,15 @@
         for each in sorted(raw_cases)
     ]
 
+    CACHE_DIR = os.path.join(CASES_DIR, ".cache")
+
     if len(args) > 1:
         run_only_these_cases = set(args[1].split(","))
         d = dict(cases)
+        if "@fail" in run_only_these_cases:
+            run_only_these_cases.remove("@fail")
+            with open(os.path.join(CACHE_DIR, "fail"), "r") as ifstream:
+                run_only_these_cases.update(ifstream.read().splitlines())
         cases = list(
             map(
                 lambda each: (
@@ -1290,6 +1304,7 @@
                 run_only_these_cases,
             )
         )
+        cases.sort()
 
     print(
         "looking for test programs in: {} (found {} test program{})".format(
@@ -1320,6 +1335,18 @@
     run_times = []
     perf_stats = []
 
+    os.makedirs(CACHE_DIR, exist_ok=True)
+    list_of_ok = []
+    list_of_fail = []
+    list_of_skip = []
+    list_of_bork = []
+    run_list = {
+        "ok": list_of_ok,
+        "fail": list_of_fail,
+        "skip": list_of_skip,
+        "bork": list_of_bork,
+    }
+
     print(" running cases")
     for case_no, (
         case_name,
@@ -1385,7 +1412,7 @@
         )
 
         if result:
-            tag = " ok "
+            tag = "ok"
             tag_color = "green"
             success_cases += 1
         else:
@@ -1401,9 +1428,11 @@
             tag_color = "purple_1b"
             symptom = "internal test suite failure"
 
+        run_list[tag].append(case_name)
+
         print(
             "[{}] {} {}".format(
-                colorise(tag_color, tag)
+                colorise(tag_color, f"{tag:^4s}")
                 + ((" => " + colorise("light_red", symptom)) if symptom else ""),
                 (
                     colorise(CASE_RUNTIME_COLOUR, format_run_time(run_time))
@@ -1434,6 +1463,10 @@
                 internal_test_suite_failure, limit=None, chain=True
             )
 
+    for result_tag, tagged_cases in run_list.items():
+        with open(os.path.join(CACHE_DIR, result_tag), "w") as ofstream:
+            ofstream.write("\n".join(tagged_cases))
+
     run_color: str = None
     run_exit_code: int = 0
    if success_cases == len(cases):
-- 
2.45.2
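
A usage sketch, not part of the patch: assuming the default cases directory
of "./tests/asm" and the cache layout introduced above (one newline-separated
list of case names per tag, written to <cases dir>/.cache/<tag>), the workflow
this commit enables looks roughly like:

    ]$ make test                    # full run; caches the ok/fail/skip/bork lists
    ]$ cat ./tests/asm/.cache/fail  # inspect the failed cases by name
    ]$ make test TESTS=@fail        # rerun only the failed cases

Note that only the "@fail" tag is expanded when selecting cases; the ok, skip,
and bork lists are written to the cache but not yet consulted.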