author | Andrew Kaster <andrewdkaster@gmail.com> | 2021-04-24 23:53:23 -0600
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2021-04-25 09:36:49 +0200
commit | 35c0a6c54d4e67f0d600044ed8eae0ae5e5adfba (patch) |
tree | bf10d283d58b3e37c0db7aef4398dff6b1e07cd3 /Userland/Libraries/LibTest/TestSuite.cpp |
parent | 89ee38fe5cf6f62821dabc98f4dfbc109d0874fd (diff) |
AK+Userland: Move AK/TestSuite.h into LibTest and rework Tests' CMake
As many macros as possible are moved to Macros.h, while the
macros to create a test case are moved to TestCase.h. TestCase is now
the only user-facing header for creating a test case. TestSuite and its
helpers have moved into a .cpp file. Instead of requiring a TEST_MAIN
macro to be instantiated in the test file, a TestMain.cpp file is
provided that will be linked into each test. A side effect is that
splitting test cases across multiple files is as simple as adding
them all to the same executable.
The test main should be portable to kernel mode as well, so if
there's a set of tests that should be run in self-test mode in kernel
space, we can accommodate that.
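
The TestMain.cpp itself is outside this diff, which is limited to TestSuite.cpp. As a rough sketch of the idea, assuming only the TestSuite::the() and TestSuite::main() entry points visible in the diff below, the shared boilerplate might look like this (the KERNEL branch and argument handling are assumptions, not the commit's verbatim file):

    // Sketch of the boilerplate main linked into each test; hypothetical,
    // not the commit's actual file.
    #include <LibTest/TestSuite.h>

    #ifdef KERNEL
    // Assumption: in kernel mode the entry point would be renamed so a
    // self-test harness can invoke it directly.
    #    define TEST_MAIN test_main
    #else
    #    define TEST_MAIN main
    #endif

    int TEST_MAIN(int argc, char** argv)
    {
        // argv[0] doubles as the suite name that TestSuite::main() reports.
        return ::Test::TestSuite::the().main(argv[0], argc, argv);
    }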
A new serenity_test CMake function streamlines adding a new test. It
takes the test source file, the subdirectory under /usr/Tests where
the test application is installed, and an optional list of libraries
to link against the test application. To accommodate future tests
where the provided TestMain.cpp is not suitable (e.g. test-js), a
CUSTOM_MAIN parameter can be passed to the function to skip linking
the boilerplate main function.
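
The function itself lives in the CMake build scripts and is not part of this file-limited diff. A minimal sketch of how it could be wired up, assuming CUSTOM_MAIN is parsed as an option flag and LIBS as a multi-value argument (all parsing details here are assumptions):

    # Sketch only; not the commit's exact implementation.
    function(serenity_test test_src sub_dir)
        cmake_parse_arguments(PARSE_ARGV 2 SERENITY_TEST "CUSTOM_MAIN" "" "LIBS")
        set(TEST_SOURCES ${test_src})
        if (NOT SERENITY_TEST_CUSTOM_MAIN)
            # Link the shared boilerplate main unless the test brings its own.
            list(APPEND TEST_SOURCES "${CMAKE_SOURCE_DIR}/Userland/Libraries/LibTest/TestMain.cpp")
        endif()
        get_filename_component(test_name ${test_src} NAME_WE)
        add_executable(${test_name} ${TEST_SOURCES})
        target_link_libraries(${test_name} LibTest ${SERENITY_TEST_LIBS})
        install(TARGETS ${test_name} RUNTIME DESTINATION usr/Tests/${sub_dir})
    endfunction()

A caller would then write something like serenity_test(TestFoo.cpp Foo LIBS LibFoo), with TestFoo.cpp and LibFoo as placeholder names.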
Diffstat (limited to 'Userland/Libraries/LibTest/TestSuite.cpp')
-rw-r--r-- | Userland/Libraries/LibTest/TestSuite.cpp | 148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/Userland/Libraries/LibTest/TestSuite.cpp b/Userland/Libraries/LibTest/TestSuite.cpp
new file mode 100644
index 0000000000..ac2b93b7de
--- /dev/null
+++ b/Userland/Libraries/LibTest/TestSuite.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021, Andrew Kaster <akaster@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <LibTest/Macros.h> // intentionally first -- we redefine VERIFY and friends in here
+
+#include <LibCore/ArgsParser.h>
+#include <LibTest/TestSuite.h>
+#include <stdlib.h>
+#include <sys/time.h>
+
+namespace Test {
+
+TestSuite* TestSuite::s_global = nullptr;
+
+class TestElapsedTimer {
+public:
+    TestElapsedTimer() { restart(); }
+
+    void restart() { gettimeofday(&m_started, nullptr); }
+
+    u64 elapsed_milliseconds()
+    {
+        struct timeval now = {};
+        gettimeofday(&now, nullptr);
+
+        struct timeval delta = {};
+        timersub(&now, &m_started, &delta);
+
+        return delta.tv_sec * 1000 + delta.tv_usec / 1000;
+    }
+
+private:
+    struct timeval m_started = {};
+};
+
+// Declared in Macros.h
+void current_test_case_did_fail()
+{
+    TestSuite::the().current_test_case_did_fail();
+}
+
+// Declared in TestCase.h
+void add_test_case_to_suite(const NonnullRefPtr<TestCase>& test_case)
+{
+    TestSuite::the().add_case(test_case);
+}
+
+int TestSuite::main(const String& suite_name, int argc, char** argv)
+{
+    m_suite_name = suite_name;
+
+    Core::ArgsParser args_parser;
+
+    bool do_tests_only = getenv("TESTS_ONLY") != nullptr;
+    bool do_benchmarks_only = false;
+    bool do_list_cases = false;
+    const char* search_string = "*";
+
+    args_parser.add_option(do_tests_only, "Only run tests.", "tests", 0);
+    args_parser.add_option(do_benchmarks_only, "Only run benchmarks.", "bench", 0);
+    args_parser.add_option(do_list_cases, "List available test cases.", "list", 0);
+    args_parser.add_positional_argument(search_string, "Only run matching cases.", "pattern", Core::ArgsParser::Required::No);
+    args_parser.parse(argc, argv);
+
+    const auto& matching_tests = find_cases(search_string, !do_benchmarks_only, !do_tests_only);
+
+    if (do_list_cases) {
+        outln("Available cases for {}:", suite_name);
+        for (const auto& test : matching_tests) {
+            outln("    {}", test.name());
+        }
+        return 0;
+    }
+
+    outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
+
+    return run(matching_tests);
+}
+
+NonnullRefPtrVector<TestCase> TestSuite::find_cases(const String& search, bool find_tests, bool find_benchmarks)
+{
+    NonnullRefPtrVector<TestCase> matches;
+    for (const auto& t : m_cases) {
+        if (!search.is_empty() && !t.name().matches(search, CaseSensitivity::CaseInsensitive)) {
+            continue;
+        }
+
+        if (!find_tests && !t.is_benchmark()) {
+            continue;
+        }
+        if (!find_benchmarks && t.is_benchmark()) {
+            continue;
+        }
+
+        matches.append(t);
+    }
+    return matches;
+}
+
+int TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
+{
+    size_t test_count = 0;
+    size_t test_failed_count = 0;
+    size_t benchmark_count = 0;
+    TestElapsedTimer global_timer;
+
+    for (const auto& t : tests) {
+        const auto test_type = t.is_benchmark() ? "benchmark" : "test";
+
+        warnln("Running {} '{}'.", test_type, t.name());
+        m_current_test_case_passed = true;
+
+        TestElapsedTimer timer;
+        t.func()();
+        const auto time = timer.elapsed_milliseconds();
+
+        dbgln("{} {} '{}' in {}ms", m_current_test_case_passed ? "Completed" : "Failed", test_type, t.name(), time);
+
+        if (t.is_benchmark()) {
+            m_benchtime += time;
+            benchmark_count++;
+        } else {
+            m_testtime += time;
+            test_count++;
+        }
+
+        if (!m_current_test_case_passed) {
+            test_failed_count++;
+        }
+    }
+
+    dbgln("Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other).",
+        test_count,
+        benchmark_count,
+        global_timer.elapsed_milliseconds(),
+        m_testtime,
+        m_benchtime,
+        global_timer.elapsed_milliseconds() - (m_testtime + m_benchtime));
+    dbgln("Out of {} tests, {} passed and {} failed.", test_count, test_count - test_failed_count, test_failed_count);
+
+    return (int)test_failed_count;
+}
+
+}