From 3b8788d22631ebe0bff6a0916a5546eda8ccfc67 Mon Sep 17 00:00:00 2001 From: Nick Robinson Date: Mon, 18 Dec 2023 23:16:34 +0000 Subject: [PATCH] Add support for skipping testitems (#117) * Initial support for skipping testitems * Add some tests * Simplify module building * Test skipped testitems have empty stats * WIP integration tests for skipping testitems * more tests * more tests 2 * docs * Test JUnit report for skipped test-items * cleanup * Fixup block expr test on v1.10 * Update README.md Co-authored-by: Nathan Daly * Update src/macros.jl Co-authored-by: Nathan Daly * Remove unused file * Fix and test log alignment * Print SKIP in warning color * Emphasise difference between `skip` and filtering `runtests` * fixup! Emphasise difference between `skip` and filtering `runtests` * Bump version * fixup! Fix and test log alignment --------- Co-authored-by: Nathan Daly --- Project.toml | 2 +- README.md | 52 ++++++++++- src/ReTestItems.jl | 37 ++++++++ src/log_capture.jl | 44 +++++---- src/macros.jl | 28 +++++- test/integrationtests.jl | 27 ++++++ test/internals.jl | 44 ++++++++- test/junit_xml.jl | 15 ++- test/log_capture.jl | 36 +++---- test/macros.jl | 114 ++++++++++++++++++++++- test/references/skipped_tests_report.xml | 77 +++++++++++++++ test/testfiles/_skip_tests.jl | 27 ++++++ 12 files changed, 455 insertions(+), 48 deletions(-) create mode 100644 test/references/skipped_tests_report.xml create mode 100644 test/testfiles/_skip_tests.jl diff --git a/Project.toml b/Project.toml index e444bd5..2271b79 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "ReTestItems" uuid = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" -version = "1.22.0" +version = "1.23.0" [deps] Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" diff --git a/README.md b/README.md index 68a5cde..329247b 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,17 @@ julia> runtests( ) ``` -You can use the `name` keyword, to select test-items by name. 
+For interactive sessions, all logs from the tests will be printed out in the REPL by default. +You can disable this by passing `logs=:issues` in which case logs from a test-item are only printed if that test-item errors or fails. +`logs=:issues` is also the default for non-interactive sessions. + +```julia +julia> runtests("test/Database/"; logs=:issues) +``` + +#### Filtering tests + +You can use the `name` keyword to select test-items by name. Pass a string to select a test-item by its exact name, or pass a regular expression (regex) to match multiple test-item names. @@ -70,12 +80,19 @@ julia> runtests("test/Database/"; name="issue-123") julia> runtests("test/Database/"; name=r"^issue") ``` -For interactive sessions, all logs from the tests will be printed out in the REPL by default. -You can disable this by passing `logs=:issues` in which case logs from a test-item are only printed if that test-items errors or fails. -`logs=:issues` is also the default for non-interactive sessions. +You can pass `tags` to select test-items by tag. +When passing multiple tags a test-item is only run if it has all the requested tags. ```julia -julia> runtests("test/Database/"; logs=:issues) +# Run tests that are tagged as both `regression` and `fast` +julia> runtests("test/Database/"; tags=[:regression, :fast]) +``` + +Filtering by `name` and `tags` can be combined to run only test-items that match both the name and tags. + +```julia +# Run tests named `issue*` which also have tag `regression`. +julia> runtests("test/Database/"; tags=:regression, name=r"^issue") ``` ## Writing tests @@ -130,6 +147,31 @@ end The `setup` is run once on each worker process that requires it; it is not run before every `@testitem` that depends on the setup. +#### Skipping tests + +The `skip` keyword can be used to skip a `@testitem`, meaning no code inside that test-item will run. +A skipped test-item logs that it is being skipped and records a single "skipped" test result, similar to `@test_skip`. 
+ +```julia +@testitem "skipped" skip=true begin + @test false +end +``` + +If `skip` is given as an `Expr`, it must return a `Bool` indicating whether or not to skip the test-item. +This expression will be run in a new module similar to a test-item immediately before the test-item would be run. + +```julia +# Don't run "orc v1" tests if we don't have orc v1 +@testitem "orc v1" skip=:(using LLVM; !LLVM.has_orc_v1()) begin + # tests +end +``` + +The `skip` keyword allows you to define the condition under which a test needs to be skipped, +for example if it can only be run on a certain platform. +See [filtering tests](#filtering-tests) for controlling which tests run in a particular `runtests` call. + #### Post-testitem hook If there is something that should be checked after every single `@testitem`, then it's possible to pass an expression to `runtests` using the `test_end_expr` keyword. diff --git a/src/ReTestItems.jl b/src/ReTestItems.jl index 48d29c2..a882b85 100644 --- a/src/ReTestItems.jl +++ b/src/ReTestItems.jl @@ -861,6 +861,40 @@ end const GLOBAL_TEST_CONTEXT_FOR_TESTING = TestContext("ReTestItems", 0) const GLOBAL_TEST_SETUPS_FOR_TESTING = Dict{Symbol, TestSetup}() +# Check the `skip` keyword, and return a `Bool` indicating if we should skip the testitem. +# If `skip` is an expression, run it in a new module just like how we run testitems. +# If the `skip` expression doesn't return a Bool, throw an informative error. +function should_skip(ti::TestItem) + ti.skip isa Bool && return ti.skip + # `skip` is an expression. + # Give same scope as testitem body, e.g. imports should work. + skip_body = deepcopy(ti.skip::Expr) + softscope_all!(skip_body) + # Run in a new module to not pollute `Main`. + # Need to store the result of the `skip` expression so we can check it. 
+ mod_name = gensym(Symbol(:skip_, ti.name)) + skip_var = gensym(:skip) + skip_mod_expr = :(module $mod_name; $skip_var = $skip_body; end) + skip_mod = Core.eval(Main, skip_mod_expr) + # Check what the expression evaluated to. + skip = getfield(skip_mod, skip_var) + !isa(skip, Bool) && _throw_not_bool(ti, skip) + return skip::Bool +end +_throw_not_bool(ti, skip) = error("Test item $(repr(ti.name)) `skip` keyword must be a `Bool`, got `skip=$(repr(skip))`") + +# Log that we skipped the testitem, and record a "skipped" test result with empty stats. +function skiptestitem(ti::TestItem, ctx::TestContext; verbose_results::Bool=true) + ts = DefaultTestSet(ti.name; verbose=verbose_results) + Test.record(ts, Test.Broken(:skipped, ti.name)) + push!(ti.testsets, ts) + stats = PerfStats() + push!(ti.stats, stats) + log_testitem_skipped(ti, ctx.ntestitems) + return TestItemResult(ts, stats) +end + + # assumes any required setups were expanded outside of a runtests context function runtestitem(ti::TestItem; kw...) 
# make a fresh TestSetupModules for each testitem run @@ -879,6 +913,9 @@ function runtestitem( ti::TestItem, ctx::TestContext; test_end_expr::Expr=Expr(:block), logs::Symbol=:eager, verbose_results::Bool=true, finish_test::Bool=true, ) + if should_skip(ti)::Bool + return skiptestitem(ti, ctx; verbose_results) + end name = ti.name log_testitem_start(ti, ctx.ntestitems) ts = DefaultTestSet(name; verbose=verbose_results) diff --git a/src/log_capture.jl b/src/log_capture.jl index a76991d..98f6438 100644 --- a/src/log_capture.jl +++ b/src/log_capture.jl @@ -55,7 +55,7 @@ function _print_scaled_one_dec(io, value, scale, label="") end print(io, label) end -function time_print(io; elapsedtime, bytes=0, gctime=0, allocs=0, compile_time=0, recompile_time=0) +function print_time(io; elapsedtime, bytes=0, gctime=0, allocs=0, compile_time=0, recompile_time=0) _print_scaled_one_dec(io, elapsedtime, 1e9, " secs") if gctime > 0 || compile_time > 0 print(io, " (") @@ -241,35 +241,47 @@ function _print_test_errors(report_iob, ts::DefaultTestSet, worker_info) return nothing end -# Marks the start of each test item -function log_testitem_start(ti::TestItem, ntestitems=0) - io = IOContext(IOBuffer(), :color => get(DEFAULT_STDOUT[], :color, false)::Bool) +function print_state(io, state, ti, ntestitems; color=:default) interactive = parse(Bool, get(ENV, "RETESTITEMS_INTERACTIVE", string(Base.isinteractive()))) print(io, format(now(), "HH:MM:SS | ")) !interactive && print(io, _mem_watermark()) - printstyled(io, "START"; bold=true) if ntestitems > 0 + # rpad/lpad so that the eval numbers are all vertically aligned + printstyled(io, rpad(uppercase(state), 5); bold=true, color) print(io, " (", lpad(ti.eval_number[], ndigits(ntestitems)), "/", ntestitems, ")") + else + printstyled(io, uppercase(state); bold=true) end - print(io, " test item $(repr(ti.name)) at ") + print(io, " test item $(repr(ti.name)) ") +end + +function print_file_info(io, ti) + print(io, "at ") printstyled(io, 
_file_info(ti); bold=true, color=:default) +end + +function log_testitem_skipped(ti::TestItem, ntestitems=0) + io = IOContext(IOBuffer(), :color => get(DEFAULT_STDOUT[], :color, false)::Bool) + print_state(io, "SKIP", ti, ntestitems; color=Base.warn_color()) + print_file_info(io, ti) + println(io) + write(DEFAULT_STDOUT[], take!(io.io)) +end + +# Marks the start of each test item +function log_testitem_start(ti::TestItem, ntestitems=0) + io = IOContext(IOBuffer(), :color => get(DEFAULT_STDOUT[], :color, false)::Bool) + print_state(io, "START", ti, ntestitems) + print_file_info(io, ti) println(io) write(DEFAULT_STDOUT[], take!(io.io)) end -# mostly copied from timing.jl function log_testitem_done(ti::TestItem, ntestitems=0) io = IOContext(IOBuffer(), :color => get(DEFAULT_STDOUT[], :color, false)::Bool) - interactive = parse(Bool, get(ENV, "RETESTITEMS_INTERACTIVE", string(Base.isinteractive()))) - print(io, format(now(), "HH:MM:SS | ")) - !interactive && print(io, _mem_watermark()) - printstyled(io, "DONE "; bold=true) - if ntestitems > 0 - print(io, " (", lpad(ti.eval_number[], ndigits(ntestitems)), "/", ntestitems, ")") - end - print(io, " test item $(repr(ti.name)) ") + print_state(io, "DONE", ti, ntestitems) x = last(ti.stats) # always print stats for most recent run - time_print(io; x.elapsedtime, x.bytes, x.gctime, x.allocs, x.compile_time, x.recompile_time) + print_time(io; x.elapsedtime, x.bytes, x.gctime, x.allocs, x.compile_time, x.recompile_time) println(io) write(DEFAULT_STDOUT[], take!(io.io)) end diff --git a/src/macros.jl b/src/macros.jl index d466251..dbd9426 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -120,6 +120,7 @@ struct TestItem setups::Vector{Symbol} retries::Int timeout::Union{Int,Nothing} # in seconds + skip::Union{Bool,Expr} file::String line::Int project_root::String @@ -131,10 +132,10 @@ struct TestItem stats::Vector{PerfStats} # populated when the test item is finished running scheduled_for_evaluation::ScheduledForEvaluation # to 
keep track of whether the test item has been scheduled for evaluation end -function TestItem(number, name, id, tags, default_imports, setups, retries, timeout, file, line, project_root, code) +function TestItem(number, name, id, tags, default_imports, setups, retries, timeout, skip, file, line, project_root, code) _id = @something(id, repr(hash(name, hash(relpath(file, project_root))))) return TestItem( - number, name, _id, tags, default_imports, setups, retries, timeout, file, line, project_root, code, + number, name, _id, tags, default_imports, setups, retries, timeout, skip, file, line, project_root, code, TestSetup[], Ref{Int}(0), DefaultTestSet[], @@ -145,7 +146,7 @@ function TestItem(number, name, id, tags, default_imports, setups, retries, time end """ - @testitem "name" [tags=[] setup=[] retries=0 default_imports=true] begin + @testitem "name" [tags=[] setup=[] retries=0 skip=false default_imports=true] begin # code that will be run as tests end @@ -228,6 +229,18 @@ Note that `timeout` currently only works when tests are run with multiple worker @testitem "Sometimes too slow" timeout=10 begin @test sleep(rand(1:100)) end + +If a `@testitem` needs to be skipped, then you can set the `skip` keyword. +Either pass `skip=true` to unconditionally skip the test item, or pass `skip` an +expression that returns a `Bool` to determine if the testitem should be skipped. + + @testitem "Skip on old Julia" skip=(VERSION < v"1.9") begin + v = [1] + @test 0 == @allocations sum(v) + end + +The `skip` expression is run in its own module, just like a test-item. +No code inside a `@testitem` is run when a test-item is skipped. """ macro testitem(nm, exs...) default_imports = true @@ -235,6 +248,7 @@ macro testitem(nm, exs...) timeout = nothing tags = Symbol[] setup = Any[] + skip = false _id = nothing _run = true # useful for testing `@testitem` itself _source = QuoteNode(__source__) @@ -257,12 +271,16 @@ macro testitem(nm, exs...) 
setup = map(Symbol, setup.args) elseif kw == :retries retries = ex.args[2] - @assert retries isa Integer "`default_imports` keyword must be passed an `Integer`" + @assert retries isa Integer "`retries` keyword must be passed an `Integer`" elseif kw == :timeout t = ex.args[2] @assert t isa Real "`timeout` keyword must be passed a `Real`" @assert t > 0 "`timeout` keyword must be passed a positive number. Got `timeout=$t`" timeout = ceil(Int, t) + elseif kw == :skip + skip = ex.args[2] + # If the `Expr` doesn't evaluate to a Bool, throws at runtime. + @assert skip isa Union{Bool,Expr} "`skip` keyword must be passed a `Bool`" elseif kw == :_id _id = ex.args[2] # This will always be written to the JUnit XML as a String, require the user @@ -287,7 +305,7 @@ macro testitem(nm, exs...) ti = gensym(:ti) esc(quote let $ti = $TestItem( - $Ref(0), $nm, $_id, $tags, $default_imports, $setup, $retries, $timeout, + $Ref(0), $nm, $_id, $tags, $default_imports, $setup, $retries, $timeout, $skip, $String($_source.file), $_source.line, $gettls(:__RE_TEST_PROJECT__, "."), $q, diff --git a/test/integrationtests.jl b/test/integrationtests.jl index 4910fd0..4237482 100644 --- a/test/integrationtests.jl +++ b/test/integrationtests.jl @@ -1032,4 +1032,31 @@ end @test_throws expected_err runtests(file; nworkers=1, memory_threshold=xx) end +@testset "skipping testitems" begin + # Test report printing has test items as "skipped" (which appear under "Broken") + using IOCapture + file = joinpath(TEST_FILES_DIR, "_skip_tests.jl") + results = encased_testset(()->runtests(file; nworkers=1)) + c = IOCapture.capture() do + Test.print_test_results(results) + end + @test contains( + c.output, + r""" + Test Summary: \s* \| Pass Fail Broken Total Time + ReTestItems \s* \| 4 1 3 8 \s*\d*.\ds + """ + ) +end + +@testset "logs are aligned" begin + file = joinpath(TEST_FILES_DIR, "_skip_tests.jl") + c1 = IOCapture.capture() do + encased_testset(()->runtests(file)) + end + @test contains(c1.output, r"START 
\(1/6\) test item \"no skip, 1 pass\"") + @test contains(c1.output, r"DONE \(1/6\) test item \"no skip, 1 pass\"") + @test contains(c1.output, r"SKIP \(3/6\) test item \"skip true\"") +end + end # integrationtests.jl testset diff --git a/test/internals.jl b/test/internals.jl index 9c30fc7..67e2811 100644 --- a/test/internals.jl +++ b/test/internals.jl @@ -169,7 +169,7 @@ end # `include_testfiles!` testset @testset "report_empty_testsets" begin using ReTestItems: TestItem, report_empty_testsets, PerfStats, ScheduledForEvaluation using Test: DefaultTestSet, Fail, Error - ti = TestItem(Ref(42), "Dummy TestItem", "DummyID", [], false, [], 0, nothing, "source/path", 42, ".", nothing) + ti = TestItem(Ref(42), "Dummy TestItem", "DummyID", [], false, [], 0, nothing, false, "source/path", 42, ".", nothing) ts = DefaultTestSet("Empty testset") report_empty_testsets(ti, ts) @@ -281,4 +281,46 @@ end @test_throws ArgumentError("\"$nontest_file\" is not a test file") _validated_paths((nontest_file,), true) end +@testset "skiptestitem" begin + # Test that `skiptestitem` unconditionally skips a testitem + # and returns `TestItemResult` with a single "skipped" `Test.Result` + ti = @testitem "skip" _run=false begin + @test true + @test false + @test error() + end + ctx = ReTestItems.TestContext("test_ctx", 1) + ti_res = ReTestItems.skiptestitem(ti, ctx) + @test ti_res isa TestItemResult + test_res = only(ti_res.testset.results) + @test test_res isa Test.Result + @test test_res isa Test.Broken + @test test_res.test_type == :skipped +end + +@testset "should_skip" begin + should_skip = ReTestItems.should_skip + + ti = @testitem("x", skip=true, _run=false, begin end) + @test should_skip(ti) + ti = @testitem("x", skip=false, _run=false, begin end) + @test !should_skip(ti) + + ti = @testitem("x", skip=:(1 == 1), _run=false, begin end) + @test should_skip(ti) + ti = @testitem("x", skip=:(1 != 1), _run=false, begin end) + @test !should_skip(ti) + + ti = @testitem("x", skip=:(x = 1; x + x == 
2), _run=false, begin end) + @test should_skip(ti) + ti = @testitem("x", skip=:(x = 1; x + x != 2), _run=false, begin end) + @test !should_skip(ti) + + ti = @testitem("x", skip=:(x = 1; x + x), _run=false, begin end) + @test_throws "Test item \"x\" `skip` keyword must be a `Bool`, got `skip=2`" should_skip(ti) + + ti = @testitem("x", skip=:(x = 1; x + y), _run=false, begin end) + @test_throws UndefVarError(:y) should_skip(ti) +end + end # internals.jl testset diff --git a/test/junit_xml.jl b/test/junit_xml.jl index 07eeb4f..271adbd 100644 --- a/test/junit_xml.jl +++ b/test/junit_xml.jl @@ -26,6 +26,7 @@ function remove_variables(str) # Ignore the full path the test file. r" at .*/testfiles/_junit_xml_test" => " at path/to/testfiles/_junit_xml_test", r" at .*/testfiles/_retry_tests" => " at path/to/testfiles/_retry_tests", + r" at .*/testfiles/_skip_tests" => " at path/to/testfiles/_skip_tests", # Ignore worker pid r"on worker [0-9]*" => "on worker 0", # Remove backticks (because backticks were added to some error messages in v1.9+). @@ -69,7 +70,7 @@ end @testset "junit_xml.jl" verbose=true begin -@testset "JUnit reference tests" begin +@testset "JUnit reference tests" verbose=true begin REF_DIR = joinpath(pkgdir(ReTestItems), "test", "references") @testset "retries=0, nworkers=$nworkers" for nworkers in (0, 1) mktempdir() do dir @@ -122,6 +123,18 @@ end end end end + @testset "skipped testitems" begin + mktempdir() do dir + withenv("RETESTITEMS_REPORT_LOCATION" => dir) do + try # Ignore the fact that the `_skip_tests.jl` testset has failures/errors. 
+ run(`$(Base.julia_cmd()) --project -e 'using ReTestItems; runtests("testfiles/_skip_tests.jl"; report=true)'`) + catch + end + report = only(filter(endswith("xml"), readdir(dir, join=true))) + test_reference(joinpath(REF_DIR, "skipped_tests_report.xml"), report) + end + end + end end @testset "JUnit empty report" begin diff --git a/test/log_capture.jl b/test/log_capture.jl index 254f51c..78d80fb 100644 --- a/test/log_capture.jl +++ b/test/log_capture.jl @@ -33,7 +33,7 @@ end @testset "log capture -- reporting" begin setup1 = @testsetup module TheTestSetup1 end setup2 = @testsetup module TheTestSetup2 end - ti = TestItem(Ref(42), "TheTestItem", "ID007", [], false, [], 0, nothing, "source/path", 42, ".", nothing) + ti = TestItem(Ref(42), "TheTestItem", "ID007", [], false, [], 0, nothing, false, "source/path", 42, ".", nothing) push!(ti.testsetups, setup1) push!(ti.testsetups, setup2) push!(ti.testsets, Test.DefaultTestSet("dummy")) @@ -97,69 +97,69 @@ end @test_throws AssertionError ReTestItems.default_log_display_mode(true, -1, false) end -@testset "time_print" begin +@testset "print_time" begin io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=0) + ReTestItems.print_time(io, elapsedtime=0) @test String(take!(io)) == "0 secs" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=123.456 * 1e9) + ReTestItems.print_time(io, elapsedtime=123.456 * 1e9) @test String(take!(io)) == "123.5 secs" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=0.09 * 1e9) + ReTestItems.print_time(io, elapsedtime=0.09 * 1e9) @test String(take!(io)) == "<0.1 secs" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, gctime=0.5*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, gctime=0.5*1e9) @test String(take!(io)) == "1.0 secs (50.0% GC)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, gctime=0.0009*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, gctime=0.0009*1e9) @test String(take!(io)) == "1.0 secs (<0.1% GC)" io = IOBuffer() - 
ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.5*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.5*1e9) @test String(take!(io)) == "1.0 secs (50.0% compile)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.0009*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.0009*1e9) @test String(take!(io)) == "1.0 secs (<0.1% compile)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9) @test String(take!(io)) == "1.0 secs (50.0% compile, 50.0% recompile)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9) @test String(take!(io)) == "1.0 secs (<0.1% compile, <0.1% recompile)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9, gctime=0.5*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9, gctime=0.5*1e9) @test String(take!(io)) == "1.0 secs (50.0% compile, 50.0% recompile, 50.0% GC)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9, gctime=0.0009*1e9) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9, gctime=0.0009*1e9) @test String(take!(io)) == "1.0 secs (<0.1% compile, <0.1% recompile, <0.1% GC)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, allocs=1, bytes=1024) + ReTestItems.print_time(io, elapsedtime=1e9, allocs=1, bytes=1024) @test String(take!(io)) == "1.0 secs, 1 alloc (1.024 KB)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, allocs=2, bytes=1_024_000) + ReTestItems.print_time(io, elapsedtime=1e9, allocs=2, bytes=1_024_000) @test String(take!(io)) == 
"1.0 secs, 2 allocs (1.024 MB)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9, gctime=0.5*1e9, allocs=9001, bytes=1024_000_000) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.5*1e9, recompile_time=0.5*1e9, gctime=0.5*1e9, allocs=9001, bytes=1024_000_000) @test String(take!(io)) == "1.0 secs (50.0% compile, 50.0% recompile, 50.0% GC), 9.00 K allocs (1.024 GB)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9, gctime=0.0009*1e9, allocs=9_001_000, bytes=1024_000_000_000) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=0.0009*1e9, recompile_time=0.0009*1e9, gctime=0.0009*1e9, allocs=9_001_000, bytes=1024_000_000_000) @test String(take!(io)) == "1.0 secs (<0.1% compile, <0.1% recompile, <0.1% GC), 9.00 M allocs (1.024 TB)" io = IOBuffer() - ReTestItems.time_print(io, elapsedtime=1e9, compile_time=1e9, recompile_time=1e9, gctime=1e9, allocs=9_001_000_000, bytes=1024_000_000_000_000) + ReTestItems.print_time(io, elapsedtime=1e9, compile_time=1e9, recompile_time=1e9, gctime=1e9, allocs=9_001_000_000, bytes=1024_000_000_000_000) @test String(take!(io)) == "1.0 secs (100.0% compile, 100.0% recompile, 100.0% GC), 9.00 B allocs (1.024 PB)" end diff --git a/test/macros.jl b/test/macros.jl index 96684d6..7c01885 100644 --- a/test/macros.jl +++ b/test/macros.jl @@ -1,4 +1,3 @@ -using AutoHashEquals using ReTestItems using Test @@ -293,6 +292,119 @@ end end end +@testset "testitem `skip` keyword" begin + function test_skipped(ti_result) + ts = ti_result.testset + # No tests should have been run + @test n_passed(ts) == 0 + # A single "skipped" result should be recorded. Test uses `Broken` for skipped. + @test only(ts.results) isa Test.Broken + # Since no test was run, the stats should be empty / zeroed. 
+ @test ti_result.stats == ReTestItems.PerfStats() + end + # test case `skip` is a `Bool` + ti = @testitem "skip isa bool" skip=true _run=false begin + @test true + end + @test ti.skip + res = ReTestItems.runtestitem(ti) + test_skipped(res) + + # test no code in the test item is run when `skip=true` + ti = @testitem "test contains error" skip=true _run=false begin + @test error("err") + end + @test ti.skip + res = ReTestItems.runtestitem(ti) + test_skipped(res) + + # test case `skip` given a literal that's not a `Bool` + expected = "`skip` keyword must be passed a `Bool`" + @test_throws expected ( + @eval @testitem "bad 1" skip=123 begin + @test true + end + ) + @test_throws expected ( + @eval @testitem "bad 2" skip=foo begin + @test true + end + ) + + # test case `skip` is a `Expr` evaluating to a `Bool` + ti = @testitem "skip isa expr 1" skip=:(1+1 == 2) _run=false begin + @test true + end + # want to test a case where `skip` is not a `:block` + @assert ti.skip.head != :block + @test ti.skip == :(1+1 == 2) + res = ReTestItems.runtestitem(ti) + test_skipped(res) + + # test case `skip` is a `Expr` evaluating to a `Bool` + ti = @testitem "skip isa expr 2" skip=(quote 1+1 == 2 end) _run=false begin + @test true + end + # want to test a case where `skip` is a `:block` + @assert ti.skip.head == :block + @test Base.remove_linenums!(ti.skip) == Base.remove_linenums!(quote 1+1 == 2 end) + res = ReTestItems.runtestitem(ti) + test_skipped(res) + + # test that no code is evaluated until `runtestitem` is called + ti = @testitem "skip expr has error" skip=:(throw("oops")) _run=false begin + @test true + end + @test ti.skip == :(throw("oops")) + @test_throws "oops" ReTestItems.runtestitem(ti) + + # test that skip expression can load modules + ti = @testitem "skip expr loads module" skip=:(using AutoHashEquals; AutoHashEquals isa Module) _run=false begin + @test true + end + @test ti.skip isa Expr + res = ReTestItems.runtestitem(ti) + test_skipped(res) + + # test that skip 
expression does not pollute Main + var = gensym(:skip_var) + ti = @testitem "skip expr defines variable" skip=:($var=1; $var==1) _run=false begin + @test true + end + @test ti.skip isa Expr + res = ReTestItems.runtestitem(ti) + test_skipped(res) + @test !isdefined(Main, var) + + # test that skip expression does not get modified + @testitem "skip not modified" skip=(x=1; x==1) _run=false begin + @test true + end + @assert ti.skip isa Expr + before = deepcopy(ti.skip) + @assert ti.skip !== before + res = ReTestItems.runtestitem(ti) + test_skipped(res) + @test ti.skip == before + + @testset "skipping is logged" begin + old = ReTestItems.DEFAULT_STDOUT[] + try + io = IOBuffer() + ReTestItems.DEFAULT_STDOUT[] = io + line = @__LINE__() + 1 + ti = @testitem "skip this" skip=true _run=false begin + @test true + end + file = relpath(@__FILE__(), ti.project_root) + ReTestItems.runtestitem(ti) + output = String(take!(io)) + @test contains(output, "SKIP test item \"skip this\" at $file:$line") + finally + ReTestItems.DEFAULT_STDOUT[] = old + end + end +end #= NOTE: diff --git a/test/references/skipped_tests_report.xml b/test/references/skipped_tests_report.xml new file mode 100644 index 0000000..4cd4f9f --- /dev/null +++ b/test/references/skipped_tests_report.xml @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Error in testset "skip false, 1 pass, 1 fail" on worker 18026: +Test Failed at /Users/nick/repos/ReTestItems.jl/test/testfiles/_skip_tests.jl:8 + Expression: false + +No Captured Logs for test item "skip false, 1 pass, 1 fail" at test/testfiles/_skip_tests.jl:6 on worker 18026 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/testfiles/_skip_tests.jl b/test/testfiles/_skip_tests.jl new file mode 100644 index 0000000..59fa867 --- /dev/null +++ b/test/testfiles/_skip_tests.jl @@ -0,0 +1,27 @@ +# 1 PASS. 
+@testitem "no skip, 1 pass" begin + @test true +end +# 1 PASS, 1 FAIL +@testitem "skip false, 1 pass, 1 fail" skip=false begin + @test true + @test false +end +# two tests; SKIPPED. +@testitem "skip true" skip=true begin + @test true + @test true +end +# skip expression false, 2 PASS +@testitem "skip expr false, 2 pass" skip=VERSION > v"3" begin + @test true + @test true +end +# testitem has error, skip expression true; SKIPPED +@testitem "skip expr true" skip=VERSION < v"3" begin + no_existent_func() +end +# multi-line skip expression returns true; SKIPPED +@testitem "skip expr block true" skip=:(using AutoHashEquals; AutoHashEquals isa Module) begin + @test false +end