Commit

Allow @testitem to set its own timeout (#129)
* Allow `@testitem` to set its own timeout

* Round `timeout` to whole seconds

* Update tests using `TestItem` directly

* Remove TODO comment

* Bump version

* fixup! Round `timeout` to whole seconds

* fixup! Round `timeout` to whole seconds
nickrobinson251 authored Dec 8, 2023
1 parent 3c10e73 commit 94f74bf
Showing 9 changed files with 108 additions and 18 deletions.
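For context, the new keyword can be used roughly as follows. This is an illustrative sketch based on the diff below, not part of the commit; the file path and timeout values are made up.

```julia
using ReTestItems

# In a test file, e.g. test/slow_tests.jl (hypothetical path): abort this
# test item if it runs longer than 10 seconds.
@testitem "Sometimes too slow" timeout=10 begin
    @test 1 + 1 == 2
end

# When running the tests, the per-item `timeout=10` takes precedence over the
# `testitem_timeout` default; timeouts are only enforced when `nworkers > 0`.
runtests("test/slow_tests.jl"; nworkers=1, testitem_timeout=1800)
```
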
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,6 +1,6 @@
name = "ReTestItems"
uuid = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
version = "1.20.1"
version = "1.21.0"

[deps]
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
2 changes: 1 addition & 1 deletion README.md
@@ -192,7 +192,7 @@ runtests("frobble_tests.jl"; nworkers, worker_init_expr)
runtests(MyPackage)
```
- Pass to `runtests` any configuration you want your tests to run with, such as `retries`, `testitem_timeout`, `nworkers`, `nworker_threads`, `worker_init_expr`, `test_end_expr`.
See the `runtests` docstring for details.
See the [`runtests`](https://docs.juliahub.com/General/ReTestItems/stable/autodocs/#ReTestItems.runtests) docstring for details.
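
For illustration only (not part of this diff), such a call might look like the following; the keyword values here are made up:

```julia
using ReTestItems
# Run MyPackage's test items on 2 worker processes, retrying failed test
# items once and timing each test item out after 600 seconds.
runtests(MyPackage; nworkers=2, retries=1, testitem_timeout=600)
```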

---

22 changes: 13 additions & 9 deletions src/ReTestItems.jl
@@ -130,9 +130,11 @@ will be run.
## Configuring `runtests`
- `testitem_timeout::Real`: The number of seconds to wait until a `@testitem` is marked as failed.
Defaults to 30 minutes. Can also be set using the `RETESTITEMS_TESTITEM_TIMEOUT` environment variable.
If a `@testitem` sets its own `timeout` keyword, then that takes precedence.
Fractional values are rounded up to the nearest second.
Note timeouts are currently only applied when `nworkers > 0`.
- `retries::Int=$DEFAULT_RETRIES`: The number of times to retry a `@testitem` if either tests
do not pass or, if running with multiple worker processes, the worker fails or hits the `testitem_timeout`
do not pass or, if running with multiple worker processes, the worker fails or hits the timeout limit
while running the tests. Can also be set using the `RETESTITEMS_RETRIES` environment variable.
If a `@testitem` sets its own `retries` keyword, then the maximum of these two retry numbers
will be used as the retry limit for that `@testitem`. When `report=true`, the report will
@@ -228,20 +230,22 @@ function runtests(
logs in LOG_DISPLAY_MODES || throw(ArgumentError("`logs` must be one of $LOG_DISPLAY_MODES, got $(repr(logs))"))
report && logs == :eager && throw(ArgumentError("`report=true` is not compatible with `logs=:eager`"))
(0 ≤ memory_threshold ≤ 1) || throw(ArgumentError("`memory_threshold` must be between 0 and 1, got $(repr(memory_threshold))"))
testitem_timeout > 0 || throw(ArgumentError("`testitem_timeout` must be a positive number, got $(repr(testitem_timeout))"))
# If we were given paths but none were valid, then nothing to run.
!isempty(paths) && isempty(paths′) && return nothing
shouldrun_combined(ti) = shouldrun(ti) && _shouldrun(name, ti.name) && _shouldrun(tags, ti.tags)
mkpath(RETESTITEMS_TEMP_FOLDER) # ensure our folder wasn't removed
save_current_stdio()
nworkers = max(0, nworkers)
retries = max(0, retries)
timeout = ceil(Int, testitem_timeout)
debuglvl = Int(debug)
if debuglvl > 0
LoggingExtras.withlevel(LoggingExtras.Debug; verbosity=debuglvl) do
_runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, memory_threshold, verbose_results, debuglvl, report, logs)
_runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, timeout, retries, memory_threshold, verbose_results, debuglvl, report, logs)
end
else
return _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, memory_threshold, verbose_results, debuglvl, report, logs)
return _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, timeout, retries, memory_threshold, verbose_results, debuglvl, report, logs)
end
end

@@ -255,7 +259,7 @@ end
# By tracking and reusing test environments, we can avoid this issue.
const TEST_ENVS = Dict{String, String}()

function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, worker_init_expr::Expr, test_end_expr::Expr, testitem_timeout::Real, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol)
function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, worker_init_expr::Expr, test_end_expr::Expr, testitem_timeout::Int, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol)
# Don't recursively call `runtests` e.g. if we `include` a file which calls it.
# So we ignore the `runtests(...)` call in `test/runtests.jl` when `runtests(...)`
# was called from the command line.
@@ -298,7 +302,7 @@ end

function _runtests_in_current_env(
shouldrun, paths, projectfile::String, nworkers::Int, nworker_threads, worker_init_expr::Expr, test_end_expr::Expr,
testitem_timeout::Real, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol,
testitem_timeout::Int, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol,
)
start_time = time()
proj_name = something(Pkg.Types.read_project(projectfile).name, "")
@@ -428,8 +432,7 @@ end

any_non_pass(ts::DefaultTestSet) = ts.anynonpass

function record_timeout!(testitem, run_number::Int, timeout_limit::Real)
timeout_s = round(Int, timeout_limit)
function record_timeout!(testitem, run_number::Int, timeout_s::Int)
time_str = if timeout_s < 60
string(timeout_s, "s")
else
@@ -441,7 +444,7 @@ function record_timeout!(testitem, run_number::Int, timeout_limit::Real)
end
end
msg = "Timed out after $time_str running test item $(repr(testitem.name)) (run=$run_number)"
record_test_error!(testitem, msg, timeout_limit)
record_test_error!(testitem, msg, timeout_s)
end

function record_worker_terminated!(testitem, worker::Worker, run_number::Int)
@@ -473,7 +476,7 @@ end

function manage_worker(
worker::Worker, proj_name::AbstractString, testitems::TestItems, testitem::Union{TestItem,Nothing}, nworker_threads, worker_init_expr::Expr, test_end_expr::Expr,
timeout::Real, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol
default_timeout::Int, retries::Int, memory_threshold::Real, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol
)
ntestitems = length(testitems.testitems)
run_number = 1
@@ -487,6 +490,7 @@ function manage_worker(
worker = robust_start_worker(proj_name, nworker_threads, worker_init_expr, ntestitems)
end
testitem.workerid[] = worker.pid
timeout = something(testitem.timeout, default_timeout)
fut = remote_eval(worker, :(ReTestItems.runtestitem($testitem, GLOBAL_TEST_CONTEXT; test_end_expr=$(QuoteNode(test_end_expr)), verbose_results=$verbose_results, logs=$(QuoteNode(logs)))))
max_runs = 1 + max(retries, testitem.retries)
try
21 changes: 18 additions & 3 deletions src/macros.jl
@@ -119,6 +119,7 @@ struct TestItem
default_imports::Bool
setups::Vector{Symbol}
retries::Int
timeout::Union{Int,Nothing} # in seconds
file::String
line::Int
project_root::String
@@ -130,10 +131,10 @@ struct TestItem
stats::Vector{PerfStats} # populated when the test item is finished running
scheduled_for_evaluation::ScheduledForEvaluation # to keep track of whether the test item has been scheduled for evaluation
end
function TestItem(number, name, id, tags, default_imports, setups, retries, file, line, project_root, code)
function TestItem(number, name, id, tags, default_imports, setups, retries, timeout, file, line, project_root, code)
_id = @something(id, repr(hash(name, hash(relpath(file, project_root)))))
return TestItem(
number, name, _id, tags, default_imports, setups, retries, file, line, project_root, code,
number, name, _id, tags, default_imports, setups, retries, timeout, file, line, project_root, code,
TestSetup[],
Ref{Int}(0),
DefaultTestSet[],
@@ -219,10 +220,19 @@ If a `@testitem` passes on retry, then it will be recorded as passing in the tes
@testitem "Flaky test" retries=1 begin
@test rand() < 1e-4
end
If a `@testitem` should be aborted after a certain period of time, e.g. the test is known
to occasionally hang, then you can set a timeout (in seconds) by passing the `timeout` keyword.
Note that `timeout` currently only works when tests are run with multiple workers.
@testitem "Sometimes too slow" timeout=10 begin
@test sleep(rand(1:100))
end
"""
macro testitem(nm, exs...)
default_imports = true
retries = 0
timeout = nothing
tags = Symbol[]
setup = Any[]
_id = nothing
@@ -248,6 +258,11 @@ macro testitem(nm, exs...)
elseif kw == :retries
retries = ex.args[2]
@assert retries isa Integer "`retries` keyword must be passed an `Integer`"
elseif kw == :timeout
t = ex.args[2]
@assert t isa Real "`timeout` keyword must be passed a `Real`"
@assert t > 0 "`timeout` keyword must be passed a positive number. Got `timeout=$t`"
timeout = ceil(Int, t)
elseif kw == :_id
_id = ex.args[2]
# This will always be written to the JUnit XML as a String, require the user
@@ -272,7 +287,7 @@ macro testitem(nm, exs...)
ti = gensym(:ti)
esc(quote
let $ti = $TestItem(
$Ref(0), $nm, $_id, $tags, $default_imports, $setup, $retries,
$Ref(0), $nm, $_id, $tags, $default_imports, $setup, $retries, $timeout,
$String($_source.file), $_source.line,
$gettls(:__RE_TEST_PROJECT__, "."),
$q,
30 changes: 28 additions & 2 deletions test/integrationtests.jl
@@ -653,7 +653,7 @@ end
rm(tmpdir; force=true)
end

@testset "testitem timeout" begin
@testset "testitem_timeout" begin
file = joinpath(TEST_FILES_DIR, "_timeout_tests.jl")
# NOTE: this test must run with exactly 1 worker, so that we can test that the worker
# is replaced after the timeout and subsequent testitems still run.
@@ -666,9 +666,14 @@ end
err = only(non_passes(results))
@test err.test_type == :nontest_error
@test err.value == string(ErrorException("Timed out after 4s running test item \"Test item takes 60 seconds\" (run=1)"))

for t in (0, -1.1)
expected = ArgumentError("`testitem_timeout` must be a positive number, got $t")
@test_throws expected runtests(file; nworkers, testitem_timeout=t)
end
end

@testset "testitem timeout set via env variable" begin
@testset "testitem_timeout set via env variable" begin
file = joinpath(TEST_FILES_DIR, "_timeout_tests.jl")
# NOTE: this test must run with exactly 1 worker, so that we can test that the worker
# is replaced after the timeout and subsequent testitems still run.
@@ -685,6 +690,27 @@ end
@test err.value == string(ErrorException("Timed out after 4s running test item \"Test item takes 60 seconds\" (run=1)"))
end

@testset "@testitem `timeout`" begin
# NOTE: this test must run with >0 worker
# https://github.com/JuliaTesting/ReTestItems.jl/issues/87
nworkers = 1
# This file contains a single test that sets `timeout=6` and sleeps for 10 seconds.
file = joinpath(TEST_FILES_DIR, "_timeout2_tests.jl")
# The @testitem's own `timeout=6` should take precedence.
# The test is partly relying on the error message accurately reflecting the actual behaviour...
# so we test with a really big timeout so it would be obvious if the larger of the two
# timeouts were to be used (in which case the test would fail as the testitem would pass).
for testitem_timeout in (4, 8, 1_000_000)
results = encased_testset(()->runtests(file; nworkers, testitem_timeout))
@test n_tests(results) == 1
@test n_passed(results) == 0
# Test the error is as expected, namely that the timeout is 6 seconds.
err = only(non_passes(results))
@test err.test_type == :nontest_error
@test err.value == string(ErrorException("Timed out after 6s running test item \"Sets timeout=6\" (run=1)"))
end
end

@testset "Error outside `@testitem`" begin
@test_throws Exception runtests(joinpath(TEST_FILES_DIR, "_invalid_file1_test.jl"))
@test_throws Exception runtests(joinpath(TEST_FILES_DIR, "_invalid_file2_test.jl"))
2 changes: 1 addition & 1 deletion test/internals.jl
@@ -169,7 +169,7 @@ end # `include_testfiles!` testset
@testset "report_empty_testsets" begin
using ReTestItems: TestItem, report_empty_testsets, PerfStats, ScheduledForEvaluation
using Test: DefaultTestSet, Fail, Error
ti = TestItem(Ref(42), "Dummy TestItem", "DummyID", [], false, [], 0, "source/path", 42, ".", nothing)
ti = TestItem(Ref(42), "Dummy TestItem", "DummyID", [], false, [], 0, nothing, "source/path", 42, ".", nothing)

ts = DefaultTestSet("Empty testset")
report_empty_testsets(ti, ts)
2 changes: 1 addition & 1 deletion test/log_capture.jl
@@ -33,7 +33,7 @@ end
@testset "log capture -- reporting" begin
setup1 = @testsetup module TheTestSetup1 end
setup2 = @testsetup module TheTestSetup2 end
ti = TestItem(Ref(42), "TheTestItem", "ID007", [], false, [], 0, "source/path", 42, ".", nothing)
ti = TestItem(Ref(42), "TheTestItem", "ID007", [], false, [], 0, nothing, "source/path", 42, ".", nothing)
push!(ti.testsetups, setup1)
push!(ti.testsetups, setup2)
push!(ti.testsets, Test.DefaultTestSet("dummy"))
41 changes: 41 additions & 0 deletions test/macros.jl
@@ -252,6 +252,47 @@ end
@test_throws expected (@eval @testitem("five", _id=hash("five"), _run=false, begin end))
end

# Here we are just testing how the `timeout` keyword is parsed.
# The actual timeout functionality is tested in `integrationtests.jl`,
# because we need to call `runtests` with multiple workers to test timeout functionality.
# See https://github.com/JuliaTesting/ReTestItems.jl/issues/87
@testset "testitem `timeout` keyword" begin
expected(t) = "`timeout` keyword must be passed a positive number. Got `timeout=$t`"
for t in (0, -1.1)
@test_throws expected(t) (
@eval @testitem "Bad" timeout=$t begin
@test true
end
)
end

@test_throws "`timeout` keyword must be passed a `Real`" (
@eval @testitem "Bad" timeout=1im begin
@test true
end
)

no_run() do
ti = @testitem "TI" timeout=1 begin
@test true
end
@test ti.timeout isa Int
@test ti.timeout == 1

# We round up to the nearest second.
ti = @testitem "TI" timeout=1.1 begin
@test true
end
@test ti.timeout isa Int
@test ti.timeout == 2

ti = @testitem "TI" begin
@test true
end
@test ti.timeout == nothing
end
end


#=
NOTE:
4 changes: 4 additions & 0 deletions test/testfiles/_timeout2_tests.jl
@@ -0,0 +1,4 @@
@testitem "Sets timeout=6" timeout=6 begin
sleep(10)
@test true
end
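
As an aside (not part of the diff): per the `manage_worker` change above, the effective timeout for a test item is resolved with `something(testitem.timeout, default_timeout)`. A minimal sketch of that resolution, using a hypothetical helper name:

```julia
# Hypothetical helper mirroring `something(testitem.timeout, default_timeout)`
# from the manage_worker change above.
effective_timeout(item_timeout::Union{Int,Nothing}, default_timeout::Int) =
    something(item_timeout, default_timeout)

effective_timeout(6, 1_000_000)   # 6    -- the @testitem's own `timeout=6` wins
effective_timeout(nothing, 1800)  # 1800 -- falls back to the `testitem_timeout` default
```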

2 comments on commit 94f74bf

@nickrobinson251
Collaborator Author

@JuliaRegistrator

Registration pull request created: JuliaRegistries/General/96756

Tip: Release Notes

Did you know you can add release notes too? Just add markdown formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR, and if TagBot is installed it will also be added to the
release that TagBot creates. i.e.

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a v1.21.0 -m "<description of version>" 94f74bf0333331d024da48846e141c88cd12ffc9
git push origin v1.21.0
