tests: use more specialised testthat functions #858

Merged · 1 commit · Dec 18, 2024
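For context, the commit replaces generic assertions with the dedicated testthat helpers throughout the test suite. A minimal sketch of the two substitutions, assuming testthat 3e (which provides expect_null() and expect_no_warning()); the objects x and quiet_fn below are illustrative only:

library(testthat)

x = NULL

# before: generic comparison against NULL
expect_equal(x, NULL)
# after: dedicated assertion with a clearer failure message
expect_null(x)

quiet_fn = function() 42

# before: the regexp = NA idiom asserted "no warning is raised"
expect_warning(quiet_fn(), NA)
# after: dedicated assertion, preferred in recent testthat versions
expect_no_warning(quiet_fn())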
24 changes: 12 additions & 12 deletions tests/testthat/test_GraphLearner.R
@@ -932,7 +932,7 @@ test_that("validation, internal_valid_scores", {
# None of the Learners can do validation -> NULL
glrn1 = as_learner(as_graph(lrn("classif.rpart")))$train(tsk("iris"))
expect_false("validation" %in% glrn1$properties)
expect_equal(glrn1$internal_valid_scores, NULL)
expect_null(glrn1$internal_valid_scores)

glrn2 = as_learner(as_graph(lrn("classif.debug")))
expect_true("validation" %in% glrn2$properties)
@@ -945,7 +945,7 @@ test_that("validation, internal_valid_scores", {

set_validate(glrn2, NULL)
glrn2$train(tsk("iris"))
expect_true(is.null(glrn2$internal_valid_scores))
expect_null(glrn2$internal_valid_scores)

# No validation set specified --> No internal_valid_scores
expect_equal(
@@ -960,13 +960,13 @@ test_that("internal_tuned_values", {
task = tsk("iris")
glrn1 = as_learner(as_graph(lrn("classif.rpart")))$train(task)
expect_false("internal_tuning" %in% glrn1$properties)
expect_equal(glrn1$internal_tuned_values, NULL)
expect_null(glrn1$internal_tuned_values)

# learner with internal tuning
glrn2 = as_learner(as_graph(lrn("classif.debug")))
expect_true("internal_tuning" %in% glrn2$properties)
expect_equal(glrn2$internal_tuned_values, NULL)
expect_null(glrn2$internal_tuned_values)
glrn2$train(task)
expect_equal(glrn2$internal_tuned_values, named_list())
glrn2$param_set$set_values(classif.debug.early_stopping = TRUE, classif.debug.iter = 1000)
@@ -981,8 +981,8 @@ test_that("set_validate", {
expect_equal(glrn$validate, "test")
expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, "predefined")
set_validate(glrn, NULL)
expect_equal(glrn$validate, NULL)
expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, NULL)
expect_null(glrn$validate)
expect_null(glrn$graph$pipeops$classif.debug$learner$validate)
set_validate(glrn, 0.2, ids = "classif.debug")
expect_equal(glrn$validate, 0.2)
expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, "predefined")
@@ -1002,7 +1002,7 @@ test_that("set_validate", {
expect_equal(glrn2$validate, 0.25)
expect_equal(glrn2$graph$pipeops$polearner$learner$validate, "predefined")
expect_equal(glrn2$graph$pipeops$polearner$learner$graph$pipeops$final$learner$validate, "predefined")
expect_equal(glrn2$graph$pipeops$polearner$learner$graph$pipeops$classif.debug$learner$validate, NULL)
expect_null(glrn2$graph$pipeops$polearner$learner$graph$pipeops$classif.debug$learner$validate)

# graphlearner in graphlearner: failure handling
glrn = as_learner(po("pca") %>>% lrn("classif.debug"))
@@ -1013,15 +1013,15 @@ test_that("set_validate", {
set_validate(gglrn, validate = "test", args = list(po_glrn = list(ids = "pca"))),
"Trying to heuristically reset"
)
expect_equal(gglrn$validate, NULL)
expect_null(gglrn$validate)

# base_learner is not final learner
glrn = as_learner(lrn("classif.debug") %>>% po("nop"))
set_validate(glrn, 0.3)
expect_equal(glrn$graph$pipeops$classif.debug$validate, "predefined")
set_validate(glrn, NULL)
expect_equal(glrn$graph$pipeops$classif.debug$validate, NULL)
expect_equal(glrn$validate, NULL)
expect_null(glrn$graph$pipeops$classif.debug$validate)
expect_null(glrn$validate)

# args and args_all
bglrn = as_learner(ppl("branch", list(lrn("classif.debug", id = "d1"), lrn("classif.debug", id = "d2"))))
@@ -1033,13 +1033,13 @@ test_that("set_validate", {
# args
set_validate(gglrn, validate = 0.2, args = list(po_glrn = list(ids = "d1")))
expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d1$validate, "predefined")
expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate, NULL)
expect_null(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate)

# args all
gglrn = as_learner(obj)
set_validate(gglrn, validate = 0.2, args_all = list(ids = "d1"))
expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d1$validate, "predefined")
expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate, NULL)
expect_null(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate)
})

test_that("marshal", {
12 changes: 6 additions & 6 deletions tests/testthat/test_PipeOp.R
@@ -87,13 +87,13 @@ test_that("Informative error and warning messages", {

# two 'expect_warning', because we want to 'expect' that there is exactly one warning.
# a function argument for expect_warning that tests exactly this would be a good idea, and has therefore been removed -.-
expect_warning(expect_warning(gr$train(tsk("iris")), "This happened PipeOp classif.debug's \\$train\\(\\)$"), NA)
expect_no_warning(expect_warning(gr$train(tsk("iris")), "This happened PipeOp classif.debug's \\$train\\(\\)$"))

expect_warning(suppressWarnings(gr$train(tsk("iris"))), NA)
expect_no_warning(suppressWarnings(gr$train(tsk("iris"))))

expect_warning(expect_warning(gr$predict(tsk("iris")), "This happened PipeOp classif.debug's \\$predict\\(\\)$"), NA)
expect_no_warning(expect_warning(gr$predict(tsk("iris")), "This happened PipeOp classif.debug's \\$predict\\(\\)$"))

expect_warning(suppressWarnings(gr$predict(tsk("iris"))), NA)
expect_no_warning(suppressWarnings(gr$predict(tsk("iris"))))


gr$param_set$values$classif.debug.warning_train = 0
@@ -119,8 +119,8 @@ test_that("Informative error and warning messages", {
)
)$new(id = "potest", input = data.table(name = "input", train = "*", predict = "*"), output = data.table(name = "input", train = "*", predict = "*"))

expect_warning(potest$train(list(1)), NA)
expect_warning(potest$predict(list(1)), NA)
expect_no_warning(potest$train(list(1)))
expect_no_warning(potest$predict(list(1)))

})

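A note on the nested pattern in test_PipeOp.R above: wrapping expect_warning() inside expect_no_warning() asserts that exactly one warning is raised, since the inner expectation consumes the matching warning and the outer one fails if anything else escapes. A hedged sketch of the idiom (warn_once() is a hypothetical helper, used only for illustration):

warn_once = function() {
  warning("this happened")
  invisible(NULL)
}

# inner expect_warning() catches the single expected warning;
# outer expect_no_warning() fails if any further warning escapes
expect_no_warning(
  expect_warning(warn_once(), "this happened")
)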
4 changes: 2 additions & 2 deletions tests/testthat/test_multiplicities.R
@@ -55,7 +55,7 @@ test_that("PipeOp - unpack_multiplicities", {
expect_error(unpack_multiplicities(list(a = Multiplicity(x = 1, z = 2), b = Multiplicity(x = 10, y = 20)), c(0, 0), c("a", "b"), "test"), regexp = "bad multiplicities")
expect_equal(unpack_multiplicities(list(a = Multiplicity(x = 1, z = 2), b = Multiplicity(x = 10, y = 20)), c(0, 1), c("a", "b"), "test"),
list(x = list(a = 1, b = Multiplicity(x = 10, y = 20)), z = list(a = 2, b = Multiplicity(x = 10, y = 20))))
expect_equal(unpack_multiplicities(list(0), 0, "a", "test"), NULL)
expect_null(unpack_multiplicities(list(0), 0, "a", "test"))
})

test_that("PipeOp - evaluate_multiplicities", {
@@ -112,7 +112,7 @@ test_that("PipeOp - evaluate_multiplicities", {
old_state = po$state
po$param_set$values$state = "error"
expect_error(po$train(as.Multiplicity(list(0, as.Multiplicity(0)))), regexp = "Error")
expect_equal(po$state, NULL) # state is completely reset to NULL
expect_null(po$state) # state is completely reset to NULL
})

test_that("Graph - add_edge", {
8 changes: 4 additions & 4 deletions tests/testthat/test_parvals.R
@@ -6,8 +6,8 @@ test_that("graph param vals", {
gr$add_pipeop(PipeOpPCA$new())
expect_equal(gr$ids(TRUE), c("scale", "pca"))

expect_equal(gr$pipeops$scale$param_set$values$center, NULL)
expect_equal(gr$param_set$values$scale.center, NULL)
expect_null(gr$pipeops$scale$param_set$values$center)
expect_null(gr$param_set$values$scale.center)
gr$param_set$values$scale.center = FALSE
expect_equal(gr$pipeops$scale$param_set$values$center, FALSE)
expect_equal(gr$param_set$values$scale.center, FALSE)
@@ -16,8 +16,8 @@ test_that("graph param vals", {
expect_equal(gr$param_set$values$scale.center, TRUE)


expect_equal(gr$pipeops$pca$param_set$values$center, NULL)
expect_equal(gr$param_set$values$pca.center, NULL)
expect_null(gr$pipeops$pca$param_set$values$center)
expect_null(gr$param_set$values$pca.center)
gr$param_set$values$pca.center = FALSE
expect_equal(gr$pipeops$pca$param_set$values$center, FALSE)
expect_equal(gr$param_set$values$pca.center, FALSE)
6 changes: 3 additions & 3 deletions tests/testthat/test_pipeop_learner.R
@@ -191,13 +191,13 @@ test_that("validation", {
expect_equal(obj$validate, "predefined")
expect_equal(obj$learner$validate, "predefined")
set_validate(obj, NULL)
expect_equal(obj$validate, NULL)
expect_equal(obj$learner$validate, NULL)
expect_null(obj$validate)
expect_null(obj$learner$validate)
expect_warning({obj$learner$validate = 0.3}, "unexpected behaviour") # nolint

obj = as_pipeop(as_learner(as_graph(lrn("classif.debug"))))
expect_error(set_validate(obj, "predefined", ids = "none_existing"), "Trying to heuristically")
expect_equal(obj$validate, NULL)
expect_null(obj$validate)
})

test_that("internal_tuned_values, internal_valid_scores", {
2 changes: 1 addition & 1 deletion tests/testthat/test_typecheck.R
@@ -3,7 +3,7 @@ context("Typecheck")

test_that("utility function works", {
skip_if_not_installed("rpart")
expect_equal(get_r6_inheritance("data.table"), NULL)
expect_null(get_r6_inheritance("data.table"))

expect_equal(get_r6_inheritance("PipeOp"), "PipeOp")
