From a3152681422df13ba69e9c505415964b8d35b73d Mon Sep 17 00:00:00 2001
From: Maximilian Muecke
Date: Sun, 15 Dec 2024 11:27:00 +0100
Subject: [PATCH] tests: use more specialised testthat functions

---
 tests/testthat/test_GraphLearner.R   | 24 ++++++++++++------------
 tests/testthat/test_PipeOp.R         | 12 ++++++------
 tests/testthat/test_multiplicities.R |  4 ++--
 tests/testthat/test_parvals.R        |  8 ++++----
 tests/testthat/test_pipeop_learner.R |  6 +++---
 tests/testthat/test_typecheck.R      |  2 +-
 6 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/tests/testthat/test_GraphLearner.R b/tests/testthat/test_GraphLearner.R
index 38115e84a..344c870fa 100644
--- a/tests/testthat/test_GraphLearner.R
+++ b/tests/testthat/test_GraphLearner.R
@@ -932,7 +932,7 @@ test_that("validation, internal_valid_scores", {
   # None of the Learners can do validation -> NULL
   glrn1 = as_learner(as_graph(lrn("classif.rpart")))$train(tsk("iris"))
   expect_false("validation" %in% glrn1$properties)
-  expect_equal(glrn1$internal_valid_scores, NULL)
+  expect_null(glrn1$internal_valid_scores)
 
   glrn2 = as_learner(as_graph(lrn("classif.debug")))
   expect_true("validation" %in% glrn2$properties)
@@ -945,7 +945,7 @@ test_that("validation, internal_valid_scores", {
 
   set_validate(glrn2, NULL)
   glrn2$train(tsk("iris"))
-  expect_true(is.null(glrn2$internal_valid_scores))
+  expect_null(glrn2$internal_valid_scores)
 
   # No validation set specified --> No internal_valid_scores
   expect_equal(
@@ -960,13 +960,13 @@ test_that("internal_tuned_values", {
   task = tsk("iris")
   glrn1 = as_learner(as_graph(lrn("classif.rpart")))$train(task)
   expect_false("internal_tuning" %in% glrn1$properties)
-  expect_equal(glrn1$internal_tuned_values, NULL)
+  expect_null(glrn1$internal_tuned_values)
 
   # learner with internal tuning
   glrn2 = as_learner(as_graph(lrn("classif.debug")))
   expect_true("internal_tuning" %in% glrn2$properties)
-  expect_equal(glrn2$internal_tuned_values, NULL)
+  expect_null(glrn2$internal_tuned_values)
   glrn2$train(task)
   expect_equal(glrn2$internal_tuned_values, named_list())
   glrn2$param_set$set_values(classif.debug.early_stopping = TRUE, classif.debug.iter = 1000)
@@ -981,8 +981,8 @@ test_that("set_validate", {
   expect_equal(glrn$validate, "test")
   expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, "predefined")
   set_validate(glrn, NULL)
-  expect_equal(glrn$validate, NULL)
-  expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, NULL)
+  expect_null(glrn$validate)
+  expect_null(glrn$graph$pipeops$classif.debug$learner$validate)
   set_validate(glrn, 0.2, ids = "classif.debug")
   expect_equal(glrn$validate, 0.2)
   expect_equal(glrn$graph$pipeops$classif.debug$learner$validate, "predefined")
@@ -1002,7 +1002,7 @@ test_that("set_validate", {
   expect_equal(glrn2$validate, 0.25)
   expect_equal(glrn2$graph$pipeops$polearner$learner$validate, "predefined")
   expect_equal(glrn2$graph$pipeops$polearner$learner$graph$pipeops$final$learner$validate, "predefined")
-  expect_equal(glrn2$graph$pipeops$polearner$learner$graph$pipeops$classif.debug$learner$validate, NULL)
+  expect_null(glrn2$graph$pipeops$polearner$learner$graph$pipeops$classif.debug$learner$validate)
 
   # graphlearner in graphlearner: failure handling
   glrn = as_learner(po("pca") %>>% lrn("classif.debug"))
@@ -1013,15 +1013,15 @@ test_that("set_validate", {
     set_validate(gglrn, validate = "test", args = list(po_glrn = list(ids = "pca"))),
     "Trying to heuristically reset"
   )
-  expect_equal(gglrn$validate, NULL)
+  expect_null(gglrn$validate)
 
   # base_learner is not final learner
   glrn = as_learner(lrn("classif.debug") %>>% po("nop"))
   set_validate(glrn, 0.3)
   expect_equal(glrn$graph$pipeops$classif.debug$validate, "predefined")
   set_validate(glrn, NULL)
-  expect_equal(glrn$graph$pipeops$classif.debug$validate, NULL)
-  expect_equal(glrn$validate, NULL)
+  expect_null(glrn$graph$pipeops$classif.debug$validate)
+  expect_null(glrn$validate)
 
   # args and args_all
   bglrn = as_learner(ppl("branch", list(lrn("classif.debug", id = "d1"), lrn("classif.debug", id = "d2"))))
@@ -1033,13 +1033,13 @@ test_that("set_validate", {
   # args
   set_validate(gglrn, validate = 0.2, args = list(po_glrn = list(ids = "d1")))
   expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d1$validate, "predefined")
-  expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate, NULL)
+  expect_null(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate)
 
   # args all
   gglrn = as_learner(obj)
   set_validate(gglrn, validate = 0.2, args_all = list(ids = "d1"))
   expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d1$validate, "predefined")
-  expect_equal(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate, NULL)
+  expect_null(gglrn$graph$pipeops[[1L]]$learner$graph$pipeops$d2$validate)
 })
 
 test_that("marshal", {
diff --git a/tests/testthat/test_PipeOp.R b/tests/testthat/test_PipeOp.R
index 6a030929b..ca84ddc49 100644
--- a/tests/testthat/test_PipeOp.R
+++ b/tests/testthat/test_PipeOp.R
@@ -87,13 +87,13 @@ test_that("Informative error and warning messages", {
 
   # two 'expect_warning', because we want to 'expect' that there is exactly one warning.
   # a function argument for expect_warning that tests exactly this would be a good idea, and has therefore been removed -.-
-  expect_warning(expect_warning(gr$train(tsk("iris")), "This happened PipeOp classif.debug's \\$train\\(\\)$"), NA)
+  expect_no_warning(expect_warning(gr$train(tsk("iris")), "This happened PipeOp classif.debug's \\$train\\(\\)$"))
 
-  expect_warning(suppressWarnings(gr$train(tsk("iris"))), NA)
+  expect_no_warning(suppressWarnings(gr$train(tsk("iris"))))
 
-  expect_warning(expect_warning(gr$predict(tsk("iris")), "This happened PipeOp classif.debug's \\$predict\\(\\)$"), NA)
+  expect_no_warning(expect_warning(gr$predict(tsk("iris")), "This happened PipeOp classif.debug's \\$predict\\(\\)$"))
 
-  expect_warning(suppressWarnings(gr$predict(tsk("iris"))), NA)
+  expect_no_warning(suppressWarnings(gr$predict(tsk("iris"))))
 
   gr$param_set$values$classif.debug.warning_train = 0
@@ -119,8 +119,8 @@ test_that("Informative error and warning messages", {
     )
   )$new(id = "potest", input = data.table(name = "input", train = "*", predict = "*"),
     output = data.table(name = "input", train = "*", predict = "*"))
-  expect_warning(potest$train(list(1)), NA)
-  expect_warning(potest$predict(list(1)), NA)
+  expect_no_warning(potest$train(list(1)))
+  expect_no_warning(potest$predict(list(1)))
 })
 
diff --git a/tests/testthat/test_multiplicities.R b/tests/testthat/test_multiplicities.R
index cbb362fd7..df85f13f5 100644
--- a/tests/testthat/test_multiplicities.R
+++ b/tests/testthat/test_multiplicities.R
@@ -55,7 +55,7 @@ test_that("PipeOp - unpack_multiplicities", {
 
   expect_error(unpack_multiplicities(list(a = Multiplicity(x = 1, z = 2), b = Multiplicity(x = 10, y = 20)), c(0, 0), c("a", "b"), "test"), regexp = "bad multiplicities")
   expect_equal(unpack_multiplicities(list(a = Multiplicity(x = 1, z = 2), b = Multiplicity(x = 10, y = 20)), c(0, 1), c("a", "b"), "test"), list(x = list(a = 1, b = Multiplicity(x = 10, y = 20)), z = list(a = 2, b = Multiplicity(x = 10, y = 20))))
-  expect_equal(unpack_multiplicities(list(0), 0, "a", "test"), NULL)
+  expect_null(unpack_multiplicities(list(0), 0, "a", "test"))
 })
 
 test_that("PipeOp - evaluate_multiplicities", {
@@ -112,7 +112,7 @@ test_that("PipeOp - evaluate_multiplicities", {
   old_state = po$state
   po$param_set$values$state = "error"
   expect_error(po$train(as.Multiplicity(list(0, as.Multiplicity(0)))), regexp = "Error")
-  expect_equal(po$state, NULL) # state is completely reset to NULL
+  expect_null(po$state) # state is completely reset to NULL
 })
 
 test_that("Graph - add_edge", {
diff --git a/tests/testthat/test_parvals.R b/tests/testthat/test_parvals.R
index c4c3fa115..5fccdc391 100644
--- a/tests/testthat/test_parvals.R
+++ b/tests/testthat/test_parvals.R
@@ -6,8 +6,8 @@ test_that("graph param vals", {
   gr$add_pipeop(PipeOpPCA$new())
 
   expect_equal(gr$ids(TRUE), c("scale", "pca"))
-  expect_equal(gr$pipeops$scale$param_set$values$center, NULL)
-  expect_equal(gr$param_set$values$scale.center, NULL)
+  expect_null(gr$pipeops$scale$param_set$values$center)
+  expect_null(gr$param_set$values$scale.center)
   gr$param_set$values$scale.center = FALSE
   expect_equal(gr$pipeops$scale$param_set$values$center, FALSE)
   expect_equal(gr$param_set$values$scale.center, FALSE)
@@ -16,8 +16,8 @@ test_that("graph param vals", {
 
   expect_equal(gr$param_set$values$scale.center, TRUE)
 
-  expect_equal(gr$pipeops$pca$param_set$values$center, NULL)
-  expect_equal(gr$param_set$values$pca.center, NULL)
+  expect_null(gr$pipeops$pca$param_set$values$center)
+  expect_null(gr$param_set$values$pca.center)
   gr$param_set$values$pca.center = FALSE
   expect_equal(gr$pipeops$pca$param_set$values$center, FALSE)
   expect_equal(gr$param_set$values$pca.center, FALSE)
diff --git a/tests/testthat/test_pipeop_learner.R b/tests/testthat/test_pipeop_learner.R
index 8191e5d5d..2a9547c5b 100644
--- a/tests/testthat/test_pipeop_learner.R
+++ b/tests/testthat/test_pipeop_learner.R
@@ -191,13 +191,13 @@ test_that("validation", {
   expect_equal(obj$validate, "predefined")
   expect_equal(obj$learner$validate, "predefined")
   set_validate(obj, NULL)
-  expect_equal(obj$validate, NULL)
-  expect_equal(obj$learner$validate, NULL)
+  expect_null(obj$validate)
+  expect_null(obj$learner$validate)
   expect_warning({obj$learner$validate = 0.3}, "unexpected behaviour") # nolint
 
   obj = as_pipeop(as_learner(as_graph(lrn("classif.debug"))))
   expect_error(set_validate(obj, "predefined", ids = "none_existing"), "Trying to heuristically")
-  expect_equal(obj$validate, NULL)
+  expect_null(obj$validate)
 })
 
 test_that("internal_tuned_values, internal_valid_scores", {
diff --git a/tests/testthat/test_typecheck.R b/tests/testthat/test_typecheck.R
index bb496c57a..bb1b7de60 100644
--- a/tests/testthat/test_typecheck.R
+++ b/tests/testthat/test_typecheck.R
@@ -3,7 +3,7 @@ context("Typecheck")
 
 test_that("utility function works", {
   skip_if_not_installed("rpart")
-  expect_equal(get_r6_inheritance("data.table"), NULL)
+  expect_null(get_r6_inheritance("data.table"))
   expect_equal(get_r6_inheritance("PipeOp"), "PipeOp")
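
For reference, a minimal sketch of the testthat idioms this commit standardises on: expect_null() and expect_no_warning() are the specialised expectations used throughout the diff, while the value() and noisy() helpers below are hypothetical, added only to make the example self-contained.

library(testthat)

# Hypothetical helpers, defined only for illustration.
value = function() NULL          # returns NULL
noisy = function() invisible(1)  # runs without raising a warning

test_that("specialised expectations state the intent directly", {
  # instead of expect_equal(value(), NULL):
  expect_null(value())
  # instead of expect_warning(noisy(), NA), where regexp = NA means "expect no warning":
  expect_no_warning(noisy())
})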