From ec7fb5f7b075f016f935d1b3a1a40095e04b1d23 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Wed, 19 Jun 2024 21:58:33 +0200 Subject: [PATCH 01/22] new xgboost() interface, part 1 --- R-package/DESCRIPTION | 3 +- R-package/NAMESPACE | 1 + R-package/R/utils.R | 34 + R-package/R/xgb.Booster.R | 20 +- R-package/R/xgb.DMatrix.R | 30 - R-package/R/xgb.dump.R | 4 +- R-package/R/xgb.importance.R | 26 +- R-package/R/xgb.model.dt.tree.R | 5 +- R-package/R/xgb.plot.deepness.R | 5 +- R-package/R/xgb.plot.importance.R | 5 +- R-package/R/xgb.plot.multi.trees.R | 5 +- R-package/R/xgb.plot.shap.R | 10 +- R-package/R/xgb.plot.tree.R | 5 +- R-package/R/xgb.train.R | 6 +- R-package/R/xgboost.R | 1036 ++++++++++++++++- R-package/demo/basic_walkthrough.R | 23 +- R-package/man/print.xgb.Booster.Rd | 5 +- R-package/man/xgb.attr.Rd | 5 +- R-package/man/xgb.config.Rd | 5 +- R-package/man/xgb.dump.Rd | 4 +- R-package/man/xgb.importance.Rd | 26 +- R-package/man/xgb.model.dt.tree.Rd | 5 +- R-package/man/xgb.parameters.Rd | 5 +- R-package/man/xgb.plot.deepness.Rd | 5 +- R-package/man/xgb.plot.importance.Rd | 5 +- R-package/man/xgb.plot.multi.trees.Rd | 5 +- R-package/man/xgb.plot.shap.Rd | 10 +- R-package/man/xgb.plot.tree.Rd | 5 +- R-package/man/xgb.train.Rd | 26 +- R-package/man/xgboost.Rd | 215 ++++ R-package/tests/testthat/test_xgboost.R | 585 ++++++++++ R-package/vignettes/discoverYourData.Rmd | 29 +- R-package/vignettes/xgboostPresentation.Rmd | 37 +- .../feature_interaction_constraint.rst | 5 +- doc/tutorials/monotonic.rst | 5 +- 35 files changed, 1978 insertions(+), 227 deletions(-) create mode 100644 R-package/man/xgboost.Rd create mode 100644 R-package/tests/testthat/test_xgboost.R diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION index b4072aff0b41..b4f316965813 100644 --- a/R-package/DESCRIPTION +++ b/R-package/DESCRIPTION @@ -57,7 +57,8 @@ Suggests: igraph (>= 1.0.1), float, titanic, - RhpcBLASctl + RhpcBLASctl, + survival Depends: R (>= 4.3.0) Imports: diff --git a/R-package/NAMESPACE b/R-package/NAMESPACE index c9e085e77e0a..f6cc9062ca4d 100644 --- a/R-package/NAMESPACE +++ b/R-package/NAMESPACE @@ -13,6 +13,7 @@ S3method(predict,xgb.Booster) S3method(print,xgb.Booster) S3method(print,xgb.DMatrix) S3method(print,xgb.cv.synchronous) +S3method(print,xgboost) S3method(setinfo,xgb.Booster) S3method(setinfo,xgb.DMatrix) S3method(variable.names,xgb.Booster) diff --git a/R-package/R/utils.R b/R-package/R/utils.R index 69f358751dc8..59830f8c91e7 100644 --- a/R-package/R/utils.R +++ b/R-package/R/utils.R @@ -30,6 +30,40 @@ NVL <- function(x, val) { return(c('rank:pairwise', 'rank:ndcg', 'rank:map')) } +.OBJECTIVES_NON_DEFAULT_MODE <- function() { + return(c("reg:logistic", "binary:logitraw", "multi:softmax")) +} + +.BINARY_CLASSIF_OBJECTIVES <- function() { + return(c("binary:logistic", "binary:hinge")) +} + +.MULTICLASS_CLASSIF_OBJECTIVES <- function() { + return("multi:softprob") +} + +.SURVIVAL_RIGHT_CENSORING_OBJECTIVES <- function() { + return(c("survival:cox", "survival:aft")) +} + +.SURVIVAL_ALL_CENSORING_OBJECTIVES <- function() { + return("survival:aft") +} + +.REGRESSION_OBJECTIVES <- function() { + return(c( + "reg:squarederror", "reg:squaredlogerror", "reg:logistic", "reg:pseudohubererror", + "reg:absoluteerror", "reg:quantileerror", "count:poisson", "reg:gamma", "reg:tweedie" + )) +} + +.MULTI_TARGET_OBJECTIVES <- function() { + return(c( + "reg:squarederror", "reg:squaredlogerror", "reg:logistic", "reg:pseudohubererror", + "reg:quantileerror", "reg:gamma" + )) +} + # # Low-level 
functions for boosting -------------------------------------------- diff --git a/R-package/R/xgb.Booster.R b/R-package/R/xgb.Booster.R index 77d75fa9c2a5..643e0771fe53 100644 --- a/R-package/R/xgb.Booster.R +++ b/R-package/R/xgb.Booster.R @@ -663,9 +663,8 @@ validate.features <- function(bst, newdata) { #' data(agaricus.train, package = "xgboost") #' train <- agaricus.train #' -#' bst <- xgboost( -#' data = train$data, -#' label = train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(train$data, label = train$label), #' max_depth = 2, #' eta = 1, #' nthread = 2, @@ -767,9 +766,8 @@ xgb.attributes <- function(object) { #' data.table::setDTthreads(nthread) #' train <- agaricus.train #' -#' bst <- xgboost( -#' data = train$data, -#' label = train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(train$data, label = train$label), #' max_depth = 2, #' eta = 1, #' nthread = nthread, @@ -817,9 +815,8 @@ xgb.config <- function(object) { #' data(agaricus.train, package = "xgboost") #' train <- agaricus.train #' -#' bst <- xgboost( -#' data = train$data, -#' label = train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(train$data, label = train$label), #' max_depth = 2, #' eta = 1, #' nthread = 2, @@ -1230,9 +1227,8 @@ xgb.is.same.Booster <- function(obj1, obj2) { #' data(agaricus.train, package = "xgboost") #' train <- agaricus.train #' -#' bst <- xgboost( -#' data = train$data, -#' label = train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(train$data, label = train$label), #' max_depth = 2, #' eta = 1, #' nthread = 2, diff --git a/R-package/R/xgb.DMatrix.R b/R-package/R/xgb.DMatrix.R index 15f6faed0ba0..d87d1cbf71c2 100644 --- a/R-package/R/xgb.DMatrix.R +++ b/R-package/R/xgb.DMatrix.R @@ -853,36 +853,6 @@ xgb.DMatrix.hasinfo <- function(object, info) { } -# get dmatrix from data, label -# internal helper method -xgb.get.DMatrix <- function(data, label, missing, weight, nthread) { - if (inherits(data, "dgCMatrix") || is.matrix(data)) { - if (is.null(label)) { - stop("label must be provided when data is a matrix") - } - dtrain <- xgb.DMatrix(data, label = label, missing = missing, nthread = nthread) - if (!is.null(weight)) { - setinfo(dtrain, "weight", weight) - } - } else { - if (!is.null(label)) { - warning("xgboost: label will be ignored.") - } - if (is.character(data)) { - data <- path.expand(data) - dtrain <- xgb.DMatrix(data[1]) - } else if (inherits(data, "xgb.DMatrix")) { - dtrain <- data - } else if (inherits(data, "data.frame")) { - stop("xgboost doesn't support data.frame as input. Convert it to matrix first.") - } else { - stop("xgboost: invalid input data") - } - } - return(dtrain) -} - - #' Dimensions of xgb.DMatrix #' #' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}. 
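The hunk above removes the internal `xgb.get.DMatrix()` helper that let the old `xgboost()` accept raw matrices plus a `label` argument; that conversion now happens either inside the new `xgboost()` interface or explicitly at the call site. A minimal sketch of the explicit equivalent of the old `xgboost(data, label, weight)` path, assuming the bundled agaricus data (the weight vector here is purely illustrative):

library(xgboost)
data(agaricus.train, package = "xgboost")
train <- agaricus.train
# Build the DMatrix explicitly, as xgb.get.DMatrix() used to do internally
dtrain <- xgb.DMatrix(train$data, label = train$label, nthread = 2)
setinfo(dtrain, "weight", rep(1, length(train$label)))  # optional per-row weights
bst <- xgb.train(
  params = list(max_depth = 2, eta = 1, objective = "binary:logistic"),
  data = dtrain,
  nrounds = 2
)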
diff --git a/R-package/R/xgb.dump.R b/R-package/R/xgb.dump.R index 2fa5bcb2f628..ef7202a1a5db 100644 --- a/R-package/R/xgb.dump.R +++ b/R-package/R/xgb.dump.R @@ -29,8 +29,8 @@ #' data(agaricus.test, package='xgboost') #' train <- agaricus.train #' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +#' bst <- xgb.train(data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, +#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' # save the model in file 'xgb.model.dump' #' dump_path = file.path(tempdir(), 'model.dump') #' xgb.dump(bst, dump_path, with_stats = TRUE) diff --git a/R-package/R/xgb.importance.R b/R-package/R/xgb.importance.R index 547d9677b798..bbf816a0d6cc 100644 --- a/R-package/R/xgb.importance.R +++ b/R-package/R/xgb.importance.R @@ -46,9 +46,8 @@ #' # binomial classification using "gbtree": #' data(agaricus.train, package = "xgboost") #' -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' max_depth = 2, #' eta = 1, #' nthread = 2, @@ -59,9 +58,8 @@ #' xgb.importance(model = bst) #' #' # binomial classification using "gblinear": -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' booster = "gblinear", #' eta = 0.3, #' nthread = 1, @@ -73,9 +71,11 @@ #' # multiclass classification using "gbtree": #' nclass <- 3 #' nrounds <- 10 -#' mbst <- xgboost( -#' data = as.matrix(iris[, -5]), -#' label = as.numeric(iris$Species) - 1, +#' mbst <- xgb.train( +#' data = xgb.DMatrix( +#' as.matrix(iris[, -5]), +#' label = as.numeric(iris$Species) - 1 +#' ), #' max_depth = 3, #' eta = 0.2, #' nthread = 2, @@ -99,9 +99,11 @@ #' ) #' #' # multiclass classification using "gblinear": -#' mbst <- xgboost( -#' data = scale(as.matrix(iris[, -5])), -#' label = as.numeric(iris$Species) - 1, +#' mbst <- xgb.train( +#' data = xgb.DMatrix( +#' scale(as.matrix(iris[, -5])), +#' label = as.numeric(iris$Species) - 1 +#' ), #' booster = "gblinear", #' eta = 0.2, #' nthread = 1, diff --git a/R-package/R/xgb.model.dt.tree.R b/R-package/R/xgb.model.dt.tree.R index ff416b73e38a..73cdecc5c3ae 100644 --- a/R-package/R/xgb.model.dt.tree.R +++ b/R-package/R/xgb.model.dt.tree.R @@ -43,9 +43,8 @@ #' nthread <- 1 #' data.table::setDTthreads(nthread) #' -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' max_depth = 2, #' eta = 1, #' nthread = nthread, diff --git a/R-package/R/xgb.plot.deepness.R b/R-package/R/xgb.plot.deepness.R index 8e1972374546..956ee9c83fd0 100644 --- a/R-package/R/xgb.plot.deepness.R +++ b/R-package/R/xgb.plot.deepness.R @@ -48,9 +48,8 @@ #' data.table::setDTthreads(nthread) #' #' ## Change max_depth to a higher number to get a more significant result -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' max_depth = 6, #' nthread = nthread, #' nrounds = 50, diff --git a/R-package/R/xgb.plot.importance.R b/R-package/R/xgb.plot.importance.R index 1848a3a86e53..199595cb8ddf 100644 --- a/R-package/R/xgb.plot.importance.R +++ 
b/R-package/R/xgb.plot.importance.R @@ -51,9 +51,8 @@ #' nthread <- 2 #' data.table::setDTthreads(nthread) #' -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' max_depth = 3, #' eta = 1, #' nthread = nthread, diff --git a/R-package/R/xgb.plot.multi.trees.R b/R-package/R/xgb.plot.multi.trees.R index e6d678ee7a4f..19a114071509 100644 --- a/R-package/R/xgb.plot.multi.trees.R +++ b/R-package/R/xgb.plot.multi.trees.R @@ -35,9 +35,8 @@ #' nthread <- 2 #' data.table::setDTthreads(nthread) #' -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), #' max_depth = 15, #' eta = 1, #' nthread = nthread, diff --git a/R-package/R/xgb.plot.shap.R b/R-package/R/xgb.plot.shap.R index 788a095399ed..be3f7116034c 100644 --- a/R-package/R/xgb.plot.shap.R +++ b/R-package/R/xgb.plot.shap.R @@ -82,9 +82,8 @@ #' data.table::setDTthreads(nthread) #' nrounds <- 20 #' -#' bst <- xgboost( -#' agaricus.train$data, -#' agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, agaricus.train$label), #' nrounds = nrounds, #' eta = 0.1, #' max_depth = 3, @@ -108,9 +107,8 @@ #' set.seed(123) #' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values #' -#' mbst <- xgboost( -#' data = x, -#' label = as.numeric(iris$Species) - 1, +#' mbst <- xgb.train( +#' data = xgb.DMatrix(x, label = as.numeric(iris$Species) - 1), #' nrounds = nrounds, #' max_depth = 2, #' eta = 0.3, diff --git a/R-package/R/xgb.plot.tree.R b/R-package/R/xgb.plot.tree.R index 5ed1e70f695a..502de3f52d61 100644 --- a/R-package/R/xgb.plot.tree.R +++ b/R-package/R/xgb.plot.tree.R @@ -68,9 +68,8 @@ #' @examples #' data(agaricus.train, package = "xgboost") #' -#' bst <- xgboost( -#' data = agaricus.train$data, -#' label = agaricus.train$label, +#' bst <- xgb.train( +#' data = xgb.DMatrix(agaricus.train$data, agaricus.train$label), #' max_depth = 3, #' eta = 1, #' nthread = 2, diff --git a/R-package/R/xgb.train.R b/R-package/R/xgb.train.R index 0aa3cdcf1df0..5b719a17b318 100644 --- a/R-package/R/xgb.train.R +++ b/R-package/R/xgb.train.R @@ -316,12 +316,10 @@ #' early_stopping_rounds = 3) #' #' ## An 'xgboost' interface example: -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, -#' max_depth = 2, eta = 1, nthread = nthread, nrounds = 2, -#' objective = "binary:logistic") +#' bst <- xgboost(x = agaricus.train$data, y = factor(agaricus.train$label), +#' params = list(max_depth = 2, eta = 1), nthread = nthread, nrounds = 2) #' pred <- predict(bst, agaricus.test$data) #' -#' @rdname xgb.train #' @export xgb.train <- function(params = list(), data, nrounds, evals = list(), obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L, diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index a1d37358162c..aec2b8a54a6f 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -1,32 +1,1022 @@ -# Simple interface for training an xgboost model that wraps \code{xgb.train}. -# Its documentation is combined with xgb.train. 
-#
-#' @rdname xgb.train
+prescreen.parameters <- function(params) {
+  if (!NROW(params)) {
+    return(list())
+  }
+  if (!is.list(params)) {
+    stop("'params' must be a list or NULL.")
+  }
+
+  params <- params[!vapply(params, is.null, logical(1L))]
+
+  if ("num_class" %in% names(params)) {
+    stop("'num_class' cannot be manually specified for 'xgboost()'. Pass a factor 'y' instead.")
+  }
+  if ("process_type" %in% names(params)) {
+    if (params$process_type != "default") {
+      stop("Non-default 'process_type' is not supported for 'xgboost()'. Try 'xgb.train()'.")
+    }
+  }
+
+  params_function_args <- c(
+    "objective", "verbose", "verbosity", "nthread", "seed",
+    "monotone_constraints", "interaction_constraints"
+  )
+  if (any(names(params) %in% params_function_args)) {
+    stop(
+      "'xgboost()' function arguments cannot be passed under 'params'. Got: ",
+      paste(intersect(names(params), params_function_args), collapse = ", ")
+    )
+  }
+
+  return(params)
+}
+
+prescreen.objective <- function(objective) {
+  if (!is.null(objective)) {
+    if (!is.character(objective) || length(objective) != 1L || is.na(objective)) {
+      stop("'objective' must be a single character/string variable.")
+    }
+
+    if (objective %in% .OBJECTIVES_NON_DEFAULT_MODE()) {
+      stop(
+        "Objectives with non-default prediction mode (",
+        paste(.OBJECTIVES_NON_DEFAULT_MODE(), collapse = ", "),
+        ") are not supported in 'xgboost()'. Try 'xgb.train()'."
+      )
+    }
+  }
+}
+
+process.base.margin <- function(base_margin, nrows, ncols) {
+  if (!NROW(base_margin)) {
+    return(NULL)
+  }
+  if (is.array(base_margin) && length(dim(base_margin)) > 2) {
+    stop(
+      "'base_margin' should not have more than 2 dimensions for any objective (got: ",
+      length(dim(base_margin)),
+      " dimensions)."
+    )
+  }
+  if (inherits(base_margin, c("sparseMatrix", "sparseVector"))) {
+    warning(
+      "Got a sparse matrix type (class: ",
+      paste(class(base_margin), collapse = ", "),
+      ") for 'base_margin'. Will convert to dense matrix."
+    )
+    base_margin <- as.matrix(base_margin)
+  }
+
+  if (ncols == 1) {
+    if (inherits(base_margin, c("matrix", "data.frame"))) {
+      if (ncol(base_margin) != 1) {
+        stop("'base_margin' should be a 1-d vector for the given objective and data.")
+      }
+      if (is.data.frame(base_margin)) {
+        base_margin <- base_margin[[1L]]
+      } else {
+        base_margin <- base_margin[, 1L]
+      }
+    }
+    if (!is.numeric(base_margin)) {
+      base_margin <- as.numeric(base_margin)
+    }
+    if (length(base_margin) != nrows) {
+      stop(
+        "'base_margin' has incorrect number of rows. Expected: ",
+        nrows,
+        ". Got: ",
+        length(base_margin)
+      )
+    }
+  } else {
+    supported_multicol <- c("matrix", "data.frame")
+    if (!inherits(base_margin, supported_multicol)) {
+      stop(
+        "'base_margin' should be a matrix with ",
+        ncols,
+        " columns for the given objective and data. Got class: ",
+        paste(class(base_margin), collapse = ", ")
+      )
+    }
+    if (ncol(base_margin) != ncols) {
+      stop(
+        "'base_margin' has incorrect number of columns. Expected: ",
+        ncols,
+        ". Got: ",
+        ncol(base_margin)
+      )
+    }
+    if (nrow(base_margin) != nrows) {
+      stop(
+        "'base_margin' has incorrect number of rows. Expected: ",
+        nrows,
+        ". Got: ",
+        nrow(base_margin)
+      )
+    }
+    if (!is.matrix(base_margin)) {
+      base_margin <- as.matrix(base_margin)
+    }
+  }
+
+  return(base_margin)
+}
+
+process.y.margin.and.objective <- function(
+  y,
+  base_margin,
+  objective,
+  params
+) {
+
+  if (!NROW(y)) {
+    stop("Passed empty 'y'.")
+  }
+
+  if (is.array(y) && length(dim(y)) > 2) {
+    stop(
+      "'y' should not have more than 2 dimensions for any objective (got: ",
+      length(dim(y)),
+      ")."
+    )
+  }
+
+  if (inherits(y, c("sparseMatrix", "sparseVector"))) {
+    warning(
+      "Got a sparse matrix type (class: ",
+      paste(class(y), collapse = ", "),
+      ") for 'y'. Will convert to dense matrix."
+    )
+    y <- as.matrix(y)
+  }
+
+  if (is.character(y)) {
+    if (!is.vector(y)) {
+      if (NCOL(y) > 1) {
+        stop("Multi-column categorical 'y' is not supported.")
+      }
+      y <- as.vector(y)
+    }
+    y <- factor(y)
+  }
+
+  if (is.logical(y)) {
+    if (!is.vector(y)) {
+      if (NCOL(y) > 1) {
+        stop("Multi-column logical/boolean 'y' is not supported.")
+      }
+      y <- as.vector(y)
+    }
+    y <- factor(y, c(FALSE, TRUE))
+  }
+
+  if (is.factor(y)) {
+
+    y_levels <- levels(y)
+    if (length(y_levels) < 2) {
+      stop("Factor 'y' has fewer than 2 levels.")
+    }
+    if (length(y_levels) == 2) {
+      if (is.null(objective)) {
+        objective <- "binary:logistic"
+      } else {
+        if (!(objective %in% .BINARY_CLASSIF_OBJECTIVES())) {
+          stop(
+            "Got binary 'y' - supported objectives for this data are: ",
+            paste(.BINARY_CLASSIF_OBJECTIVES(), collapse = ", "),
+            ". Was passed: ",
+            objective
+          )
+        }
+      }
+
+      if (!is.null(base_margin)) {
+        base_margin <- process.base.margin(base_margin, length(y), 1)
+      }
+
+      out <- list(
+        params = list(
+          objective = objective
+        ),
+        metadata = list(
+          y_levels = y_levels,
+          n_targets = 1
+        )
+      )
+    } else { # length(levels) > 2
+      if (is.null(objective)) {
+        objective <- "multi:softprob"
+      } else {
+        if (!(objective %in% .MULTICLASS_CLASSIF_OBJECTIVES())) {
+          stop(
+            "Got non-binary factor 'y' - supported objectives for this data are: ",
+            paste(.MULTICLASS_CLASSIF_OBJECTIVES(), collapse = ", "),
+            ". Was passed: ",
+            objective
+          )
+        }
+      }
+
+      if (!is.null(base_margin)) {
+        base_margin <- process.base.margin(base_margin, length(y), length(y_levels))
+      }
+
+      out <- list(
+        params = list(
+          objective = objective,
+          num_class = length(y_levels)
+        ),
+        metadata = list(
+          y_levels = y_levels,
+          n_targets = length(y_levels)
+        )
+      )
+    }
+
+    out$dmatrix_args <- list(
+      label = as.numeric(y) - 1,
+      base_margin = base_margin
+    )
+
+  } else if (inherits(y, "Surv")) {
+
+    y_attr <- attributes(y)
+    supported_surv_types <- c("left", "right", "interval")
+    if (!(y_attr$type %in% supported_surv_types)) {
+      stop(
+        "Survival objectives are only supported for types: ",
+        paste(supported_surv_types, collapse = ", "),
+        ". Was passed: ",
+        y_attr$type
+      )
+    }
+
+    if (is.null(objective)) {
+      objective <- "survival:aft"
+    } else {
+      if (y_attr$type == "right") {
+        if (!(objective %in% .SURVIVAL_RIGHT_CENSORING_OBJECTIVES())) {
+          stop(
+            "Got right-censored 'y' variable - supported objectives for this data are: ",
+            paste(.SURVIVAL_RIGHT_CENSORING_OBJECTIVES(), collapse = ", "),
+            ". Was passed: ",
+            objective
+          )
+        }
+      } else {
+        if (!(objective %in% .SURVIVAL_ALL_CENSORING_OBJECTIVES())) {
+          stop(
+            "Got ", y_attr$type, "-censored 'y' variable - supported objectives for this data are: ",
+            paste(.SURVIVAL_ALL_CENSORING_OBJECTIVES(), collapse = ", "),
+            ". Was passed: ",
+            objective
+          )
+        }
+      }
+    }
+
+    if (!is.null(base_margin)) {
+      base_margin <- process.base.margin(base_margin, nrow(y), 1)
+    }
+
+    out <- list(
+      params = list(
+        objective = objective
+      ),
+      metadata = list(
+        n_targets = 1
+      )
+    )
+
+    if (objective == "survival:cox") {
+      # Can only get here when using right censoring
+      if (y_attr$type != "right") {
+        stop("Internal error.")
+      }
+
+      out$dmatrix_args <- list(
+        label = y[, 1L] * (2 * (y[, 2L] - 0.5))
+      )
+
+    } else {
+      if (y_attr$type == "left") {
+        lb <- ifelse(
+          y[, 2L] == 0,
+          0,
+          y[, 1L]
+        )
+        ub <- y[, 1L]
+        out$dmatrix_args <- list(
+          label_lower_bound = lb,
+          label_upper_bound = ub
+        )
+      } else if (y_attr$type == "right") {
+        lb <- y[, 1L]
+        ub <- ifelse(
+          y[, 2L] == 0,
+          Inf,
+          y[, 1L]
+        )
+        out$dmatrix_args <- list(
+          label_lower_bound = lb,
+          label_upper_bound = ub
+        )
+      } else if (y_attr$type == "interval") {
+        out$dmatrix_args <- list(
+          label_lower_bound = ifelse(y[, 3L] == 2, 0, y[, 1L]),
+          label_upper_bound = ifelse(
+            y[, 3L] == 0, Inf,
+            ifelse(y[, 3L] == 3, y[, 2L], y[, 1L])
+          )
+        )
+      }
+
+      if (min(out$dmatrix_args$label_lower_bound) < 0) {
+        stop("Survival objectives are only defined for non-negative 'y'.")
+      }
+    }
+
+    out$dmatrix_args$base_margin <- base_margin
+
+  } else if (is.vector(y)) {
+
+    if (is.null(objective)) {
+      objective <- "reg:squarederror"
+    } else if (!(objective %in% .REGRESSION_OBJECTIVES())) {
+      stop(
+        "Got numeric 'y' - supported objectives for this data are: ",
+        paste(.REGRESSION_OBJECTIVES(), collapse = ", "),
+        ". Was passed: ",
+        objective
+      )
+    }
+
+    n_targets <- 1L
+    if (objective == "reg:quantileerror" && NROW(params$quantile_alpha) > 1) {
+      n_targets <- NROW(params$quantile_alpha)
+    }
+
+    if (!is.null(base_margin)) {
+      base_margin <- process.base.margin(base_margin, length(y), n_targets)
+    }
+
+    out <- list(
+      params = list(
+        objective = objective
+      ),
+      metadata = list(
+        n_targets = n_targets
+      ),
+      dmatrix_args = list(
+        label = as.numeric(y),
+        base_margin = base_margin
+      )
+    )
+
+  } else if (is.data.frame(y)) {
+    if (ncol(y) == 1) {
+      return(process.y.margin.and.objective(y[[1L]], base_margin, objective, params))
+    }
+
+    if (is.null(objective)) {
+      objective <- "reg:squarederror"
+    } else if (!(objective %in% .MULTI_TARGET_OBJECTIVES())) {
+      stop(
+        "Got multi-column 'y' - supported objectives for this data are: ",
+        paste(.MULTI_TARGET_OBJECTIVES(), collapse = ", "),
+        ". Was passed: ",
+        objective
+      )
+    }
+
+    y_names <- names(y)
+    y <- lapply(y, function(x) {
+      if (!inherits(x, c("numeric", "integer"))) {
+        stop(
+          "Multi-target 'y' only supports 'numeric' and 'integer' types. Got: ",
+          paste(class(x), collapse = ", ")
+        )
+      }
+      return(as.numeric(x))
+    })
+    y <- as.data.frame(y) |> as.matrix()
+
+    if (!is.null(base_margin)) {
+      base_margin <- process.base.margin(base_margin, nrow(y), ncol(y))
+    }
+
+    out <- list(
+      params = list(
+        objective = objective
+      ),
+      dmatrix_args = list(
+        label = y,
+        base_margin = base_margin
+      ),
+      metadata = list(
+        y_names = y_names,
+        n_targets = ncol(y)
+      )
+    )
+
+  } else if (is.matrix(y)) {
+    if (ncol(y) == 1) {
+      return(process.y.margin.and.objective(as.vector(y), base_margin, objective, params))
+    }
+
+    if (!is.null(objective) && !(objective %in% .MULTI_TARGET_OBJECTIVES())) {
+      stop(
+        "Got multi-column 'y' - supported objectives for this data are: ",
+        paste(.MULTI_TARGET_OBJECTIVES(), collapse = ", "),
+        ". Was passed: ",
+        objective
+      )
+    }
+    if (is.null(objective)) {
+      objective <- "reg:squarederror"
+    }
+
+    y_names <- colnames(y)
+    if (storage.mode(y) != "double") {
+      storage.mode(y) <- "double"
+    }
+
+    if (!is.null(base_margin)) {
+      base_margin <- process.base.margin(base_margin, nrow(y), ncol(y))
+    }
+
+    out <- list(
+      params = list(
+        objective = objective
+      ),
+      dmatrix_args = list(
+        label = y,
+        base_margin = base_margin
+      ),
+      metadata = list(
+        n_targets = ncol(y)
+      )
+    )
+
+    if (NROW(y_names) == ncol(y)) {
+      out$metadata$y_names <- y_names
+    }
+
+  } else {
+    stop("Passed 'y' object with unsupported class: ", paste(class(y), collapse = ", "))
+  }
+
+  return(out)
+}
+
+process.row.weights <- function(w, lst_args) {
+  if (!is.null(w)) {
+    if ("label" %in% names(lst_args$dmatrix_args)) {
+      nrow_y <- NROW(lst_args$dmatrix_args$label)
+    } else if ("label_lower_bound" %in% names(lst_args$dmatrix_args)) {
+      nrow_y <- length(lst_args$dmatrix_args$label_lower_bound)
+    } else {
+      stop("Internal error.")
+    }
+    if (!is.numeric(w)) {
+      w <- as.numeric(w)
+    }
+    if (length(w) != nrow_y) {
+      stop(
+        "'weights' must be a 1-d vector with the same length as 'y' (",
+        length(w), " vs. ", nrow_y, ")."
+      )
+    }
+    lst_args$dmatrix_args$weight <- w
+  }
+  return(lst_args)
+}
+
+check.nthreads <- function(nthreads) {
+  if (is.null(nthreads)) {
+    return(1L)
+  }
+  if (!inherits(nthreads, c("numeric", "integer")) || !NROW(nthreads)) {
+    stop("'nthreads' must be a positive scalar value.")
+  }
+  if (length(nthreads) > 1L) {
+    nthreads <- head(nthreads, 1L)
+  }
+  if (is.na(nthreads) || nthreads < 1) {
+    stop("Passed invalid 'nthreads': ", nthreads)
+  }
+  if (is.numeric(nthreads)) {
+    if (floor(nthreads) != nthreads) {
+      stop("'nthreads' must be an integer.")
+    }
+  }
+  return(as.integer(nthreads))
+}
+
+check.can.use.qdm <- function(x, params) {
+  if (inherits(x, "sparseMatrix") && !inherits(x, "dgRMatrix")) {
+    return(FALSE)
+  }
+  if ("booster" %in% names(params)) {
+    if (params$booster == "gblinear") {
+      return(FALSE)
+    }
+  }
+  if ("tree_method" %in% names(params)) {
+    if (params$tree_method %in% c("exact", "approx")) {
+      return(FALSE)
+    }
+  }
+  return(TRUE)
+}
+
+process.x.and.col.args <- function(
+  x,
+  monotone_constraints,
+  interaction_constraints,
+  feature_weights,
+  lst_args
+) {
+  if (is.null(x)) {
+    stop("'x' cannot be NULL.")
+  }
+  if (inherits(x, "xgb.DMatrix")) {
+    stop("Cannot pass 'xgb.DMatrix' as 'x' to 'xgboost()'. Try 'xgb.train()' instead.")
+  }
+  supported_x_types <- c("data.frame", "matrix", "dgCMatrix", "dgRMatrix")
+  if (!inherits(x, supported_x_types)) {
+    stop(
+      "'x' must be one of the following classes: ",
+      paste(supported_x_types, collapse = ", "),
+      ". Got: ",
+      paste(class(x), collapse = ", ")
+    )
+  }
+
+  if (NROW(feature_weights)) {
+    if (is.list(feature_weights)) {
+      feature_weights <- unlist(feature_weights)
+    }
+    if (!inherits(feature_weights, c("numeric", "integer"))) {
+      stop("'feature_weights' must be a numeric vector or named list matching the columns of 'x'.")
+    }
+    if (length(feature_weights) != ncol(x)) {
+      stop(
+        "'feature_weights' does not match in length with columns of 'x' (",
+        length(feature_weights), " vs. ", ncol(x), ")."
+      )
+    }
+    if (NROW(names(feature_weights)) && NROW(colnames(x))) {
+      matched <- match(colnames(x), names(feature_weights))
+      matched <- matched[!is.na(matched)]
+      matched <- matched[!duplicated(matched)]
+      if (length(matched) > 0 && length(matched) < length(feature_weights)) {
+        stop(
+          "'feature_weights' names do not contain all columns of 'x'. Missing: ",
+          paste(head(setdiff(colnames(x), names(feature_weights))), collapse = ", ")
+        )
+      }
+      if (length(matched)) {
+        feature_weights <- feature_weights[matched]
+      } else {
+        warning("Names of 'feature_weights' do not match with 'x'. Names will be ignored.")
+      }
+    }
+
+    lst_args$dmatrix_args$feature_weights <- unname(feature_weights)
+  }
+
+  if (NROW(monotone_constraints)) {
+
+    if (NROW(monotone_constraints) > ncol(x)) {
+      stop(
+        "'monotone_constraints' contains more entries than there are columns in 'x' (",
+        NROW(monotone_constraints), " vs. ", ncol(x), ")."
+      )
+    }
+
+    if (is.list(monotone_constraints)) {
+
+      if (!NROW(names(monotone_constraints))) {
+        stop(
+          "If passing 'monotone_constraints' as a named list,",
+          " must have names matching the columns of 'x'."
+        )
+      }
+      if (!NROW(colnames(x))) {
+        stop("If passing 'monotone_constraints' as a named list, 'x' must have column names.")
+      }
+      if (any(duplicated(names(monotone_constraints)))) {
+        stop(
+          "'monotone_constraints' contains duplicated names: ",
+          paste(
+            names(monotone_constraints)[duplicated(names(monotone_constraints))] |> head(),
+            collapse = ", "
+          )
+        )
+      }
+      if (NROW(setdiff(names(monotone_constraints), colnames(x)))) {
+        stop(
+          "'monotone_constraints' contains column names not present in 'x': ",
+          paste(head(setdiff(names(monotone_constraints), colnames(x))), collapse = ", ")
+        )
+      }
+
+      vec_monotone_constr <- rep(0, ncol(x))
+      matched <- match(names(monotone_constraints), colnames(x))
+      vec_monotone_constr[matched] <- unlist(monotone_constraints)
+      lst_args$params$monotone_constraints <- unname(vec_monotone_constr)
+
+    } else if (inherits(monotone_constraints, c("numeric", "integer"))) {
+
+      if (NROW(names(monotone_constraints)) && NROW(colnames(x))) {
+        if (length(monotone_constraints) < ncol(x)) {
+          return(
+            process.x.and.col.args(
+              x,
+              as.list(monotone_constraints),
+              interaction_constraints,
+              feature_weights,
+              lst_args
+            )
+          )
+        } else {
+          matched <- match(names(monotone_constraints), colnames(x))
+          matched <- matched[!is.na(matched)]
+          matched <- matched[!duplicated(matched)]
+          if (length(matched)) {
+            monotone_constraints <- monotone_constraints[matched]
+          } else {
+            warning("Names of 'monotone_constraints' do not match with 'x'. Names will be ignored.")
+          }
+        }
+      } else {
+        if (length(monotone_constraints) != ncol(x)) {
+          stop(
+            "If passing 'monotone_constraints' as unnamed vector or not using column names,",
+            " must have length matching the number of columns in 'x'. Got: ",
+            length(monotone_constraints), " (vs. ", ncol(x), ")"
+          )
+        }
+      }
+
+      lst_args$params$monotone_constraints <- unname(monotone_constraints)
+
+    } else if (is.character(monotone_constraints)) {
+      lst_args$params$monotone_constraints <- monotone_constraints
+    } else {
+      stop(
+        "Passed unsupported type for 'monotone_constraints': ",
+        paste(class(monotone_constraints), collapse = ", ")
+      )
+    }
+  }
+
+  if (NROW(interaction_constraints)) {
+    if (!is.list(interaction_constraints)) {
+      stop("'interaction_constraints' must be a list of vectors.")
+    }
+    cnames <- colnames(x)
+    lst_args$params$interaction_constraints <- lapply(interaction_constraints, function(idx) {
+      if (!NROW(idx)) {
+        stop("Elements in 'interaction_constraints' cannot be empty.")
+      }
+
+      if (is.character(idx)) {
+        if (!NROW(cnames)) {
+          stop(
+            "Passed a character vector for 'interaction_constraints', but 'x' ",
+            "has no column names to match them against."
+ ) + } + out <- match(idx, cnames) - 1L + if (anyNA(out)) { + stop( + "'interaction_constraints' contains column names not present in 'x': ", + paste(head(idx[which(is.na(out))]), collapse = ", ") + ) + } + return(out) + } else if (inherits(idx, c("numeric", "integer"))) { + if (anyNA(idx)) { + stop("'interaction_constraints' cannot contain NA values.") + } + if (min(idx) < 1) { + stop("Column indices for 'interaction_constraints' must follow base-1 indexing.") + } + if (max(idx) > ncol(x)) { + stop("'interaction_constraints' contains invalid column indices.") + } + if (is.numeric(idx)) { + if (any(idx != floor(idx))) { + stop( + "'interaction_constraints' must contain only integer indices. Got non-integer: ", + paste(head(idx[which(idx != floor(idx))]), collapse = ", ") + ) + } + } + return(idx - 1L) + } else { + stop( + "Elements in 'interaction_constraints' must be vectors of types ", + "'integer', 'numeric', or 'character'. Got: ", + paste(class(idx), collapse = ", ") + ) + } + }) + } + + return(lst_args) +} + +#' @noMd #' @export -xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL, - params = list(), nrounds, - verbose = 1, print_every_n = 1L, - early_stopping_rounds = NULL, maximize = NULL, - save_period = NULL, save_name = "xgboost.model", - xgb_model = NULL, callbacks = list(), ...) { - merged <- check.booster.params(params, ...) - dtrain <- xgb.get.DMatrix( - data = data, - label = label, - missing = missing, - weight = weight, - nthread = merged$nthread +#' @title Fit XGBoost Model +#' @description Fits an XGBoost model (boosted decision tree ensemble) to given x/y data. +#' +#' See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html +#' }{Introduction to Boosted Trees} for a longer explanation of what XGBoost does. +#' +#' This function is intended to provide a more user-friendly interface for XGBoost that follows +#' R's conventions for model fitting and predictions, but which doesn't expose all of the +#' possible functionalities of the core XGBoost library. +#' +#' See \link{xgb.train} for a more flexible low-level alternative which is similar across different +#' language bindings of XGBoost and which exposes the full library's functionalities. +#' @details For package authors using `xgboost` as a dependency, it is highly recommended to use +#' \link{xgb.train} in package code instead of `xgboost()`, since it has a more stable interface +#' and performs fewer data conversions and copies along the way. +#' @references \itemize{ +#' \item Chen, Tianqi, and Carlos Guestrin. "Xgboost: A scalable tree boosting system." +#' Proceedings of the 22nd acm sigkdd international conference on knowledge discovery and +#' data mining. 2016. +#' \item \url{https://xgboost.readthedocs.io/en/stable/} +#' } +#' @param x The features / covariates. Can be passed as:\itemize{ +#' \item A numeric or integer `matrix`. +#' \item A `data.frame`, in which all columns are one of the following types:\itemize{ +#' \item `numeric` +#' \item `integer` +#' \item `logical` +#' \item `factor` +#' } +#' +#' Columns of `factor` type will be assumed to be categorical, while other column types will +#' be assumed to be numeric. +#' \item A sparse matrix from the `Matrix` package, either as `dgCMatrix` or `dgRMatrix` class. +#' } +#' +#' Note that categorical features are only supported for `data.frame` inputs, and are automatically +#' determined based on their types. 
See \link{xgb.train} with \link{xgb.DMatrix} for more flexible +#' variants that would allow something like categorical features on sparse matrices. +#' @param y The response variable. Allowed values are:\itemize{ +#' \item A numeric or integer vector (for regression tasks). +#' \item A factor or character vector (for binary and multi-class classification tasks). +#' \item A logical (boolean) vector (for binary classification tasks). +#' \item A numeric or integer matrix or `data.frame` with numeric/integer columns +#' (for multi-task regression tasks). +#' \item A `Surv` object from the `survival` package (for survival tasks). +#' } +#' +#' If `objective` is `NULL`, the right task will be determined automatically based on +#' the class of `y`. +#' +#' If `objective` is not `NULL`, it must match with the type of `y` - e.g. `factor` types of `y` +#' can only be used with classification objectives and vice-versa. +#' +#' For binary classification, the last factor level of `y` will be used as the "positive" +#' class - that is, the numbers from `predict` will reflect the probabilities of belonging to this +#' class instead of to the first factor level. If `y` is a `logical` vector, then `TRUE` will be +#' set as the last level. +#' @param objective Optimization objective to minimize based on the supplied data, to be passed +#' by name as a string / character (e.g. `reg:absoluteerror`). See the \href{ +#' https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{ +#' Learning Task Parameters} page for more detailed information on allowed values. +#' +#' If `NULL` (the default), will be automatically determined from `y` according to the following +#' logic:\itemize{ +#' \item If `y` is a factor with 2 levels, will use `binary:logistic`. +#' \item If `y` is a factor with more than 2 levels, will use `multi:softprob` (number of classes +#' will be determined automatically, should not be passed under `params`). +#' \item If `y` is a `Surv` object from the `survival` package, will use `survival:aft` (note that +#' the only types supported are left / right / interval censored). +#' \item Otherwise, will use `reg:squarederror`. +#' } +#' +#' If `objective` is not `NULL`, it must match with the type of `y` - e.g. `factor` types of `y` +#' can only be used with classification objectives and vice-versa. +#' +#' Note that not all possible `objective` values supported by the core XGBoost library are allowed +#' here - for example, objectives which are a variation of another but with a different default +#' prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are +#' ranking objectives, nor custom objectives at the moment. +#' @param params List of training parameters. See the online documentation \href{ +#' https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for details about +#' possible values and what they do. +#' +#' Note that not all possible values from the core XGBoost library are allowed as `params` for +#' 'xgboost()' - in particular, values which are direct arguments to this function (such as +#' `objective` or `nthreads`) cannot be passed under `params` (they should be passed as function +#' arguments instead). Values which otherwise require an already-fitted booster object (such as +#' `process_type`) are also not accepted here. +#' @param nrounds Number of boosting iterations / rounds. 
+#'
+#' Note that the number of default boosting rounds here is not automatically tuned, and different
+#' problems will have vastly different optimal numbers of boosting rounds.
+#' @param weights Sample weights for each row in `x` and `y`. If `NULL` (the default), each row
+#' will have the same weight.
+#'
+#' If not `NULL`, should be passed as a numeric vector with length matching the number of
+#' rows in `x`.
+#' @param verbosity Verbosity of printing messages. Valid values are 0 (silent), 1 (warning),
+#' 2 (info), and 3 (debug).
+#' @param nthreads Number of parallel threads to use.
+#' @param seed Seed to use for random number generation. If passing `NULL`, will draw a random
+#' number using R's PRNG system to use as seed.
+#' @param monotone_constraints Optional monotonicity constraints for features.
+#'
+#' Can be passed either as a named list (when `x` has column names), or as a vector. If passed
+#' as a vector and `x` has column names, will try to match the elements by name.
+#'
+#' A value of `+1` for a given feature makes the model predictions / scores constrained to be
+#' a monotonically increasing function of that feature (that is, as the value of the feature
+#' increases, the model prediction cannot decrease), while a value of `-1` makes it a monotonically
+#' decreasing function. A value of zero imposes no constraint.
+#'
+#' The input for `monotone_constraints` can be a subset of the columns of `x` if named, in which
+#' case the columns that are not referred to in `monotone_constraints` will be assumed to have
+#' a value of zero (no constraint imposed on the model for those features).
+#'
+#' See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/monotonic.html}{
+#' Monotonic Constraints} for a more detailed explanation.
+#' @param interaction_constraints Constraints for interaction representing permitted interactions.
+#' The constraints must be specified in the form of a list of vectors referencing columns in the
+#' data, e.g. `list(c(1, 2), c(3, 4, 5))` (with these numbers being column indices, numbering
+#' starting at 1 - i.e. the first sublist references the first and second columns) or
+#' `list(c("Sepal.Length", "Sepal.Width"), c("Petal.Length", "Petal.Width"))` (references
+#' columns by names), where each vector is a group of indices of features that are allowed to
+#' interact with each other.
+#'
+#' See the tutorial \href{
+#' https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html
+#' }{Feature Interaction Constraints} for more information.
+#' @param feature_weights Feature weights for column sampling.
+#'
+#' Can be passed either as a vector with length matching the number of columns of `x`, or as a
+#' named list (only if `x` has column names) with names matching the columns of `x`. If named
+#' (as either a vector or a list), will try to match the entries to column names of `x` by name.
+#'
+#' If `NULL` (the default), all columns will have the same weight.
+#' @param base_margin Base margin used for boosting from existing model.
+#'
+#' If passing it, will start the gradient boosting procedure from the scores that are provided
+#' here - for example, one can pass the raw scores from a previous model, or some per-observation
+#' offset, or similar.
+#'
+#' Should be either a numeric vector or numeric matrix (for multi-class and multi-target objectives)
+#' with the same number of rows as `x` and number of columns corresponding to number of optimization
+#' targets, and should be in the untransformed scale (for example, for objective `binary:logistic`,
+#' it should have log-odds, not probabilities; and for objective `multi:softprob`, should have
+#' number of columns matching the number of classes in the data).
+#'
+#' Note that, if it contains more than one column, then columns will not be matched by name to
+#' the corresponding `y` - `base_margin` should have the same column order that the model will use
+#' (for example, for objective `multi:softprob`, columns of `base_margin` will be matched against
+#' `levels(y)` by their position, regardless of what `colnames(base_margin)` returns).
+#'
+#' If `NULL`, will start from zero, but note that for most objectives, an intercept is usually
+#' added (controllable through parameter `base_score` instead) when `base_margin` is not passed.
+#' @return A model object, inheriting from both `xgboost` and `xgb.Booster`. Compared to the regular
+#' `xgb.Booster` model class produced by \link{xgb.train}, this `xgboost` class will have an
+#' additional attribute `metadata` containing information which is used for formatting prediction
+#' outputs, such as class names for classification problems.
+#' @examples
+#' library(xgboost)
+#' data(mtcars)
+#'
+#' # Fit a small regression model on the mtcars data
+#' model_regression <- xgboost(mtcars[, -1], mtcars$mpg, nthreads = 1, nrounds = 3)
+#' predict(model_regression, mtcars, validate_features = TRUE)
+#'
+#' # Task objective is determined automatically according to the type of 'y'
+#' data(iris)
+#' model_classif <- xgboost(iris[, -5], iris$Species, nthreads = 1, nrounds = 5)
+#' predict(model_classif, iris, validate_features = TRUE)
+xgboost <- function(
+  x,
+  y,
+  objective = NULL,
+  params = list(),
+  nrounds = 100L,
+  weights = NULL,
+  verbosity = 0L,
+  nthreads = parallel::detectCores(),
+  seed = 0L,
+  monotone_constraints = NULL,
+  interaction_constraints = NULL,
+  feature_weights = NULL,
+  base_margin = NULL
+) {
+  # Note: some validations on parameter names are performed before passing them to
+  # 'xgb.train', hence this seemingly redundant conversion of names below.
+  names(params) <- gsub(".", "_", names(params), fixed = TRUE)
+
+  params <- prescreen.parameters(params)
+  prescreen.objective(objective)
+  lst_args <- process.y.margin.and.objective(y, base_margin, objective, params)
+  lst_args <- process.row.weights(weights, lst_args)
+  lst_args <- process.x.and.col.args(
+    x,
+    monotone_constraints,
+    interaction_constraints,
+    feature_weights,
+    lst_args
+  )
-  evals <- list(train = dtrain)
+  use_qdm <- check.can.use.qdm(x, params)
+  if (use_qdm && "max_bin" %in% names(params)) {
+    lst_args$dmatrix_args$max_bin <- params$max_bin
+  }
+
+  nthreads <- check.nthreads(nthreads)
+  lst_args$dmatrix_args$nthread <- nthreads
+  lst_args$params$nthread <- nthreads
+  lst_args$params$seed <- seed
-  bst <- xgb.train(params, dtrain, nrounds, evals, verbose = verbose, print_every_n = print_every_n,
-                   early_stopping_rounds = early_stopping_rounds, maximize = maximize,
-                   save_period = save_period, save_name = save_name,
-                   xgb_model = xgb_model, callbacks = callbacks, ...)
-  return(bst)
+  params <- c(lst_args$params, params)
+
+  lst_args$dmatrix_args$data <- x
+  fn_dm <- if (use_qdm) xgb.QuantileDMatrix else xgb.DMatrix
+  dm <- do.call(fn_dm, lst_args$dmatrix_args)
+  model <- xgb.train(
+    params = params,
+    data = dm,
+    nrounds = nrounds,
+    verbose = verbosity
+  )
+  attributes(model)$metadata <- lst_args$metadata
+  attributes(model)$call <- match.call()
+  class(model) <- c("xgboost", class(model))
+  return(model)
 }
 
+#' @export
+print.xgboost <- function(x, ...) {
+  cat("XGBoost model object\n")
+  cat("Call:\n ")
+  print(attributes(x)$call)
+  cat("Objective: ", attributes(x)$params$objective, "\n", sep = "")
+  cat("Number of iterations: ", xgb.get.num.boosted.rounds(x), "\n", sep = "")
+  cat("Number of features: ", xgb.num_feature(x), "\n", sep = "")
+
+  printable_head <- function(v) {
+    v_sub <- head(v, 5L)
+    return(
+      sprintf(
+        "%s%s",
+        paste(v_sub, collapse = ", "),
+        ifelse(length(v_sub) < length(v), ", ...", "")
+      )
+    )
+  }
+
+  if (NROW(attributes(x)$metadata$y_levels)) {
+    cat(
+      "Classes: ",
+      printable_head(attributes(x)$metadata$y_levels),
+      "\n",
+      sep = ""
+    )
+  } else if (NROW(attributes(x)$params$quantile_alpha)) {
+    cat(
+      "Prediction quantile",
+      ifelse(length(attributes(x)$params$quantile_alpha) > 1L, "s", ""),
+      ": ",
+      printable_head(attributes(x)$params$quantile_alpha),
+      "\n",
+      sep = ""
+    )
+  } else if (NROW(attributes(x)$metadata$y_names)) {
+    cat(
+      "Prediction targets: ",
+      printable_head(attributes(x)$metadata$y_names),
+      "\n",
+      sep = ""
+    )
+  } else if (attributes(x)$metadata$n_targets > 1L) {
+    cat(
+      "Number of prediction targets: ",
+      attributes(x)$metadata$n_targets,
+      "\n",
+      sep = ""
+    )
+  }
+
+  return(invisible(x))
+}
+
+
 #' Training part from Mushroom Data Set
 #'
 #' This data set is originally from the Mushroom data set,
diff --git a/R-package/demo/basic_walkthrough.R b/R-package/demo/basic_walkthrough.R
index 9403bac2064c..c65790109fc2 100644
--- a/R-package/demo/basic_walkthrough.R
+++ b/R-package/demo/basic_walkthrough.R
@@ -16,29 +16,28 @@ class(train$data)
 # note: we are putting in sparse matrix here, xgboost naturally handles sparse input
 # use sparse matrix when your feature is sparse(e.g. when you are using one-hot encoding vector)
 print("Training xgboost with sparseMatrix")
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2,
-               nthread = 2, objective = "binary:logistic")
+bst <- xgboost(x = train$data, y = factor(train$label, c(0, 1)),
+               params = list(max_depth = 2, eta = 1),
+               nrounds = 2, nthread = 2)
 
 # alternatively, you can put in dense matrix, i.e. 
basic R-matrix print("Training xgboost with Matrix") -bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2, - nthread = 2, objective = "binary:logistic") +bst <- xgboost(x = as.matrix(train$data), y = factor(train$label, c(0, 1)), + params = list(max_depth = 2, eta = 1), + nrounds = 2, nthread = 2) # you can also put in xgb.DMatrix object, which stores label, data and other meta datas needed for advanced features print("Training xgboost with xgb.DMatrix") dtrain <- xgb.DMatrix(data = train$data, label = train$label) -bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2, - objective = "binary:logistic") +params <- list(max_depth = 2, eta = 1, nthread = 2, objective = "binary:logistic") +bst <- xgb.train(data = dtrain, params = params, nrounds = 2) # Verbose = 0,1,2 print("Train xgboost with verbose 0, no message") -bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, - nthread = 2, objective = "binary:logistic", verbose = 0) +bst <- xgb.train(data = dtrain, params = params, nrounds = 2, verbose = 0) print("Train xgboost with verbose 1, print evaluation metric") -bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, - nthread = 2, objective = "binary:logistic", verbose = 1) +bst <- xgb.train(data = dtrain, params = params, nrounds = 2, verbose = 1) print("Train xgboost with verbose 2, also print information about tree") -bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, - nthread = 2, objective = "binary:logistic", verbose = 2) +bst <- xgb.train(data = dtrain, params = params, nrounds = 2, verbose = 2) # you can also specify data as file path to a LIBSVM format input # since we do not have this file with us, the following line is just for illustration diff --git a/R-package/man/print.xgb.Booster.Rd b/R-package/man/print.xgb.Booster.Rd index 9a783efaff27..fc055318cd01 100644 --- a/R-package/man/print.xgb.Booster.Rd +++ b/R-package/man/print.xgb.Booster.Rd @@ -21,9 +21,8 @@ Print information about \code{xgb.Booster}. data(agaricus.train, package = "xgboost") train <- agaricus.train -bst <- xgboost( - data = train$data, - label = train$label, +bst <- xgb.train( + data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, eta = 1, nthread = 2, diff --git a/R-package/man/xgb.attr.Rd b/R-package/man/xgb.attr.Rd index 8038a2048b70..f23e9234018a 100644 --- a/R-package/man/xgb.attr.Rd +++ b/R-package/man/xgb.attr.Rd @@ -64,9 +64,8 @@ example of these behaviors). 
data(agaricus.train, package = "xgboost") train <- agaricus.train -bst <- xgboost( - data = train$data, - label = train$label, +bst <- xgb.train( + data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, eta = 1, nthread = 2, diff --git a/R-package/man/xgb.config.Rd b/R-package/man/xgb.config.Rd index 1ab810644db9..dbad1d8cf043 100644 --- a/R-package/man/xgb.config.Rd +++ b/R-package/man/xgb.config.Rd @@ -35,9 +35,8 @@ nthread <- 1 data.table::setDTthreads(nthread) train <- agaricus.train -bst <- xgboost( - data = train$data, - label = train$label, +bst <- xgb.train( + data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, eta = 1, nthread = nthread, diff --git a/R-package/man/xgb.dump.Rd b/R-package/man/xgb.dump.Rd index 6f97f69244b9..199ede1583f8 100644 --- a/R-package/man/xgb.dump.Rd +++ b/R-package/man/xgb.dump.Rd @@ -49,8 +49,8 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") +bst <- xgb.train(data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, + eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") # save the model in file 'xgb.model.dump' dump_path = file.path(tempdir(), 'model.dump') xgb.dump(bst, dump_path, with_stats = TRUE) diff --git a/R-package/man/xgb.importance.Rd b/R-package/man/xgb.importance.Rd index 73b91e8b4b28..76574b9cbf06 100644 --- a/R-package/man/xgb.importance.Rd +++ b/R-package/man/xgb.importance.Rd @@ -70,9 +70,8 @@ be on the same scale (which is also recommended when using L1 or L2 regularizati # binomial classification using "gbtree": data(agaricus.train, package = "xgboost") -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), max_depth = 2, eta = 1, nthread = 2, @@ -83,9 +82,8 @@ bst <- xgboost( xgb.importance(model = bst) # binomial classification using "gblinear": -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), booster = "gblinear", eta = 0.3, nthread = 1, @@ -97,9 +95,11 @@ xgb.importance(model = bst) # multiclass classification using "gbtree": nclass <- 3 nrounds <- 10 -mbst <- xgboost( - data = as.matrix(iris[, -5]), - label = as.numeric(iris$Species) - 1, +mbst <- xgb.train( + data = xgb.DMatrix( + as.matrix(iris[, -5]), + label = as.numeric(iris$Species) - 1 + ), max_depth = 3, eta = 0.2, nthread = 2, @@ -123,9 +123,11 @@ xgb.importance( ) # multiclass classification using "gblinear": -mbst <- xgboost( - data = scale(as.matrix(iris[, -5])), - label = as.numeric(iris$Species) - 1, +mbst <- xgb.train( + data = xgb.DMatrix( + scale(as.matrix(iris[, -5])), + label = as.numeric(iris$Species) - 1 + ), booster = "gblinear", eta = 0.2, nthread = 1, diff --git a/R-package/man/xgb.model.dt.tree.Rd b/R-package/man/xgb.model.dt.tree.Rd index 75f1cd0f4f77..e9536767986c 100644 --- a/R-package/man/xgb.model.dt.tree.Rd +++ b/R-package/man/xgb.model.dt.tree.Rd @@ -63,9 +63,8 @@ data(agaricus.train, package = "xgboost") nthread <- 1 data.table::setDTthreads(nthread) -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), max_depth = 2, eta = 1, nthread = 
nthread, diff --git a/R-package/man/xgb.parameters.Rd b/R-package/man/xgb.parameters.Rd index 8d5044cab5cc..82977dc122d4 100644 --- a/R-package/man/xgb.parameters.Rd +++ b/R-package/man/xgb.parameters.Rd @@ -33,9 +33,8 @@ will reset its number of rounds indicator to zero. data(agaricus.train, package = "xgboost") train <- agaricus.train -bst <- xgboost( - data = train$data, - label = train$label, +bst <- xgb.train( + data = xgb.DMatrix(train$data, label = train$label), max_depth = 2, eta = 1, nthread = 2, diff --git a/R-package/man/xgb.plot.deepness.Rd b/R-package/man/xgb.plot.deepness.Rd index 43c0dac777f6..3da8e384e4a1 100644 --- a/R-package/man/xgb.plot.deepness.Rd +++ b/R-package/man/xgb.plot.deepness.Rd @@ -73,9 +73,8 @@ nthread <- 2 data.table::setDTthreads(nthread) ## Change max_depth to a higher number to get a more significant result -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), max_depth = 6, nthread = nthread, nrounds = 50, diff --git a/R-package/man/xgb.plot.importance.Rd b/R-package/man/xgb.plot.importance.Rd index e9c5930c2d57..a9ebcbd2732a 100644 --- a/R-package/man/xgb.plot.importance.Rd +++ b/R-package/man/xgb.plot.importance.Rd @@ -88,9 +88,8 @@ data(agaricus.train) nthread <- 2 data.table::setDTthreads(nthread) -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), max_depth = 3, eta = 1, nthread = nthread, diff --git a/R-package/man/xgb.plot.multi.trees.Rd b/R-package/man/xgb.plot.multi.trees.Rd index 7fa75c85d886..eae84d98edfd 100644 --- a/R-package/man/xgb.plot.multi.trees.Rd +++ b/R-package/man/xgb.plot.multi.trees.Rd @@ -67,9 +67,8 @@ data(agaricus.train, package = "xgboost") nthread <- 2 data.table::setDTthreads(nthread) -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label), max_depth = 15, eta = 1, nthread = nthread, diff --git a/R-package/man/xgb.plot.shap.Rd b/R-package/man/xgb.plot.shap.Rd index b460fa1fb3a6..f2d2ea2a05e6 100644 --- a/R-package/man/xgb.plot.shap.Rd +++ b/R-package/man/xgb.plot.shap.Rd @@ -135,9 +135,8 @@ nthread <- 1 data.table::setDTthreads(nthread) nrounds <- 20 -bst <- xgboost( - agaricus.train$data, - agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, agaricus.train$label), nrounds = nrounds, eta = 0.1, max_depth = 3, @@ -161,9 +160,8 @@ x <- as.matrix(iris[, -5]) set.seed(123) is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values -mbst <- xgboost( - data = x, - label = as.numeric(iris$Species) - 1, +mbst <- xgb.train( + data = xgb.DMatrix(x, label = as.numeric(iris$Species) - 1), nrounds = nrounds, max_depth = 2, eta = 0.3, diff --git a/R-package/man/xgb.plot.tree.Rd b/R-package/man/xgb.plot.tree.Rd index 69d37301dde6..6064107fc184 100644 --- a/R-package/man/xgb.plot.tree.Rd +++ b/R-package/man/xgb.plot.tree.Rd @@ -96,9 +96,8 @@ This function uses \href{https://www.graphviz.org/}{GraphViz} as DiagrammeR back \examples{ data(agaricus.train, package = "xgboost") -bst <- xgboost( - data = agaricus.train$data, - label = agaricus.train$label, +bst <- xgb.train( + data = xgb.DMatrix(agaricus.train$data, agaricus.train$label), max_depth = 3, eta = 1, nthread = 2, diff --git a/R-package/man/xgb.train.Rd b/R-package/man/xgb.train.Rd index 
937020e0dd38..f65a34afc4df 100644 --- a/R-package/man/xgb.train.Rd +++ b/R-package/man/xgb.train.Rd @@ -1,8 +1,7 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.train.R, R/xgboost.R +% Please edit documentation in R/xgb.train.R \name{xgb.train} \alias{xgb.train} -\alias{xgboost} \title{eXtreme Gradient Boosting Training} \usage{ xgb.train( @@ -22,24 +21,6 @@ xgb.train( callbacks = list(), ... ) - -xgboost( - data = NULL, - label = NULL, - missing = NA, - weight = NULL, - params = list(), - nrounds, - verbose = 1, - print_every_n = 1L, - early_stopping_rounds = NULL, - maximize = NULL, - save_period = NULL, - save_name = "xgboost.model", - xgb_model = NULL, - callbacks = list(), - ... -) } \arguments{ \item{params}{the list of parameters. The complete list of parameters is @@ -371,9 +352,8 @@ bst <- xgb.train(param, dtrain, nrounds = 25, evals = evals, early_stopping_rounds = 3) ## An 'xgboost' interface example: -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, - max_depth = 2, eta = 1, nthread = nthread, nrounds = 2, - objective = "binary:logistic") +bst <- xgboost(x = agaricus.train$data, y = factor(agaricus.train$label), + params = list(max_depth = 2, eta = 1), nthread = nthread, nrounds = 2) pred <- predict(bst, agaricus.test$data) } diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd new file mode 100644 index 000000000000..921f3a9119cd --- /dev/null +++ b/R-package/man/xgboost.Rd @@ -0,0 +1,215 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/xgboost.R +\name{xgboost} +\alias{xgboost} +\title{Fit XGBoost Model} +\usage{ +xgboost( + x, + y, + objective = NULL, + params = list(), + nrounds = 100L, + weights = NULL, + verbosity = 0L, + nthreads = parallel::detectCores(), + seed = 0L, + monotone_constraints = NULL, + interaction_constraints = NULL, + feature_weights = NULL, + base_margin = NULL +) +} +\arguments{ +\item{x}{The features / covariates. Can be passed as:\itemize{ +\item A numeric or integer `matrix`. +\item A `data.frame`, in which all columns are one of the following types:\itemize{ + \item `numeric` + \item `integer` + \item `logical` + \item `factor` +} + +Columns of `factor` type will be assumed to be categorical, while other column types will +be assumed to be numeric. +\item A sparse matrix from the `Matrix` package, either as `dgCMatrix` or `dgRMatrix` class. +} + +Note that categorical features are only supported for `data.frame` inputs, and are automatically +determined based on their types. See \link{xgb.train} with \link{xgb.DMatrix} for more flexible +variants that would allow something like categorical features on sparse matrices.} + +\item{y}{The response variable. Allowed values are:\itemize{ +\item A numeric or integer vector (for regression tasks). +\item A factor or character vector (for binary and multi-class classification tasks). +\item A logical (boolean) vector (for binary classification tasks). +\item A numeric or integer matrix or `data.frame` with numeric/integer columns +(for multi-task regression tasks). +\item A `Surv` object from the `survival` package (for survival tasks). +} + +If `objective` is `NULL`, the right task will be determined automatically based on +the class of `y`. + +If `objective` is not `NULL`, it must match with the type of `y` - e.g. `factor` types of `y` +can only be used with classification objectives and vice-versa. 
+
+For binary classification, the last factor level of `y` will be used as the "positive"
+class - that is, the numbers from `predict` will reflect the probabilities of belonging to this
+class instead of to the first factor level. If `y` is a `logical` vector, then `TRUE` will be
+set as the last level.}
+
+\item{objective}{Optimization objective to minimize based on the supplied data, to be passed
+by name as a string / character (e.g. `reg:absoluteerror`). See the \href{
+https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{
+Learning Task Parameters} page for more detailed information on allowed values.
+
+If `NULL` (the default), will be automatically determined from `y` according to the following
+logic:\itemize{
+\item If `y` is a factor with 2 levels, will use `binary:logistic`.
+\item If `y` is a factor with more than 2 levels, will use `multi:softprob` (number of classes
+will be determined automatically, should not be passed under `params`).
+\item If `y` is a `Surv` object from the `survival` package, will use `survival:aft` (note that
+the only types supported are left / right / interval censored).
+\item Otherwise, will use `reg:squarederror`.
+}
+
+If `objective` is not `NULL`, it must match the type of `y` - e.g. `factor` types of `y`
+can only be used with classification objectives and vice-versa.
+
+Note that not all possible `objective` values supported by the core XGBoost library are allowed
+here - for example, objectives which are a variation of another but with a different default
+prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are
+ranking objectives, nor custom objectives at the moment.}
+
+\item{params}{List of training parameters. See the online documentation \href{
+https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for details about
+possible values and what they do.
+
+Note that not all possible values from the core XGBoost library are allowed as `params` for
+'xgboost()' - in particular, values which are direct arguments to this function (such as
+`objective` or `nthreads`) cannot be passed under `params` (they should be passed as function
+arguments instead). Values which otherwise require an already-fitted booster object (such as
+`process_type`) are also not accepted here.}
+
+\item{nrounds}{Number of boosting iterations / rounds.
+
+Note that the default number of boosting rounds here is not automatically tuned, and different
+problems will have vastly different optimal numbers of boosting rounds.}
+
+\item{weights}{Sample weights for each row in `x` and `y`. If `NULL` (the default), each row
+will have the same weight.
+
+If not `NULL`, should be passed as a numeric vector with length matching the number of
+rows in `x`.}
+
+\item{verbosity}{Verbosity of printing messages. Valid values are 0 (silent), 1 (warning),
+2 (info), and 3 (debug).}
+
+\item{nthreads}{Number of parallel threads to use.}
+
+\item{seed}{Seed to use for random number generation. If passing `NULL`, will draw a random
+number using R's PRNG system to use as seed.}
+
+\item{monotone_constraints}{Optional monotonicity constraints for features.
+
+Can be passed either as a named list (when `x` has column names), or as a vector. If passed
+as a vector and `x` has column names, will try to match the elements by name.
+
+A value of `+1` for a given feature makes the model predictions / scores constrained to be
+a monotonically increasing function of that feature (that is, as the value of the feature
+increases, the model prediction cannot decrease), while a value of `-1` makes it a monotonically
+decreasing function. A value of zero imposes no constraint.
+
+The input for `monotone_constraints` can be a subset of the columns of `x` if named, in which
+case the columns that are not referred to in `monotone_constraints` will be assumed to have
+a value of zero (no constraint imposed on the model for those features).
+
+See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/monotonic.html}{
+Monotonic Constraints} for a more detailed explanation.}
+
+\item{interaction_constraints}{Constraints for feature interactions, representing permitted interactions.
+The constraints must be specified in the form of a list of vectors referencing columns in the
+data, e.g. `list(c(1, 2), c(3, 4, 5))` (with these numbers being column indices, numbering
+starting at 1 - i.e. the first sublist references the first and second columns) or
+`list(c("Sepal.Length", "Sepal.Width"), c("Petal.Length", "Petal.Width"))` (references
+columns by names), where each vector is a group of indices of features that are allowed to
+interact with each other.
+
+See the tutorial \href{
+https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html
+}{Feature Interaction Constraints} for more information.}
+
+\item{feature_weights}{Feature weights for column sampling.
+
+Can be passed either as a vector with length matching to columns of `x`, or as a named
+list or vector (only if `x` has column names) with names matching to columns of `x`. If
+named, will try to match the entries to column names of `x` by name.
+
+If `NULL` (the default), all columns will have the same weight.}
+
+\item{base_margin}{Base margin used for boosting from existing model.
+
+If passing it, will start the gradient boosting procedure from the scores that are provided
+here - for example, one can pass the raw scores from a previous model, or some per-observation
+offset, or similar.
+
+Should be either a numeric vector or numeric matrix (for multi-class and multi-target objectives)
+with the same number of rows as `x` and number of columns corresponding to number of optimization
+targets, and should be in the untransformed scale (for example, for objective `binary:logistic`,
+it should have log-odds, not probabilities; and for objective `multi:softprob`, should have
+number of columns matching the number of classes in the data).
+
+Note that, if it contains more than one column, then columns will not be matched by name to
+the corresponding `y` - `base_margin` should have the same column order that the model will use
+(for example, for objective `multi:softprob`, columns of `base_margin` will be matched against
+`levels(y)` by their position, regardless of what `colnames(base_margin)` returns).
+
+If `NULL`, will start from zero, but note that for most objectives, an intercept is usually
+added (controllable through parameter `base_score` instead) when `base_margin` is not passed.}
+}
+\value{
+A model object, inheriting from both `xgboost` and `xgb.Booster`. Compared to the regular
+`xgb.Booster` model class produced by \link{xgb.train}, this `xgboost` class will have an
+additional attribute `metadata` containing information which is used for formatting prediction
+outputs, such as class names for classification problems.
+} +\description{ +Fits an XGBoost model (boosted decision tree ensemble) to given x/y data. + +See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html +}{Introduction to Boosted Trees} for a longer explanation of what XGBoost does. + +This function is intended to provide a more user-friendly interface for XGBoost that follows +R's conventions for model fitting and predictions, but which doesn't expose all of the +possible functionalities of the core XGBoost library. + +See \link{xgb.train} for a more flexible low-level alternative which is similar across different +language bindings of XGBoost and which exposes the full library's functionalities. +} +\details{ +For package authors using `xgboost` as a dependency, it is highly recommended to use +\link{xgb.train} in package code instead of `xgboost()`, since it has a more stable interface +and performs fewer data conversions and copies along the way. +} +\examples{ +library(xgboost) +data(mtcars) + +# Fit a small regression model on the mtcars data +model_regression <- xgboost(mtcars[, -1], mtcars$mpg, nthreads = 1, nrounds = 3) +predict(model_regression, mtcars, validate_features = TRUE) + +# Task objective is determined automatically according to the type of 'y' +data(iris) +model_classif <- xgboost(iris[, -5], iris$Species, nthreads = 1, nrounds = 5) +predict(model_classif, iris, validate_features = TRUE) +} +\references{ +\itemize{ +\item Chen, Tianqi, and Carlos Guestrin. "Xgboost: A scalable tree boosting system." +Proceedings of the 22nd acm sigkdd international conference on knowledge discovery and +data mining. 2016. +\item \url{https://xgboost.readthedocs.io/en/stable/} +} +} diff --git a/R-package/tests/testthat/test_xgboost.R b/R-package/tests/testthat/test_xgboost.R new file mode 100644 index 000000000000..8ca51fb9deee --- /dev/null +++ b/R-package/tests/testthat/test_xgboost.R @@ -0,0 +1,585 @@ +library(survival) +library(data.table) + +test_that("Auto determine objective", { + y_num <- seq(1, 10) + res_num <- process.y.margin.and.objective(y_num, NULL, NULL, NULL) + expect_equal(res_num$params$objective, "reg:squarederror") + + y_bin <- factor(c('a', 'b', 'a', 'b'), c('a', 'b')) + res_bin <- process.y.margin.and.objective(y_bin, NULL, NULL, NULL) + expect_equal(res_bin$params$objective, "binary:logistic") + + y_multi <- factor(c('a', 'b', 'a', 'b', 'c'), c('a', 'b', 'c')) + res_multi <- process.y.margin.and.objective(y_multi, NULL, NULL, NULL) + expect_equal(res_multi$params$objective, "multi:softprob") + + y_surv <- Surv(1:10, rep(c(0, 1), 5), type = "right") + res_surv <- process.y.margin.and.objective(y_surv, NULL, NULL, NULL) + expect_equal(res_surv$params$objective, "survival:aft") + + y_multicol <- matrix(seq(1, 20), nrow = 5) + res_multicol <- process.y.margin.and.objective(y_multicol, NULL, NULL, NULL) + expect_equal(res_multicol$params$objective, "reg:squarederror") +}) + +test_that("Process vectors", { + y <- seq(1, 10) + for (y_inp in list(as.integer(y), as.numeric(y))) { + res <- process.y.margin.and.objective(y_inp, NULL, "reg:pseudohubererror", NULL) + expect_equal( + res$dmatrix_args$label, + y + ) + expect_equal( + res$params$objective, + "reg:pseudohubererror" + ) + } +}) + +test_that("Process factors", { + y_bin <- factor(c('a', 'b', 'a', 'b'), c('a', 'b')) + expect_error({ + process.y.margin.and.objective(y_bin, NULL, "multi:softprob", NULL) + }) + for (bin_obj in c("binary:logistic", "binary:hinge")) { + for (y_inp in list(y_bin, as.ordered(y_bin))) { + res_bin <- 
process.y.margin.and.objective(y_inp, NULL, bin_obj, NULL) + expect_equal( + res_bin$dmatrix_args$label, + c(0, 1, 0, 1) + ) + expect_equal( + res_bin$metadata$y_levels, + c('a', 'b') + ) + expect_equal( + res_bin$params$objective, + bin_obj + ) + } + } + + y_bin2 <- factor(c(1, 0, 1, 0), c(1, 0)) + res_bin <- process.y.margin.and.objective(y_bin2, NULL, "binary:logistic", NULL) + expect_equal( + res_bin$dmatrix_args$label, + c(0, 1, 0, 1) + ) + expect_equal( + res_bin$metadata$y_levels, + c("1", "0") + ) + + y_bin3 <- c(TRUE, FALSE, TRUE) + res_bin <- process.y.margin.and.objective(y_bin3, NULL, "binary:logistic", NULL) + expect_equal( + res_bin$dmatrix_args$label, + c(1, 0, 1) + ) + expect_equal( + res_bin$metadata$y_levels, + c("FALSE", "TRUE") + ) + + y_multi <- factor(c('a', 'b', 'c', 'd', 'a', 'b'), c('a', 'b', 'c', 'd')) + expect_error({ + process.y.margin.and.objective(y_multi, NULL, "binary:logistic", NULL) + }) + expect_error({ + process.y.margin.and.objective(y_multi, NULL, "binary:logistic", NULL) + }) + res_multi <- process.y.margin.and.objective(y_multi, NULL, "multi:softprob", NULL) + expect_equal( + res_multi$dmatrix_args$label, + c(0, 1, 2, 3, 0, 1) + ) + expect_equal( + res_multi$metadata$y_levels, + c('a', 'b', 'c', 'd') + ) + expect_equal( + res_multi$params$num_class, + 4 + ) + expect_equal( + res_multi$params$objective, + "multi:softprob" + ) +}) + +test_that("Process survival objects", { + data(cancer, package = "survival") + y_right <- Surv(cancer$time, cancer$status - 1, type = "right") + res_cox <- process.y.margin.and.objective(y_right, NULL, "survival:cox", NULL) + expect_equal( + res_cox$dmatrix_args$label, + ifelse(cancer$status == 2, cancer$time, -cancer$time) + ) + expect_equal( + res_cox$params$objective, + "survival:cox" + ) + + res_aft <- process.y.margin.and.objective(y_right, NULL, "survival:aft", NULL) + expect_equal( + res_aft$dmatrix_args$label_lower_bound, + cancer$time + ) + expect_equal( + res_aft$dmatrix_args$label_upper_bound, + ifelse(cancer$status == 2, cancer$time, Inf) + ) + expect_equal( + res_aft$params$objective, + "survival:aft" + ) + + y_left <- Surv(seq(1, 4), c(1, 0, 1, 0), type = "left") + expect_error({ + process.y.margin.and.objective(y_left, NULL, "survival:cox", NULL) + }) + res_aft <- process.y.margin.and.objective(y_left, NULL, "survival:aft", NULL) + expect_equal( + res_aft$dmatrix_args$label_lower_bound, + c(1, 0, 3, 0) + ) + expect_equal( + res_aft$dmatrix_args$label_upper_bound, + seq(1, 4) + ) + expect_equal( + res_aft$params$objective, + "survival:aft" + ) + + y_interval <- Surv( + time = c(1, 5, 2, 10, 3), + time2 = c(2, 5, 2.5, 10, 3), + event = c(3, 1, 3, 0, 2), + type = "interval" + ) + expect_error({ + process.y.margin.and.objective(y_interval, NULL, "survival:cox", NULL) + }) + res_aft <- process.y.margin.and.objective(y_interval, NULL, "survival:aft", NULL) + expect_equal( + res_aft$dmatrix_args$label_lower_bound, + c(1, 5, 2, 10, 0) + ) + expect_equal( + res_aft$dmatrix_args$label_upper_bound, + c(2, 5, 2.5, Inf, 3) + ) + expect_equal( + res_aft$params$objective, + "survival:aft" + ) + + y_interval_neg <- Surv( + time = c(1, -5, 2, 10, 3), + time2 = c(2, -5, 2.5, 10, 3), + event = c(3, 1, 3, 0, 2), + type = "interval" + ) + expect_error({ + process.y.margin.and.objective(y_interval_neg, NULL, "survival:aft", NULL) + }) +}) + +test_that("Process multi-target", { + data(mtcars) + y_multi <- data.frame( + y1 = mtcars$mpg, + y2 = mtcars$mpg ^ 2 + ) + for (y_inp in list(y_multi, as.matrix(y_multi), 
data.table::as.data.table(y_multi))) { + res_multi <- process.y.margin.and.objective(y_inp, NULL, "reg:pseudohubererror", NULL) + expect_equal( + res_multi$dmatrix_args$label, + as.matrix(y_multi) + ) + expect_equal( + res_multi$metadata$y_names, + c("y1", "y2") + ) + expect_equal( + res_multi$params$objective, + "reg:pseudohubererror" + ) + } + + expect_error({ + process.y.margin.and.objective(y_multi, NULL, "count:poisson", NULL) + }) + + y_bad <- data.frame( + c1 = seq(1, 3), + c2 = rep(as.Date("2024-01-01"), 3) + ) + expect_error({ + process.y.margin.and.objective(y_bad, NULL, "reg:squarederror", NULL) + }) + + y_bad <- data.frame( + c1 = seq(1, 3), + c2 = factor(c('a', 'b', 'a'), c('a', 'b')) + ) + expect_error({ + process.y.margin.and.objective(y_bad, NULL, "reg:squarederror", NULL) + }) + + y_bad <- seq(1, 20) + dim(y_bad) <- c(5, 2, 2) + expect_error({ + process.y.margin.and.objective(y_bad, NULL, "reg:squarederror", NULL) + }) +}) + +test_that("Process base_margin", { + y <- seq(101, 110) + bm_good <- seq(1, 10) + for (bm in list(bm_good, as.matrix(bm_good), as.data.frame(as.matrix(bm_good)))) { + res <- process.y.margin.and.objective(y, bm, "reg:squarederror", NULL) + expect_equal( + res$dmatrix_args$base_margin, + seq(1, 10) + ) + } + expect_error({ + process.y.margin.and.objective(y, 5, "reg:squarederror", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, seq(1, 5), "reg:squarederror", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, matrix(seq(1, 20), ncol = 2), "reg:squarederror", NULL) + }) + expect_error({ + process.y.margin.and.objective( + y, + as.data.frame(matrix(seq(1, 20), ncol = 2)), + "reg:squarederror", + NULL + ) + }) + + y <- factor(c('a', 'b', 'c', 'a')) + bm_good <- matrix(seq(1, 12), ncol = 3) + for (bm in list(bm_good, as.data.frame(bm_good))) { + res <- process.y.margin.and.objective(y, bm, "multi:softprob", NULL) + expect_equal( + res$dmatrix_args$base_margin |> unname(), + matrix(seq(1, 12), ncol = 3) + ) + } + expect_error({ + process.y.margin.and.objective(y, as.numeric(bm_good), "multi:softprob", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, 5, "multi:softprob", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, 1], "multi:softprob", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, c(1, 2)], "multi:softprob", NULL) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[c(1, 2), ], "multi:softprob", NULL) + }) + + y <- seq(101, 110) + bm_good <- matrix(seq(1, 30), ncol = 3) + params <- list(quantile_alpha = c(0.1, 0.5, 0.9)) + for (bm in list(bm_good, as.data.frame(bm_good))) { + res <- process.y.margin.and.objective(y, bm, "reg:quantileerror", params) + expect_equal( + res$dmatrix_args$base_margin |> unname(), + matrix(seq(1, 30), ncol = 3) + ) + } + expect_error({ + process.y.margin.and.objective(y, as.numeric(bm_good), "reg:quantileerror", params) + }) + expect_error({ + process.y.margin.and.objective(y, 5, "reg:quantileerror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, 1], "reg:quantileerror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, c(1, 2)], "reg:quantileerror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[c(1, 2, 3), ], "reg:quantileerror", params) + }) + + y <- matrix(seq(101, 130), ncol = 3) + for (bm in list(bm_good, as.data.frame(bm_good))) { + res <- process.y.margin.and.objective(y, bm, "reg:squarederror", params) + expect_equal( + 
res$dmatrix_args$base_margin |> unname(), + matrix(seq(1, 30), ncol = 3) + ) + } + expect_error({ + process.y.margin.and.objective(y, as.numeric(bm_good), "reg:squarederror", params) + }) + expect_error({ + process.y.margin.and.objective(y, 5, "reg:squarederror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, 1], "reg:squarederror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[, c(1, 2)], "reg:squarederror", params) + }) + expect_error({ + process.y.margin.and.objective(y, bm_good[c(1, 2, 3), ], "reg:squarederror", params) + }) +}) + +test_that("Process monotone constraints", { + data(iris) + mc_list <- list(Sepal.Width = 1) + res <- process.x.and.col.args( + iris, + monotone_constraints = mc_list, + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + expect_equal( + res$params$monotone_constraints, + c(0, 1, 0, 0, 0) + ) + + mc_list2 <- list(Sepal.Width = 1, Petal.Width = -1) + res <- process.x.and.col.args( + iris, + monotone_constraints = mc_list2, + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + expect_equal( + res$params$monotone_constraints, + c(0, 1, 0, -1, 0) + ) + + mc_vec <- c(0, 1, -1, 0, 0) + res <- process.x.and.col.args( + iris, + monotone_constraints = mc_vec, + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + expect_equal( + res$params$monotone_constraints, + c(0, 1, -1, 0, 0) + ) + + mc_named_vec <- c(1, 1) + names(mc_named_vec) <- names(iris)[1:2] + res <- process.x.and.col.args( + iris, + monotone_constraints = mc_named_vec, + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + expect_equal( + res$params$monotone_constraints, + c(1, 1, 0, 0, 0) + ) + + mc_named_all <- c(0, -1, 1, 0, -1) + names(mc_named_all) <- rev(names(iris)) + res <- process.x.and.col.args( + iris, + monotone_constraints = mc_named_all, + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + expect_equal( + res$params$monotone_constraints, + rev(mc_named_all) |> unname() + ) + + expect_error({ + process.x.and.col.args( + iris, + monotone_constraints = list( + Sepal.Width = 1, + Petal.Width = -1, + Sepal.Width = -1 + ), + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + }) + + expect_error({ + process.x.and.col.args( + iris, + monotone_constraints = rep(0, 6), + interaction_constraints = NULL, + feature_weights = NULL, + lst_args = list() + ) + }) +}) + +test_that("Process interaction_constraints", { + data(iris) + res <- process.x.and.col.args(iris, NULL, list(c(1L, 2L)), NULL, NULL) + expect_equal( + res$params$interaction_constraints, + list(c(0, 1)) + ) + res <- process.x.and.col.args(iris, NULL, list(c(1.0, 2.0)), NULL, NULL) + expect_equal( + res$params$interaction_constraints, + list(c(0, 1)) + ) + res <- process.x.and.col.args(iris, NULL, list(c(1, 2), c(3, 4)), NULL, NULL) + expect_equal( + res$params$interaction_constraints, + list(c(0, 1), c(2, 3)) + ) + res <- process.x.and.col.args(iris, NULL, list(c("Sepal.Length", "Sepal.Width")), NULL, NULL) + expect_equal( + res$params$interaction_constraints, + list(c(0, 1)) + ) + res <- process.x.and.col.args( + as.matrix(iris), + NULL, + list(c("Sepal.Length", "Sepal.Width")), + NULL, + NULL + ) + expect_equal( + res$params$interaction_constraints, + list(c(0, 1)) + ) + res <- process.x.and.col.args( + iris, + NULL, + list(c("Sepal.Width", "Petal.Length"), c("Sepal.Length", "Petal.Width", 
"Species")), + NULL, + NULL + ) + expect_equal( + res$params$interaction_constraints, + list(c(1, 2), c(0, 3, 4)) + ) + + expect_error({ + process.x.and.col.args(iris, NULL, list(c(1L, 20L)), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, list(c(0L, 2L)), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, list(c("1", "2")), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, list(c("Sepal", "Petal")), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, c(1L, 2L), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, matrix(c(1L, 2L)), NULL, NULL) + }) + expect_error({ + process.x.and.col.args(iris, NULL, list(c(1, 2.5)), NULL, NULL) + }) +}) + +test_that("Process feature_weights", { + data(iris) + w_vector <- seq(1, 5) + res <- process.x.and.col.args( + iris, + monotone_constraints = NULL, + interaction_constraints = NULL, + feature_weights = w_vector, + lst_args = list() + ) + expect_equal( + res$dmatrix_args$feature_weights, + seq(1, 5) + ) + + w_named_vector <- seq(1, 5) + names(w_named_vector) <- rev(names(iris)) + res <- process.x.and.col.args( + iris, + monotone_constraints = NULL, + interaction_constraints = NULL, + feature_weights = w_named_vector, + lst_args = list() + ) + expect_equal( + res$dmatrix_args$feature_weights, + rev(seq(1, 5)) + ) + + w_list <- list( + Species = 5, + Sepal.Length = 1, + Sepal.Width = 2, + Petal.Length = 3, + Petal.Width = 4 + ) + res <- process.x.and.col.args( + iris, + monotone_constraints = NULL, + interaction_constraints = NULL, + feature_weights = w_list, + lst_args = list() + ) + expect_equal( + res$dmatrix_args$feature_weights, + seq(1, 5) + ) +}) + +test_that("Whole function works", { + data(cancer, package = "survival") + y <- Surv(cancer$time, cancer$status - 1, type = "right") + x <- as.data.table(cancer)[, -c("time", "status")] + model <- xgboost( + x, + y, + monotone_constraints = list(age = -1), + nthreads = 1L, + nrounds = 5L + ) + expect_equal( + attributes(model)$params$objective, + "survival:aft" + ) + expect_equal( + attributes(model)$metadata$n_targets, + 1L + ) + expect_equal( + attributes(model)$params$monotone_constraints, + "(0,-1,0,0,0,0,0,0)" + ) + txt <- capture.output({ + print(model) + }) + expect_true(any(grepl("Objective: survival:aft", txt, fixed = TRUE))) + expect_true(any(grepl("monotone_constraints", txt, fixed = TRUE))) + expect_true(any(grepl("Number of iterations: 5", txt, fixed = TRUE))) + expect_true(any(grepl("Number of features: 8", txt, fixed = TRUE))) +}) diff --git a/R-package/vignettes/discoverYourData.Rmd b/R-package/vignettes/discoverYourData.Rmd index 4b04f771f210..8347d0ee0a84 100644 --- a/R-package/vignettes/discoverYourData.Rmd +++ b/R-package/vignettes/discoverYourData.Rmd @@ -173,8 +173,9 @@ Build the model The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). 
```{r}
-bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
-               eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
+bst <- xgboost(x = sparse_matrix, y = factor(output_vector, levels = c(0, 1)),
+               params = list(max_depth = 4, eta = 1),
+               nthreads = 2, nrounds = 10)
```

@@ -299,28 +300,28 @@ test <- agaricus.test
 
 #Random Forest - 1000 trees
 bst <- xgboost(
-  data = train$data,
-  label = train$label,
-  max_depth = 4,
-  num_parallel_tree = 1000,
-  subsample = 0.5,
-  colsample_bytree = 0.5,
+  x = train$data,
+  y = factor(train$label, levels = c(0, 1)),
+  params = list(
+    max_depth = 4,
+    num_parallel_tree = 1000,
+    subsample = 0.5,
+    colsample_bytree = 0.5
+  ),
   nrounds = 1,
-  objective = "binary:logistic",
-  nthread = 2
+  nthreads = 2
 )
 
 #Boosting - 3 rounds
 bst <- xgboost(
-  data = train$data,
-  label = train$label,
-  max_depth = 4,
+  x = train$data,
+  y = factor(train$label, levels = c(0, 1)),
+  params = list(max_depth = 4),
   nrounds = 3,
-  objective = "binary:logistic",
-  nthread = 2
+  nthreads = 2
 )
 ```
 
-> Note that the parameter `round` is set to `1`.
+> Note that the parameter `nrounds` is set to `1`.
 
 > [**Random Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
 
diff --git a/R-package/vignettes/xgboostPresentation.Rmd b/R-package/vignettes/xgboostPresentation.Rmd
index fc49adc0fcee..d1ca4f2879a7 100644
--- a/R-package/vignettes/xgboostPresentation.Rmd
+++ b/R-package/vignettes/xgboostPresentation.Rmd
@@ -146,22 +146,19 @@ In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore,
 
 We will train decision tree model using the following parameters:
 
-* `objective = "binary:logistic"`: we will train a binary classification model ;
+* `objective = "binary:logistic"`: we will train a binary classification model (note that this is set automatically when `y` is a `factor`) ;
 * `max_depth = 2`: the trees won't be deep, because our case is very simple ;
-* `nthread = 2`: the number of CPU threads we are going to use;
+* `nthreads = 2`: the number of CPU threads we are going to use;
 * `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
 
 ```{r trainingSparse, message=F, warning=F}
 bstSparse <- xgboost(
-  data = train$data
-  , label = train$label
-  , params = list(
-    max_depth = 2
-    , eta = 1
-    , nthread = 2
-    , objective = "binary:logistic"
-  )
+  x = train$data
+  , y = factor(train$label, levels = c(0, 1))
+  , objective = "binary:logistic"
+  , params = list(max_depth = 2, eta = 1)
   , nrounds = 2
+  , nthreads = 2
 )
 ```
 
@@ -175,15 +172,11 @@ Alternatively, you can put your dataset in a *dense* matrix, i.e.
a basic **R**
 ```{r trainingDense, message=F, warning=F}
 bstDense <- xgboost(
-  data = as.matrix(train$data),
-  label = train$label,
-  params = list(
-    max_depth = 2,
-    eta = 1,
-    nthread = 2,
-    objective = "binary:logistic"
-  ),
-  nrounds = 2
+  x = as.matrix(train$data),
+  y = factor(train$label, levels = c(0, 1)),
+  params = list(max_depth = 2, eta = 1),
+  nrounds = 2,
+  nthreads = 2
 )
 ```
 
@@ -193,7 +186,7 @@ bstDense <- xgboost(
 
 ```{r trainingDmatrix, message=F, warning=F}
 dtrain <- xgb.DMatrix(data = train$data, label = train$label, nthread = 2)
-bstDMatrix <- xgboost(
+bstDMatrix <- xgb.train(
   data = dtrain,
   params = list(
     max_depth = 2,
@@ -213,7 +206,7 @@ One of the simplest way to see the training progress is to set the `verbose` opt
 
 ```{r trainingVerbose0, message=T, warning=F}
 # verbose = 0, no message
-bst <- xgboost(
+bst <- xgb.train(
   data = dtrain
   , params = list(
     max_depth = 2
@@ -228,7 +221,7 @@ bst <- xgboost(
 
 ```{r trainingVerbose1, message=T, warning=F}
 # verbose = 1, print evaluation metric
-bst <- xgboost(
+bst <- xgb.train(
   data = dtrain
   , params = list(
     max_depth = 2
@@ -243,7 +236,7 @@ bst <- xgboost(
 
 ```{r trainingVerbose2, message=T, warning=F}
 # verbose = 2, also print information about tree
-bst <- xgboost(
+bst <- xgb.train(
   data = dtrain
   , params = list(
     max_depth = 2
diff --git a/doc/tutorials/feature_interaction_constraint.rst b/doc/tutorials/feature_interaction_constraint.rst
index b3d655584b95..7f26cd437325 100644
--- a/doc/tutorials/feature_interaction_constraint.rst
+++ b/doc/tutorials/feature_interaction_constraint.rst
@@ -178,9 +178,10 @@ parameter:
 Using feature name instead
 **************************
 
-XGBoost's Python package supports using feature names instead of feature index for
+XGBoost's Python and R packages support using feature names instead of feature indices for
 specifying the constraints. Given a data frame with columns ``["f0", "f1", "f2"]``, the
-feature interaction constraint can be specified as ``[["f0", "f2"]]``.
+feature interaction constraint can be specified as ``[["f0", "f2"]]`` (Python) or
+``list(c("f0", "f2"))`` (R, when passing them to the ``xgboost()`` function).
 
 **************
 Advanced topic
diff --git a/doc/tutorials/monotonic.rst b/doc/tutorials/monotonic.rst
index e663d1109689..6868e0a56037 100644
--- a/doc/tutorials/monotonic.rst
+++ b/doc/tutorials/monotonic.rst
@@ -97,7 +97,8 @@ Some other examples:
 Using feature names
 *******************
 
-XGBoost's Python package supports using feature names instead of feature index for
+XGBoost's Python and R packages support using feature names instead of feature indices for
 specifying the constraints. Given a data frame with columns ``["f0", "f1", "f2"]``, the
-monotonic constraint can be specified as ``{"f0": 1, "f2": -1}``, and ``"f1"`` will
+monotonic constraint can be specified as ``{"f0": 1, "f2": -1}`` (Python) or as
+``list(f0=1, f2=-1)`` (R, when using ``xgboost()`` but not ``xgb.train()``), and ``"f1"`` will
 default to ``0`` (no constraint).
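To make the calling conventions introduced in this first patch concrete, the following is a minimal usage sketch (assuming a build of this branch; `mtcars`, `iris`, and `agaricus.train` are the standard example datasets). It relies only on behaviors documented in `xgboost.Rd` above: automatic objective selection from the class of `y`, and name-based matching of constraints to the columns of `x`.

```r
library(xgboost)

# Numeric 'y': the objective is auto-selected as "reg:squarederror".
# Named monotone constraints are matched to columns of 'x'; columns
# that are not mentioned default to 0 (unconstrained).
data(mtcars)
model_reg <- xgboost(
  mtcars[, -1],
  mtcars$mpg,
  monotone_constraints = list(wt = -1),
  nthreads = 1,
  nrounds = 5
)

# Two-level factor 'y': auto-selected as "binary:logistic"; the last
# factor level ("1" here) is treated as the "positive" class.
data(agaricus.train, package = "xgboost")
model_bin <- xgboost(
  x = agaricus.train$data,
  y = factor(agaricus.train$label, levels = c(0, 1)),
  params = list(max_depth = 2, eta = 1),
  nthreads = 2,
  nrounds = 2
)

# Factor 'y' with more than two levels: auto-selected as "multi:softprob".
# Interaction constraints can reference columns of a data.frame by name.
data(iris)
model_multi <- xgboost(
  iris[, -5],
  iris$Species,
  interaction_constraints = list(
    c("Sepal.Length", "Sepal.Width"),
    c("Petal.Length", "Petal.Width")
  ),
  nthreads = 1,
  nrounds = 5
)
predict(model_multi, iris[, -5])  # probabilities, one column per level of 'y'
```

The remaining patches in the series adjust internals (lint fixes, argument validation, threading, and QuantileDMatrix handling) without changing this calling pattern.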
From 882d7bc543ab129acf947986a158c01641102150 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Thu, 20 Jun 2024 18:26:45 +0200 Subject: [PATCH 02/22] linter --- R-package/R/utils.R | 4 ++-- R-package/R/xgb.train.R | 6 ------ R-package/R/xgboost.R | 8 ++++---- R-package/man/xgb.train.Rd | 9 --------- 4 files changed, 6 insertions(+), 21 deletions(-) diff --git a/R-package/R/utils.R b/R-package/R/utils.R index 59830f8c91e7..3f67ff23c9f7 100644 --- a/R-package/R/utils.R +++ b/R-package/R/utils.R @@ -42,11 +42,11 @@ NVL <- function(x, val) { return("multi:softprob") } -.SURVIVAL_RIGHT_CENSORING_OBJECTIVES <- function() { +.SURVIVAL_RIGHT_CENSORING_OBJECTIVES <- function() { # nolint return(c("survival:cox", "survival:aft")) } -.SURVIVAL_ALL_CENSORING_OBJECTIVES <- function() { +.SURVIVAL_ALL_CENSORING_OBJECTIVES <- function() { # nolint return("survival:aft") } diff --git a/R-package/R/xgb.train.R b/R-package/R/xgb.train.R index 5b719a17b318..4521bdfe24dd 100644 --- a/R-package/R/xgb.train.R +++ b/R-package/R/xgb.train.R @@ -170,12 +170,6 @@ #' as R attributes, and thus do not get saved when using XGBoost's own serializaters like #' \link{xgb.save} (but are kept when using R serializers like \link{saveRDS}). #' @param ... other parameters to pass to \code{params}. -#' @param label vector of response values. Should not be provided when data is -#' a local data file name or an \code{xgb.DMatrix}. -#' @param missing by default is set to NA, which means that NA values should be considered as 'missing' -#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values. -#' This parameter is only used when input is a dense matrix. -#' @param weight a vector indicating the weight for each row of the input. #' #' @return #' An object of class \code{xgb.Booster}. diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index aec2b8a54a6f..d8357ab5ee13 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -180,7 +180,7 @@ process.y.margin.and.objective <- function( if (length(y_levels) == 2) { if (is.null(objective)) { objective <- "binary:logistic" - } else{ + } else { if (!(objective %in% .BINARY_CLASSIF_OBJECTIVES())) { stop( "Got binary 'y' - supported objectives for this data are: ", @@ -327,7 +327,7 @@ process.y.margin.and.objective <- function( label_lower_bound = ifelse(y[, 3L] == 2, 0, y[, 1L]), label_upper_bound = ifelse( y[, 3L] == 0, Inf, - ifelse(y[, 3L]== 3, y[, 2L], y[, 1L]) + ifelse(y[, 3L] == 3, y[, 2L], y[, 1L]) ) ) } @@ -607,7 +607,7 @@ process.x.and.col.args <- function( if (!NROW(colnames(x))) { stop("If passing 'monotone_constraints' as a named list, 'x' must have column names.") } - if (any(duplicated(names(monotone_constraints)))) { + if (anyDuplicated(names(monotone_constraints))) { stop( "'monotone_constraints' contains duplicated names: ", paste( @@ -936,7 +936,7 @@ xgboost <- function( use_qdm <- check.can.use.qdm(x, params) if (use_qdm && "max_bin" %in% names(params)) { - lst_args$dmatrix_args$max_bin <- max_bin + lst_args$dmatrix_args$max_bin <- params$max_bin } nthreads <- check.nthreads(nthreads) diff --git a/R-package/man/xgb.train.Rd b/R-package/man/xgb.train.Rd index f65a34afc4df..2c0587800ca8 100644 --- a/R-package/man/xgb.train.Rd +++ b/R-package/man/xgb.train.Rd @@ -209,15 +209,6 @@ to customize the training process. }\if{html}{\out{}}} \item{...}{other parameters to pass to \code{params}.} - -\item{label}{vector of response values. 
Should not be provided when data is -a local data file name or an \code{xgb.DMatrix}.} - -\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing' -by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values. -This parameter is only used when input is a dense matrix.} - -\item{weight}{a vector indicating the weight for each row of the input.} } \value{ An object of class \code{xgb.Booster}. From 63e2ba149dd8d8558e86d8ecdd68291f79d2c68f Mon Sep 17 00:00:00 2001 From: david-cortes Date: Thu, 20 Jun 2024 18:53:03 +0200 Subject: [PATCH 03/22] attempt at solving warning in checks --- R-package/R/xgboost.R | 20 ++++++++++---------- R-package/man/xgboost.Rd | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index d8357ab5ee13..37bf9962e704 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -735,8 +735,8 @@ process.x.and.col.args <- function( #' @title Fit XGBoost Model #' @description Fits an XGBoost model (boosted decision tree ensemble) to given x/y data. #' -#' See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html -#' }{Introduction to Boosted Trees} for a longer explanation of what XGBoost does. +#' See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html}{ +#' Introduction to Boosted Trees} for a longer explanation of what XGBoost does. #' #' This function is intended to provide a more user-friendly interface for XGBoost that follows #' R's conventions for model fitting and predictions, but which doesn't expose all of the @@ -790,8 +790,8 @@ process.x.and.col.args <- function( #' class instead of to the first factor level. If `y` is a `logical` vector, then `TRUE` will be #' set as the last level. #' @param objective Optimization objective to minimize based on the supplied data, to be passed -#' by name as a string / character (e.g. `reg:absoluteerror`). See the \href{ -#' https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{ +#' by name as a string / character (e.g. `reg:absoluteerror`). See the +#' \href{https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{ #' Learning Task Parameters} page for more detailed information on allowed values. #' #' If `NULL` (the default), will be automatically determined from `y` according to the following @@ -811,9 +811,9 @@ process.x.and.col.args <- function( #' here - for example, objectives which are a variation of another but with a different default #' prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are #' ranking objectives, nor custom objectives at the moment. -#' @param params List of training parameters. See the online documentation \href{ -#' https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for details about -#' possible values and what they do. +#' @param params List of training parameters. See the online documentation +#' \href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for +#' details about possible values and what they do. 
#' #' Note that not all possible values from the core XGBoost library are allowed as `params` for #' 'xgboost()' - in particular, values which are direct arguments to this function (such as @@ -858,9 +858,9 @@ process.x.and.col.args <- function( #' columns by names), where each vector is a group of indices of features that are allowed to #' interact with each other. #' -#' See the tutorial \href{ -#' https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html -#' }{Feature Interaction Constraints} for more information. +#' See the tutorial +#' \href{https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html}{ +#' Feature Interaction Constraints} for more information. #' @param feature_weights Feature weights for column sampling. #' #' Can be passed either as a vector with length matching to columns of `x`, or as a named diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd index 921f3a9119cd..974767c60362 100644 --- a/R-package/man/xgboost.Rd +++ b/R-package/man/xgboost.Rd @@ -60,8 +60,8 @@ class instead of to the first factor level. If `y` is a `logical` vector, then ` set as the last level.} \item{objective}{Optimization objective to minimize based on the supplied data, to be passed -by name as a string / character (e.g. `reg:absoluteerror`). See the \href{ -https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{ +by name as a string / character (e.g. `reg:absoluteerror`). See the +\href{https://xgboost.readthedocs.io/en/stable/parameter.html#learning-task-parameters}{ Learning Task Parameters} page for more detailed information on allowed values. If `NULL` (the default), will be automatically determined from `y` according to the following @@ -82,9 +82,9 @@ here - for example, objectives which are a variation of another but with a diffe prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are ranking objectives, nor custom objectives at the moment.} -\item{params}{List of training parameters. See the online documentation \href{ -https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for details about -possible values and what they do. +\item{params}{List of training parameters. See the online documentation +\href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for +details about possible values and what they do. Note that not all possible values from the core XGBoost library are allowed as `params` for 'xgboost()' - in particular, values which are direct arguments to this function (such as @@ -136,9 +136,9 @@ starting at 1 - i.e. the first sublist references the first and second columns) columns by names), where each vector is a group of indices of features that are allowed to interact with each other. -See the tutorial \href{ -https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html -}{Feature Interaction Constraints} for more information.} +See the tutorial +\href{https://xgboost.readthedocs.io/en/stable/tutorials/feature_interaction_constraint.html}{ +Feature Interaction Constraints} for more information.} \item{feature_weights}{Feature weights for column sampling. @@ -177,8 +177,8 @@ outputs, such as class names for classification problems. \description{ Fits an XGBoost model (boosted decision tree ensemble) to given x/y data. 
-See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html -}{Introduction to Boosted Trees} for a longer explanation of what XGBoost does. +See the tutorial \href{https://xgboost.readthedocs.io/en/stable/tutorials/model.html}{ +Introduction to Boosted Trees} for a longer explanation of what XGBoost does. This function is intended to provide a more user-friendly interface for XGBoost that follows R's conventions for model fitting and predictions, but which doesn't expose all of the From bd06af37f4bb97b1c567bbb42931da8b155b04ec Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:43:16 +0200 Subject: [PATCH 04/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 37bf9962e704..601f72f0b8d1 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -971,7 +971,7 @@ print.xgboost <- function(x, ...) { cat("Number of features: ", xgb.num_feature(x), "\n", sep = "") printable_head <- function(v) { - v_sub <- head(v, 5L) + v_sub <- utils::head(v, 5L) return( sprintf( "%s%s", From 337fdfa97db8d99c2ee8093de6553d52eb56e2f7 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:43:28 +0200 Subject: [PATCH 05/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 601f72f0b8d1..8169fa3d9a6b 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -574,7 +574,7 @@ process.x.and.col.args <- function( if (length(matched) > 0 && length(matched) < length(feature_weights)) { stop( "'feature_weights' names do not contain all columns of 'x'. 
Missing: ", - head(setdiff(colnames(x), names(feature_weights))) + utils::head(setdiff(colnames(x), names(feature_weights))) ) } if (length(matched)) { From adf03b363ea54dfc78011683154bd05f3dd4766b Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:45:37 +0200 Subject: [PATCH 06/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 8169fa3d9a6b..b396d6fd4d98 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -421,7 +421,7 @@ process.y.margin.and.objective <- function( ) } else if (is.matrix(y)) { - if (ncol(y) == 1) { + if (ncol(y) == 1L) { return(process.y.margin.and.objective(as.vector(y), base_margin, objective, params)) } From a8bd926ea3a42de5961ac4534f711ee10e3b3c7a Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:45:48 +0200 Subject: [PATCH 07/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index b396d6fd4d98..0dad28258cc8 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -375,7 +375,7 @@ process.y.margin.and.objective <- function( ) } else if (is.data.frame(y)) { - if (ncol(y) == 1) { + if (ncol(y) == 1L) { return(process.y.margin.and.objective(y[[1L]], base_margin, objective, params)) } From df50a296c92026188aaf7a69d070bf7e611309aa Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:46:38 +0200 Subject: [PATCH 08/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 0dad28258cc8..d9ac146ef7cc 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -694,7 +694,7 @@ process.x.and.col.args <- function( if (anyNA(out)) { stop( "'interaction_constraints' contains column names not present in 'x': ", - paste(head(idx[which(is.na(out))]), collapse = ", ") + paste(utils::head(idx[which(is.na(out))]), collapse = ", ") ) } return(out) From 057bd23105d06e49813adf831087c1ff0fe55428 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:46:54 +0200 Subject: [PATCH 09/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index d9ac146ef7cc..f4ef1d737782 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -67,7 +67,7 @@ process.base.margin <- function(base_margin, nrows, ncols) { base_margin <- as.matrix(base_margin) } - if (ncols == 1) { + if (ncols == 1L) { if (inherits(base_margin, c("matrix", "data.frame"))) { if (ncol(base_margin) != 1) { stop("'base_margin' should be a 1-d vector for the given objective and data.") From a4df40f0fa65390cef55d84d6f0de46f13707880 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:47:09 +0200 Subject: [PATCH 10/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index f4ef1d737782..5ac1d2e0f722 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -69,7 +69,7 @@ process.base.margin <- function(base_margin, nrows, ncols) { if (ncols == 1L) { if (inherits(base_margin, 
c("matrix", "data.frame"))) { - if (ncol(base_margin) != 1) { + if (ncol(base_margin) != 1L) { stop("'base_margin' should be a 1-d vector for the given objective and data.") } if (is.data.frame(base_margin)) { From 7628e3aacd78053456fa5ba9036924e857435c85 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:47:33 +0200 Subject: [PATCH 11/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 5ac1d2e0f722..f2e9694fe545 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -611,7 +611,7 @@ process.x.and.col.args <- function( stop( "'monotone_constraints' contains duplicated names: ", paste( - names(monotone_constraints)[duplicated(names(monotone_constraints))] |> head(), + names(monotone_constraints)[duplicated(names(monotone_constraints))] |> utils::head(), collapse = ", " ) ) From 970abf5f7091bcb78924b74f9de2d51ecf4573b8 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:47:54 +0200 Subject: [PATCH 12/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index f2e9694fe545..9bf17b9788a8 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -712,7 +712,7 @@ process.x.and.col.args <- function( if (any(idx != floor(idx))) { stop( "'interaction_constraints' must contain only integer indices. Got non-integer: ", - paste(head(idx[which(idx != floor(idx))]), collapse = ", ") + paste(utils::head(idx[which(idx != floor(idx))]), collapse = ", ") ) } } From e5fbec6057380e8d134801c9a80ece912a0eb29b Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:49:55 +0200 Subject: [PATCH 13/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 9bf17b9788a8..cf57337a23fb 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -153,7 +153,7 @@ process.y.margin.and.objective <- function( if (is.character(y)) { if (!is.vector(y)) { - if (NCOL(y) > 1) { + if (NCOL(y) > 1L) { stop("Multi-column categorical 'y' is not supported.") } y <- as.vector(y) From 04ba7419680c05cccb9296649ef23c94c1f3e482 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:50:17 +0200 Subject: [PATCH 14/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index cf57337a23fb..6b75d405da24 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -163,7 +163,7 @@ process.y.margin.and.objective <- function( if (is.logical(y)) { if (!is.vector(y)) { - if (NCOL(y) > 1) { + if (NCOL(y) > 1L) { stop("Multi-column logical/boolean 'y' is not supported.") } y <- as.vector(y) From b7be9df6537dc96e49e7dc3685aae72f83524294 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 29 Jun 2024 21:50:50 +0200 Subject: [PATCH 15/22] Update R-package/R/xgboost.R Co-authored-by: Michael Mayer --- R-package/R/xgboost.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 6b75d405da24..a56c38dc2235 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -619,7 +619,7 @@ process.x.and.col.args <- 
function( if (NROW(setdiff(names(monotone_constraints), colnames(x)))) { stop( "'monotone_constraints' contains column names not present in 'x': ", - paste(head(names(monotone_constraints)), collapse = ", ") + paste(utils::head(names(monotone_constraints)), collapse = ", ") ) } From fe60e6c6711fd347a77c4ebff49d03ea9fe8967c Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 6 Jul 2024 14:30:08 +0200 Subject: [PATCH 16/22] merge check for number of rows in vector/matrix base margin --- R-package/R/xgboost.R | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index a56c38dc2235..6afa587f8c5b 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -66,6 +66,14 @@ process.base.margin <- function(base_margin, nrows, ncols) { ) base_margin <- as.matrix(base_margin) } + if (NROW(base_margin) != nrows) { + stop( + "'base_margin' has incorrect number of rows. Expected: ", + nrows, + ". Got: ", + NROW(base_margin) + ) + } if (ncols == 1L) { if (inherits(base_margin, c("matrix", "data.frame"))) { @@ -81,14 +89,6 @@ process.base.margin <- function(base_margin, nrows, ncols) { if (!is.numeric(base_margin)) { base_margin <- as.numeric(base_margin) } - if (length(base_margin) != nrows) { - stop( - "'base_margin' has incorrect number of rows. Expected: ", - nrows, - ". Got: ", - length(base_margin) - ) - } } else { supported_multicol <- c("matrix", "data.frame") if (!inherits(base_margin, supported_multicol)) { @@ -107,14 +107,6 @@ process.base.margin <- function(base_margin, nrows, ncols) { ncol(base_margin) ) } - if (nrow(base_margin) != nrows) { - stop( - "'base_margin' has incorrect number of rows. Expected: ", - nrows, - ". Got: ", - nrow(base_margin) - ) - } if (!is.matrix(base_margin)) { base_margin <- as.matrix(base_margin) } From 8b9bfd30734594f956f6ee78d235069390d21e44 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 6 Jul 2024 14:32:21 +0200 Subject: [PATCH 17/22] remove redundant check --- R-package/R/xgboost.R | 6 ------ 1 file changed, 6 deletions(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 6afa587f8c5b..03f77d306d0e 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -553,12 +553,6 @@ process.x.and.col.args <- function( if (!inherits(feature_weights, c("numeric", "integer"))) { stop("'feature_weights' must be a numeric vector or named list matching to columns of 'x'.") } - if (length(feature_weights) != ncol(x)) { - stop( - "'feature_weights' does not match in length with columns of 'x' (", - length(feature_weights), " vs. ", ncol(x), ")." 
-      )
-    }
   if (NROW(names(feature_weights)) && NROW(colnames(x))) {
     matched <- match(colnames(x), names(feature_weights))
     matched <- matched[!is.na(matched)]

From 86e34aef274a0a525782b97e48a74d9b34349b72 Mon Sep 17 00:00:00 2001
From: david-cortes
Date: Sat, 6 Jul 2024 14:34:17 +0200
Subject: [PATCH 18/22] allow nthreads=0

---
 R-package/R/xgboost.R    | 6 +++---
 R-package/man/xgboost.Rd | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R
index 03f77d306d0e..60d96eb2df52 100644
--- a/R-package/R/xgboost.R
+++ b/R-package/R/xgboost.R
@@ -493,9 +493,9 @@ check.nthreads <- function(nthreads) {
     stop("'nthreads' must be a positive scalar value.")
   }
   if (length(nthreads) > 1L) {
-    nthreads <- head(nthreads, 1L)
+    nthreads <- utils::head(nthreads, 1L)
   }
-  if (is.na(nthreads) || nthreads < 1) {
+  if (is.na(nthreads) || nthreads < 0) {
     stop("Passed invalid 'nthreads': ", nthreads)
   }
   if (is.numeric(nthreads)) {
@@ -817,7 +817,7 @@ process.x.and.col.args <- function(
 #' rows in `x`.
 #' @param verbosity Verbosity of printing messages. Valid values are 0 (silent), 1 (warning),
 #' 2 (info), and 3 (debug).
-#' @param nthreads Number of parallel threads to use.
+#' @param nthreads Number of parallel threads to use. If passing zero, will use all CPU threads.
 #' @param seed Seed to use for random number generation. If passing `NULL`, will draw a random
 #' number using R's PRNG system to use as seed.
diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd
index 974767c60362..ee8a6594e12b 100644
--- a/R-package/man/xgboost.Rd
+++ b/R-package/man/xgboost.Rd
@@ -106,7 +106,7 @@ rows in `x`.}
 \item{verbosity}{Verbosity of printing messages. Valid values are 0 (silent), 1 (warning),
 2 (info), and 3 (debug).}
 
-\item{nthreads}{Number of parallel threads to use.}
+\item{nthreads}{Number of parallel threads to use. If passing zero, will use all CPU threads.}
 
 \item{seed}{Seed to use for random number generation. If passing `NULL`, will draw a random
 number using R's PRNG system to use as seed.}

From 949c5192db5194df9c4c432bdd710a18dc9a31f9 Mon Sep 17 00:00:00 2001
From: david-cortes
Date: Sat, 6 Jul 2024 14:37:45 +0200
Subject: [PATCH 19/22] add comments about structure of Surv objects

---
 R-package/R/xgboost.R | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R
index 60d96eb2df52..d896e4b7500d 100644
--- a/R-package/R/xgboost.R
+++ b/R-package/R/xgboost.R
@@ -281,6 +281,12 @@ process.y.margin.and.objective <- function(
       )
     )
+
+  # Note: the 'Surv' object class that is passed as 'y' might have either 2 or 3 columns
+  # depending on the type of censoring, and the last column in both cases is the one that
+  # indicates the observation type (e.g. censored / uncensored).
+  # In the case of interval censoring, the second column will not always have values with
+  # infinites filled in. For more information, see the code behind the 'print.Surv' method.
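+  # As a quick reference for that encoding (per the 'survival' package's documentation):
+  # with type = "interval", the last column stores the codes
+  # 0 = right-censored, 1 = event (exact time), 2 = left-censored, 3 = interval-censored,
+  # which are the values that the AFT handling below checks against.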
+ if (objective == "survival:cox") { # Can only get here when using right censoring if (y_attr$type != "right") { From b418e51e95e2c42762749e08f7fd8af3b2f85314 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Sat, 6 Jul 2024 15:25:35 +0200 Subject: [PATCH 20/22] cast sparse types to CSR in order to use QDM --- R-package/R/xgboost.R | 24 +++++--- R-package/tests/testthat/test_xgboost.R | 76 +++++++++++++++++-------- 2 files changed, 68 insertions(+), 32 deletions(-) diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index d896e4b7500d..c7882caefef0 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -513,9 +513,6 @@ check.nthreads <- function(nthreads) { } check.can.use.qdm <- function(x, params) { - if (inherits(x, "sparseMatrix") && !inherits(x, "dgRMatrix")) { - return(FALSE) - } if ("booster" %in% names(params)) { if (params$booster == "gblinear") { return(FALSE) @@ -534,7 +531,8 @@ process.x.and.col.args <- function( monotone_constraints, interaction_constraints, feature_weights, - lst_args + lst_args, + use_qdm ) { if (is.null(x)) { stop("'x' cannot be NULL.") @@ -542,7 +540,7 @@ process.x.and.col.args <- function( if (inherits(x, "xgb.DMatrix")) { stop("Cannot pass 'xgb.DMatrix' as 'x' to 'xgboost()'. Try 'xgb.train()' instead.") } - supported_x_types <- c("data.frame", "matrix", "dgCMatrix", "dgRMatrix") + supported_x_types <- c("data.frame", "matrix", "dgTMatrix", "dgCMatrix", "dgRMatrix") if (!inherits(x, supported_x_types)) { stop( "'x' must be one of the following classes: ", @@ -551,6 +549,12 @@ process.x.and.col.args <- function( paste(class(x), collapse = ", ") ) } + if (use_qdm && inherits(x, "sparseMatrix") && !inherits(x, "dgRMatrix")) { + x <- methods::as(x, "RsparseMatrix") + if (!inherits(x, "RsparseMatrix")) { + stop("Internal error: casting sparse matrix did not yield 'dgRMatrix'.") + } + } if (NROW(feature_weights)) { if (is.list(feature_weights)) { @@ -630,7 +634,8 @@ process.x.and.col.args <- function( as.list(monotone_constraints), interaction_constraints, feature_weights, - lst_args + lst_args, + use_qdm ) ) } else { @@ -719,6 +724,7 @@ process.x.and.col.args <- function( }) } + lst_args$dmatrix_args$data <- x return(lst_args) } @@ -916,6 +922,7 @@ xgboost <- function( params <- prescreen.parameters(params) prescreen.objective(objective) + use_qdm <- check.can.use.qdm(x, params) lst_args <- process.y.margin.and.objective(y, base_margin, objective, params) lst_args <- process.row.weights(weights, lst_args) lst_args <- process.x.and.col.args( @@ -923,10 +930,10 @@ xgboost <- function( monotone_constraints, interaction_constraints, feature_weights, - lst_args + lst_args, + use_qdm ) - use_qdm <- check.can.use.qdm(x, params) if (use_qdm && "max_bin" %in% names(params)) { lst_args$dmatrix_args$max_bin <- params$max_bin } @@ -938,7 +945,6 @@ xgboost <- function( params <- c(lst_args$params, params) - lst_args$dmatrix_args$data <- x fn_dm <- if (use_qdm) xgb.QuantileDMatrix else xgb.DMatrix dm <- do.call(fn_dm, lst_args$dmatrix_args) model <- xgb.train( diff --git a/R-package/tests/testthat/test_xgboost.R b/R-package/tests/testthat/test_xgboost.R index 8ca51fb9deee..e767792991d4 100644 --- a/R-package/tests/testthat/test_xgboost.R +++ b/R-package/tests/testthat/test_xgboost.R @@ -349,7 +349,8 @@ test_that("Process monotone constraints", { monotone_constraints = mc_list, interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) expect_equal( res$params$monotone_constraints, @@ -362,7 +363,8 
@@ test_that("Process monotone constraints", { monotone_constraints = mc_list2, interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) expect_equal( res$params$monotone_constraints, @@ -375,7 +377,8 @@ test_that("Process monotone constraints", { monotone_constraints = mc_vec, interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) expect_equal( res$params$monotone_constraints, @@ -389,7 +392,8 @@ test_that("Process monotone constraints", { monotone_constraints = mc_named_vec, interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) expect_equal( res$params$monotone_constraints, @@ -403,7 +407,8 @@ test_that("Process monotone constraints", { monotone_constraints = mc_named_all, interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) expect_equal( res$params$monotone_constraints, @@ -420,7 +425,8 @@ test_that("Process monotone constraints", { ), interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) }) @@ -430,29 +436,32 @@ test_that("Process monotone constraints", { monotone_constraints = rep(0, 6), interaction_constraints = NULL, feature_weights = NULL, - lst_args = list() + lst_args = list(), + use_qdm = FALSE ) }) }) test_that("Process interaction_constraints", { data(iris) - res <- process.x.and.col.args(iris, NULL, list(c(1L, 2L)), NULL, NULL) + res <- process.x.and.col.args(iris, NULL, list(c(1L, 2L)), NULL, NULL, FALSE) expect_equal( res$params$interaction_constraints, list(c(0, 1)) ) - res <- process.x.and.col.args(iris, NULL, list(c(1.0, 2.0)), NULL, NULL) + res <- process.x.and.col.args(iris, NULL, list(c(1.0, 2.0)), NULL, NULL, FALSE) expect_equal( res$params$interaction_constraints, list(c(0, 1)) ) - res <- process.x.and.col.args(iris, NULL, list(c(1, 2), c(3, 4)), NULL, NULL) + res <- process.x.and.col.args(iris, NULL, list(c(1, 2), c(3, 4)), NULL, NULL, FALSE) expect_equal( res$params$interaction_constraints, list(c(0, 1), c(2, 3)) ) - res <- process.x.and.col.args(iris, NULL, list(c("Sepal.Length", "Sepal.Width")), NULL, NULL) + res <- process.x.and.col.args( + iris, NULL, list(c("Sepal.Length", "Sepal.Width")), NULL, NULL, FALSE + ) expect_equal( res$params$interaction_constraints, list(c(0, 1)) @@ -462,7 +471,8 @@ test_that("Process interaction_constraints", { NULL, list(c("Sepal.Length", "Sepal.Width")), NULL, - NULL + NULL, + FALSE ) expect_equal( res$params$interaction_constraints, @@ -473,7 +483,8 @@ test_that("Process interaction_constraints", { NULL, list(c("Sepal.Width", "Petal.Length"), c("Sepal.Length", "Petal.Width", "Species")), NULL, - NULL + NULL, + FALSE ) expect_equal( res$params$interaction_constraints, @@ -481,28 +492,44 @@ test_that("Process interaction_constraints", { ) expect_error({ - process.x.and.col.args(iris, NULL, list(c(1L, 20L)), NULL, NULL) + process.x.and.col.args(iris, NULL, list(c(1L, 20L)), NULL, NULL, FALSE) }) expect_error({ - process.x.and.col.args(iris, NULL, list(c(0L, 2L)), NULL, NULL) + process.x.and.col.args(iris, NULL, list(c(0L, 2L)), NULL, NULL, FALSE) }) expect_error({ - process.x.and.col.args(iris, NULL, list(c("1", "2")), NULL, NULL) + process.x.and.col.args(iris, NULL, list(c("1", "2")), NULL, NULL, FALSE) }) expect_error({ - process.x.and.col.args(iris, NULL, list(c("Sepal", "Petal")), NULL, NULL) + 
process.x.and.col.args(iris, NULL, list(c("Sepal", "Petal")), NULL, NULL, FALSE)
   })
   expect_error({
-    process.x.and.col.args(iris, NULL, c(1L, 2L), NULL, NULL)
+    process.x.and.col.args(iris, NULL, c(1L, 2L), NULL, NULL, FALSE)
   })
   expect_error({
-    process.x.and.col.args(iris, NULL, matrix(c(1L, 2L)), NULL, NULL)
+    process.x.and.col.args(iris, NULL, matrix(c(1L, 2L)), NULL, NULL, FALSE)
   })
   expect_error({
-    process.x.and.col.args(iris, NULL, list(c(1, 2.5)), NULL, NULL)
+    process.x.and.col.args(iris, NULL, list(c(1, 2.5)), NULL, NULL, FALSE)
   })
 })
 
+test_that("Sparse matrices are cast to CSR for QDM", {
+  data(agaricus.test, package = "xgboost")
+  x <- agaricus.test$data
+  for (x_in in list(x, methods::as(x, "TsparseMatrix"))) {
+    res <- process.x.and.col.args(
+      x_in,
+      NULL,
+      NULL,
+      NULL,
+      NULL,
+      TRUE
+    )
+    expect_s4_class(res$dmatrix_args$data, "dgRMatrix")
+  }
+})
+
 test_that("Process feature_weights", {
   data(iris)
   w_vector <- seq(1, 5)
@@ -511,7 +538,8 @@ test_that("Process feature_weights", {
     monotone_constraints = NULL,
     interaction_constraints = NULL,
     feature_weights = w_vector,
-    lst_args = list()
+    lst_args = list(),
+    use_qdm = FALSE
   )
   expect_equal(
     res$dmatrix_args$feature_weights,
@@ -525,7 +553,8 @@ test_that("Process feature_weights", {
     monotone_constraints = NULL,
     interaction_constraints = NULL,
     feature_weights = w_named_vector,
-    lst_args = list()
+    lst_args = list(),
+    use_qdm = FALSE
   )
   expect_equal(
     res$dmatrix_args$feature_weights,
@@ -544,7 +573,8 @@ test_that("Process feature_weights", {
     monotone_constraints = NULL,
     interaction_constraints = NULL,
     feature_weights = w_list,
-    lst_args = list()
+    lst_args = list(),
+    use_qdm = FALSE
   )
   expect_equal(
     res$dmatrix_args$feature_weights,

From bd0887dca9301cb5e79c9ed5d8ab69968017c363 Mon Sep 17 00:00:00 2001
From: david-cortes
Date: Sat, 6 Jul 2024 15:28:44 +0200
Subject: [PATCH 21/22] update roxygen

---
 R-package/DESCRIPTION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION
index 5362d5cf63a4..32f8e70bec7f 100644
--- a/R-package/DESCRIPTION
+++ b/R-package/DESCRIPTION
@@ -67,6 +67,6 @@ Imports:
     data.table (>= 1.9.6),
     jsonlite (>= 1.0)
 Roxygen: list(markdown = TRUE)
-RoxygenNote: 7.3.1
+RoxygenNote: 7.3.2
 Encoding: UTF-8
 SystemRequirements: GNU make, C++17

From 9306018cb5105cf5d7ea619b86bdbac628a565c8 Mon Sep 17 00:00:00 2001
From: david-cortes
Date: Sun, 14 Jul 2024 12:03:54 +0200
Subject: [PATCH 22/22] move all parameters to function arguments

---
 R-package/R/xgboost.R                   | 37 ++++++++-----------
 R-package/man/xgboost.Rd                | 22 +++++++--------
 R-package/tests/testthat/test_xgboost.R | 10 ++++++-
 3 files changed, 30 insertions(+), 39 deletions(-)

diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R
index c7882caefef0..9ea66731bf81 100644
--- a/R-package/R/xgboost.R
+++ b/R-package/R/xgboost.R
@@ -17,17 +17,6 @@ prescreen.parameters <- function(params) {
     }
   }
 
-  params_function_args <- c(
-    "objective", "verbose", "verbosity", "nthread", "seed",
-    "monotone_constraints", "interaction_constraints"
-  )
-  if (any(names(params) %in% params_function_args)) {
-    stop(
-      "'xgboost()' function arguments cannot be passed under 'params'. Got: ",
-      paste(intersect(names(params), params_function_args), collapse = ", ")
-    )
-  }
-
   return(params)
 }
 
@@ -809,15 +798,6 @@ process.x.and.col.args <- function(
 #' here - for example, objectives which are a variation of another but with a different default
 #' prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are
 #' ranking objectives, nor custom objectives at the moment.
-#' @param params List of training parameters. See the online documentation
-#' \href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for
-#' details about possible values and what they do.
-#'
-#' Note that not all possible values from the core XGBoost library are allowed as `params` for
-#' 'xgboost()' - in particular, values which are direct arguments to this function (such as
-#' `objective` or `nthreads`) cannot be passed under `params` (they should be passed as function
-#' arguments instead). Values which otherwise require an already-fitted booster object (such as
-#' `process_type`) are also not accepted here.
 #' @param nrounds Number of boosting iterations / rounds.
 #'
 #' Note that the number of default boosting rounds here is not automatically tuned, and different
@@ -885,6 +865,13 @@ process.x.and.col.args <- function(
 #'
 #' If `NULL`, will start from zero, but note that for most objectives, an intercept is usually
 #' added (controllable through parameter `base_score` instead) when `base_margin` is not passed.
+#' @param ... Other training parameters. See the online documentation
+#' \href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for
+#' details about possible values and what they do.
+#'
+#' Note that not all possible values from the core XGBoost library are allowed here as
+#' arguments to 'xgboost()' - in particular, values which require an already-fitted booster
+#' object (such as `process_type`) are not accepted.
 #' @return A model object, inheriting from both `xgboost` and `xgb.Booster`. Compared to the regular
 #' `xgb.Booster` model class produced by \link{xgb.train}, this `xgboost` class will have an
 #' additional attribute `metadata` containing information which is used for formatting prediction
@@ -905,7 +892,6 @@ xgboost <- function(
     x,
     y,
     objective = NULL,
-    params = list(),
     nrounds = 100L,
     weights = NULL,
     verbosity = 0L,
@@ -914,12 +900,11 @@ xgboost <- function(
     monotone_constraints = NULL,
     interaction_constraints = NULL,
     feature_weights = NULL,
-    base_margin = NULL
+    base_margin = NULL,
+    ...
 ) {
-  # Note: some validations on parameter names are performed before passing them to
-  # 'xgb.train', hence this seemingly redundant conversion of names below.
-  names(params) <- gsub(".", "_", names(params), fixed = TRUE)
-
+  # Note: '...' is a workaround, to be removed later by making all parameters proper function arguments
+  params <- list(...)
   params <- prescreen.parameters(params)
   prescreen.objective(objective)
   use_qdm <- check.can.use.qdm(x, params)
diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd
index ee8a6594e12b..4af8f25ecc04 100644
--- a/R-package/man/xgboost.Rd
+++ b/R-package/man/xgboost.Rd
@@ -8,7 +8,6 @@ xgboost(
   x,
   y,
   objective = NULL,
-  params = list(),
   nrounds = 100L,
   weights = NULL,
   verbosity = 0L,
@@ -17,7 +16,8 @@ xgboost(
   monotone_constraints = NULL,
   interaction_constraints = NULL,
   feature_weights = NULL,
-  base_margin = NULL
+  base_margin = NULL,
+  ...
 )
 }
 \arguments{
@@ -82,16 +82,6 @@ here - for example, objectives which are a variation of another but with a diffe
 prediction type (e.g. `multi:softmax` vs. `multi:softprob`) are not allowed, and neither are
 ranking objectives, nor custom objectives at the moment.}
 
-\item{params}{List of training parameters. See the online documentation
-\href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for
-details about possible values and what they do.
-
-Note that not all possible values from the core XGBoost library are allowed as `params` for
-'xgboost()' - in particular, values which are direct arguments to this function (such as
-`objective` or `nthreads`) cannot be passed under `params` (they should be passed as function
-arguments instead). Values which otherwise require an already-fitted booster object (such as
-`process_type`) are also not accepted here.}
-
 \item{nrounds}{Number of boosting iterations / rounds.
 
 Note that the number of default boosting rounds here is not automatically tuned, and different
@@ -167,6 +157,14 @@ the corresponding `y` - `base_margin` should have the same column order that the
 
 If `NULL`, will start from zero, but note that for most objectives, an intercept is usually
 added (controllable through parameter `base_score` instead) when `base_margin` is not passed.}
+
+\item{...}{Other training parameters. See the online documentation
+\href{https://xgboost.readthedocs.io/en/stable/parameter.html}{XGBoost Parameters} for
+details about possible values and what they do.
+
+Note that not all possible values from the core XGBoost library are allowed here as
+arguments to 'xgboost()' - in particular, values which require an already-fitted booster
+object (such as `process_type`) are not accepted.}
 }
 \value{
 A model object, inheriting from both `xgboost` and `xgb.Booster`. Compared to the regular
diff --git a/R-package/tests/testthat/test_xgboost.R b/R-package/tests/testthat/test_xgboost.R
index e767792991d4..a4ac658a11b8 100644
--- a/R-package/tests/testthat/test_xgboost.R
+++ b/R-package/tests/testthat/test_xgboost.R
@@ -591,7 +591,8 @@ test_that("Whole function works", {
     y,
     monotone_constraints = list(age = -1),
     nthreads = 1L,
-    nrounds = 5L
+    nrounds = 5L,
+    eta = 3
   )
   expect_equal(
     attributes(model)$params$objective,
@@ -605,6 +606,13 @@ test_that("Whole function works", {
     attributes(model)$params$monotone_constraints,
     "(0,-1,0,0,0,0,0,0)"
   )
+  expect_false(
+    "interaction_constraints" %in% names(attributes(model)$params)
+  )
+  expect_equal(
+    attributes(model)$params$eta,
+    3
+  )
   txt <- capture.output({
     print(model)
   })
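
Note on patch 20 (illustrative sketch, not part of the patch series): when the
QuantileDMatrix path is enabled, CSC ('dgCMatrix') and COO ('dgTMatrix') inputs
are coerced to CSR before the DMatrix is constructed, using the same
'methods::as()' call as the patched 'process.x.and.col.args()'. A minimal
reproduction of that coercion, assuming the 'Matrix' package and the
'agaricus.test' data shipped with the R package:

    library(Matrix)
    data(agaricus.test, package = "xgboost")

    x_csc <- agaricus.test$data                    # CSC input: 'dgCMatrix'
    x_coo <- methods::as(x_csc, "TsparseMatrix")   # COO input: 'dgTMatrix'

    # The coercion applied when 'use_qdm' is TRUE and 'x' is not already CSR:
    x_csr <- methods::as(x_coo, "RsparseMatrix")
    inherits(x_csr, "dgRMatrix")  # TRUE; suitable for xgb.QuantileDMatrix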
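
Note on patch 22 (illustrative sketch, not part of the patch series): training
parameters previously collected in a 'params' list are now passed directly and
captured through '...'. The dataset below is made up for illustration; only
the argument names from the updated 'xgboost()' signature are assumed:

    library(xgboost)

    data(mtcars)
    y <- mtcars$mpg
    x <- mtcars[, -1]

    # Before patch 22:
    #   model <- xgboost(x, y, params = list(eta = 0.1), nrounds = 5L)
    # After patch 22, 'eta' is captured via '...' and forwarded to xgb.train():
    model <- xgboost(x, y, eta = 0.1, nrounds = 5L, nthreads = 1L)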