diff --git a/man/mlr_learners_avg.Rd b/man/mlr_learners_avg.Rd
index afba4e11c..d139fa18e 100644
--- a/man/mlr_learners_avg.Rd
+++ b/man/mlr_learners_avg.Rd
@@ -44,7 +44,7 @@ and \code{"regr.mse"}, i.e. mean squared error for regression.
 \item \code{optimizer} :: \code{\link[bbotk:Optimizer]{Optimizer}} | \code{character(1)}\cr
 \code{\link[bbotk:Optimizer]{Optimizer}} used to find optimal thresholds.
 If \code{character}, converts to \code{\link[bbotk:Optimizer]{Optimizer}}
-via \code{\link[bbotk:opt]{opt}}. Initialized to \code{\link[bbotk:OptimizerNLoptr]{OptimizerNLoptr}}.
+via \code{\link[bbotk:opt]{opt}}. Initialized to \code{\link[bbotk:mlr_optimizers_nloptr]{OptimizerNLoptr}}.
 Nloptr hyperparameters are initialized to \code{xtol_rel = 1e-8}, \code{algorithm = "NLOPT_LN_COBYLA"}
 and equal initial weights for each learner.
 For more fine-grained control, it is recommended to supply a instantiated \code{\link[bbotk:Optimizer]{Optimizer}}.
diff --git a/man/mlr_pipeops_tunethreshold.Rd b/man/mlr_pipeops_tunethreshold.Rd
index 3845c8f65..683c87f61 100644
--- a/man/mlr_pipeops_tunethreshold.Rd
+++ b/man/mlr_pipeops_tunethreshold.Rd
@@ -60,7 +60,7 @@ Initialized to \code{"classif.ce"}, i.e. misclassification error.
 \item \code{optimizer} :: \code{\link[bbotk:Optimizer]{Optimizer}}|\code{character(1)}\cr
 \code{\link[bbotk:Optimizer]{Optimizer}} used to find optimal thresholds.
 If \code{character}, converts to \code{\link[bbotk:Optimizer]{Optimizer}}
-via \code{\link[bbotk:opt]{opt}}. Initialized to \code{\link[bbotk:OptimizerGenSA]{OptimizerGenSA}}.
+via \code{\link[bbotk:opt]{opt}}. Initialized to \code{\link[bbotk:mlr_optimizers_gensa]{OptimizerGenSA}}.
 \item \code{log_level} :: \code{character(1)} | \code{integer(1)}\cr
 Set a temporary log-level for \code{lgr::get_logger("bbotk")}. Initialized to: "warn".
 }
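
For context, a minimal sketch of the usage pattern these docs describe: the `optimizer` construction argument accepts either a character key (converted via `bbotk::opt()`) or an instantiated `Optimizer`, which the docs recommend for fine-grained control. This assumes mlr3pipelines registers these objects under the dictionary keys `classif.avg` and `tunethreshold`, and that `bbotk::opt()` forwards `...` to the optimizer's parameter set; not taken from this diff, so adjust to your installed versions.

```r
library(mlr3)
library(mlr3pipelines)
library(bbotk)

# "classif.avg" initializes its optimizer to OptimizerNLoptr with
# xtol_rel = 1e-8 and algorithm = "NLOPT_LN_COBYLA" (per the docs above).
# For more fine-grained control, supply an instantiated Optimizer instead
# (xtol_rel = 1e-6 here is an arbitrary illustrative value):
learner = lrn("classif.avg")
learner$param_set$values$optimizer = opt("nloptr",
  algorithm = "NLOPT_LN_COBYLA", xtol_rel = 1e-6)

# PipeOpTuneThreshold is initialized to OptimizerGenSA; a character key
# would be converted via bbotk::opt(), or pass an Optimizer directly:
po_tt = po("tunethreshold", measure = "classif.ce", optimizer = opt("gensa"))
```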