diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 0e9dfa04d0..ed69ee3df7 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2018-2024, NVIDIA CORPORATION. +# Copyright (c) 2018-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -109,6 +109,17 @@ set(RMM_LOGGING_LEVEL "INFO" CACHE STRING "Choose the logging level.") set_property(CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF") message(VERBOSE "CUML_CPP: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.") +# Set logging level +set(LIBCUML_LOGGING_LEVEL + "DEBUG" + CACHE STRING "Choose the logging level." +) +set_property( + CACHE LIBCUML_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" + "OFF" +) +message(VERBOSE "CUML: LIBCUML_LOGGING_LEVEL = '${LIBCUML_LOGGING_LEVEL}'.") + if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS) # Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to # have different values for the `Threads::Threads` target. Setting this flag ensures @@ -220,6 +231,15 @@ endif() rapids_cpm_init() rapids_cmake_install_lib_dir(lib_dir) +# Not using rapids-cmake since we never want to find, always download. 
+CPMAddPackage( + NAME rapids_logger GITHUB_REPOSITORY rapidsai/rapids-logger GIT_SHALLOW FALSE GIT_TAG + 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 VERSION 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 +) +rapids_make_logger( + ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger +) + if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS) find_package(Threads) endif() @@ -291,8 +311,7 @@ if(BUILD_CUML_CPP_LIBRARY) # single GPU components # common components - add_library(${CUML_CPP_TARGET} - src/common/logger.cpp) + add_library(${CUML_CPP_TARGET}) if (CUML_ENABLE_GPU) target_compile_definitions(${CUML_CPP_TARGET} PUBLIC CUML_ENABLE_GPU) endif() @@ -564,6 +583,7 @@ if(BUILD_CUML_CPP_LIBRARY) PRIVATE "$<$:${CUML_CXX_FLAGS}>" "$<$:${CUML_CUDA_FLAGS}>" ) + target_compile_definitions(${CUML_CPP_TARGET} PUBLIC "CUML_LOG_ACTIVE_LEVEL=CUML_LOG_LEVEL_${LIBCUML_LOGGING_LEVEL}") target_include_directories(${CUML_CPP_TARGET} PUBLIC @@ -604,6 +624,7 @@ if(BUILD_CUML_CPP_LIBRARY) raft::raft rmm::rmm_logger_impl raft::raft_logger_impl + cuml_logger_impl $ $<$:CUDA::cufft${_ctk_fft_static_suffix}> ${TREELITE_LIBS} @@ -630,6 +651,7 @@ if(BUILD_CUML_CPP_LIBRARY) target_link_libraries(${CUML_CPP_TARGET} PUBLIC rmm::rmm rmm::rmm_logger ${CUVS_LIB} ${_cuml_cpp_public_libs} + cuml_logger PRIVATE ${_cuml_cpp_private_libs} ) diff --git a/cpp/bench/sg/svc.cu b/cpp/bench/sg/svc.cu index 8ddd8be441..5399fec776 100644 --- a/cpp/bench/sg/svc.cu +++ b/cpp/bench/sg/svc.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -100,8 +100,9 @@ std::vector> getInputs() p.blobs.seed = 12345ULL; // SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity}) - p.svm_param = ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, CUML_LEVEL_INFO, 0, ML::SVM::C_SVC}; - p.model = ML::SVM::SvmModel{0, 0, 0, nullptr, {}, nullptr, 0, nullptr}; + p.svm_param = + ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, ML::level_enum::info, 0, ML::SVM::C_SVC}; + p.model = ML::SVM::SvmModel{0, 0, 0, nullptr, {}, nullptr, 0, nullptr}; std::vector rowcols = {{50000, 2, 2}, {2048, 100000, 2}, {50000, 1000, 2}}; diff --git a/cpp/bench/sg/svr.cu b/cpp/bench/sg/svr.cu index c061e53b1f..40be89b372 100644 --- a/cpp/bench/sg/svr.cu +++ b/cpp/bench/sg/svr.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -102,7 +102,7 @@ std::vector> getInputs() // SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity, // epsilon, svmType}) p.svm_param = - ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, CUML_LEVEL_INFO, 0.1, ML::SVM::EPSILON_SVR}; + ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, ML::level_enum::info, 0.1, ML::SVM::EPSILON_SVR}; p.model = new ML::SVM::SvmModel{0, 0, 0, 0}; std::vector rowcols = {{50000, 2, 2}, {1024, 10000, 10}, {3000, 200, 200}}; diff --git a/cpp/examples/dbscan/dbscan_example.cpp b/cpp/examples/dbscan/dbscan_example.cpp index 3ba367cbdc..f6b07fc946 100644 --- a/cpp/examples/dbscan/dbscan_example.cpp +++ b/cpp/examples/dbscan/dbscan_example.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -209,7 +209,7 @@ int main(int argc, char* argv[]) nullptr, max_bytes_per_batch, ML::Dbscan::EpsNnMethod::BRUTE_FORCE, - false); + ML::level_enum::off); CUDA_RT_CALL(cudaMemcpyAsync( h_labels.data(), d_labels, nRows * sizeof(int), cudaMemcpyDeviceToHost, stream)); CUDA_RT_CALL(cudaStreamSynchronize(stream)); diff --git a/cpp/include/cuml/cluster/dbscan.hpp b/cpp/include/cuml/cluster/dbscan.hpp index d691452db2..d910c03414 100644 --- a/cpp/include/cuml/cluster/dbscan.hpp +++ b/cpp/include/cuml/cluster/dbscan.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ #pragma once -#include +#include #include @@ -73,7 +73,7 @@ void fit(const raft::handle_t& handle, float* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, double* input, @@ -87,7 +87,7 @@ void fit(const raft::handle_t& handle, double* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, @@ -102,7 +102,7 @@ void fit(const raft::handle_t& handle, float* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = ML::level_enum::info, bool opg = false); void fit(const raft::handle_t& handle, double* input, @@ -116,7 +116,7 @@ void fit(const raft::handle_t& handle, double* sample_weight = nullptr, size_t max_bytes_per_batch = 0, EpsNnMethod eps_nn_method = BRUTE_FORCE, - int verbosity = CUML_LEVEL_INFO, + level_enum verbosity = 
ML::level_enum::info, bool opg = false); /** @} */ diff --git a/cpp/include/cuml/cluster/kmeans.hpp b/cpp/include/cuml/cluster/kmeans.hpp index f075e49843..9a3ffc768f 100644 --- a/cpp/include/cuml/cluster/kmeans.hpp +++ b/cpp/include/cuml/cluster/kmeans.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ #pragma once -#include - #include namespace raft { diff --git a/cpp/include/cuml/common/log_levels.hpp b/cpp/include/cuml/common/log_levels.hpp deleted file mode 100644 index 2029f2aeac..0000000000 --- a/cpp/include/cuml/common/log_levels.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#pragma once - -/** - * @defgroup CumlLogLevels Logging levels used in cuML - * - * @note exactly match the corresponding ones (but reverse in terms of value) - * in spdlog for wrapping purposes - * - * @{ - */ -#define CUML_LEVEL_TRACE 6 -#define CUML_LEVEL_DEBUG 5 -#define CUML_LEVEL_INFO 4 -#define CUML_LEVEL_WARN 3 -#define CUML_LEVEL_ERROR 2 -#define CUML_LEVEL_CRITICAL 1 -#define CUML_LEVEL_OFF 0 -/** @} */ - -#if !defined(CUML_ACTIVE_LEVEL) -#define CUML_ACTIVE_LEVEL CUML_LEVEL_DEBUG -#endif diff --git a/cpp/include/cuml/common/logger.hpp b/cpp/include/cuml/common/logger.hpp deleted file mode 100644 index 161d881087..0000000000 --- a/cpp/include/cuml/common/logger.hpp +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#pragma once - -#include - -#include - -#include -#include -#include -#include - -namespace spdlog { -class logger; -namespace sinks { -template -class CallbackSink; -using callback_sink_mt = CallbackSink; -}; // namespace sinks -}; // namespace spdlog - -namespace ML { - -/** - * @defgroup CStringFormat Expand a C-style format string - * - * @brief Expands C-style formatted string into std::string - * - * @param[in] fmt format string - * @param[in] vl respective values for each of format modifiers in the string - * - * @return the expanded `std::string` - * - * @{ - */ -std::string format(const char* fmt, va_list& vl); -std::string format(const char* fmt, ...); -/** @} */ - -/** - * @brief The main Logging class for cuML library. - * - * This class acts as a thin wrapper over the underlying `spdlog` interface. The - * design is done in this way in order to avoid us having to also ship `spdlog` - * header files in our installation. - * - * @todo This currently only supports logging to stdout. Need to add support in - * future to add custom loggers as well [Issue #2046] - */ -class Logger { - public: - /** - * @brief Singleton method to get the underlying logger object - * - * @return the singleton logger object - */ - static Logger& get(); - - /** - * @brief Set the logging level. - * - * Only messages with level equal or above this will be printed - * - * @param[in] level logging level - * - * @note The log level will actually be set only if the input is within the - * range [CUML_LEVEL_TRACE, CUML_LEVEL_OFF]. If it is not, then it'll - * be ignored. See documentation of decisiontree for how this gets used - */ - void setLevel(int level); - - /** - * @brief Set the logging pattern - * - * @param[in] pattern the pattern to be set. 
Refer this link - * https://github.com/gabime/spdlog/wiki/3.-Custom-formatting - * to know the right syntax of this pattern - */ - void setPattern(const std::string& pattern); - - /** - * @brief Register a callback function to be run in place of usual log call - * - * @param[in] callback the function to be run on all logged messages - */ - void setCallback(void (*callback)(int lvl, const char* msg)); - - /** - * @brief Register a flush function compatible with the registered callback - * - * @param[in] flush the function to use when flushing logs - */ - void setFlush(void (*flush)()); - - /** - * @brief Tells whether messages will be logged for the given log level - * - * @param[in] level log level to be checked for - * @return true if messages will be logged for this level, else false - */ - bool shouldLogFor(int level) const; - - /** - * @brief Query for the current log level - * - * @return the current log level - */ - int getLevel() const; - - /** - * @brief Get the current logging pattern - * @return the pattern - */ - std::string getPattern() const { return currPattern; } - - /** - * @brief Main logging method - * - * @param[in] level logging level of this message - * @param[in] fmt C-like format string, followed by respective params - */ - void log(int level, const char* fmt, ...); - - /** - * @brief Flush logs by calling flush on underlying logger - */ - void flush(); - - private: - Logger(); - ~Logger() {} - - std::shared_ptr sink; - std::shared_ptr logger; - std::string currPattern; - static const std::string DefaultPattern; -}; // class Logger - -/** - * @brief RAII based pattern setter for Logger class - * - * @code{.cpp} - * { - * PatternSetter _("%l -- %v"); - * CUML_LOG_INFO("Test message\n"); - * } - * @endcode - */ -class PatternSetter { - public: - /** - * @brief Set the pattern for the rest of the log messages - * @param[in] pattern pattern to be set - */ - PatternSetter(const std::string& pattern = "%v"); - - /** - * @brief This will restore the 
previous pattern that was active during the - * moment this object was created - */ - ~PatternSetter(); - - private: - std::string prevPattern; -}; // class PatternSetter - -/** - * @defgroup LoggerMacros Helper macros for dealing with logging - * @{ - */ -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_TRACE) -#define CUML_LOG_TRACE(fmt, ...) \ - do { \ - std::stringstream ss; \ - ss << ML::format("%s:%d ", __FILE__, __LINE__); \ - ss << ML::format(fmt, ##__VA_ARGS__); \ - ML::Logger::get().log(CUML_LEVEL_TRACE, ss.str().c_str()); \ - } while (0) -#else -#define CUML_LOG_TRACE(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_DEBUG) -#define CUML_LOG_DEBUG(fmt, ...) \ - do { \ - std::stringstream ss; \ - ss << ML::format("%s:%d ", __FILE__, __LINE__); \ - ss << ML::format(fmt, ##__VA_ARGS__); \ - ML::Logger::get().log(CUML_LEVEL_DEBUG, ss.str().c_str()); \ - } while (0) -#else -#define CUML_LOG_DEBUG(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_INFO) -#define CUML_LOG_INFO(fmt, ...) ML::Logger::get().log(CUML_LEVEL_INFO, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_INFO(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_WARN) -#define CUML_LOG_WARN(fmt, ...) ML::Logger::get().log(CUML_LEVEL_WARN, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_WARN(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_ERROR) -#define CUML_LOG_ERROR(fmt, ...) ML::Logger::get().log(CUML_LEVEL_ERROR, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_ERROR(fmt, ...) void(0) -#endif - -#if (CUML_ACTIVE_LEVEL >= CUML_LEVEL_CRITICAL) -#define CUML_LOG_CRITICAL(fmt, ...) ML::Logger::get().log(CUML_LEVEL_CRITICAL, fmt, ##__VA_ARGS__) -#else -#define CUML_LOG_CRITICAL(fmt, ...) 
void(0) -#endif -/** @} */ - -}; // namespace ML diff --git a/cpp/include/cuml/common/utils.hpp b/cpp/include/cuml/common/utils.hpp index 8ac9d93a1f..6c8f690fbf 100644 --- a/cpp/include/cuml/common/utils.hpp +++ b/cpp/include/cuml/common/utils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ #pragma once -#include "logger.hpp" +#include #include #include diff --git a/cpp/include/cuml/ensemble/randomforest.hpp b/cpp/include/cuml/ensemble/randomforest.hpp index 2df7929cd5..596e80f535 100644 --- a/cpp/include/cuml/ensemble/randomforest.hpp +++ b/cpp/include/cuml/ensemble/randomforest.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,13 +101,13 @@ struct RF_params { void preprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); /* Revert preprocessing effect, if needed. 
*/ void postprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); template struct RandomForestMetaData { @@ -147,7 +147,7 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void fit(const raft::handle_t& user_handle, RandomForestClassifierD*& forest, double* input, @@ -156,7 +156,7 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestClassifierF* forest, @@ -164,27 +164,27 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestClassifierD* forest, const double* input, int n_rows, int n_cols, int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestClassifierF* forest, const int* ref_labels, int n_rows, const int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestClassifierD* forest, const int* ref_labels, int n_rows, const int* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_params set_rf_params(int max_depth, int max_leaves, @@ -213,7 +213,7 @@ void fit(const raft::handle_t& user_handle, int n_cols, float* labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void fit(const raft::handle_t& user_handle, 
RandomForestRegressorD*& forest, double* input, @@ -221,7 +221,7 @@ void fit(const raft::handle_t& user_handle, int n_cols, double* labels, RF_params rf_params, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestRegressorF* forest, @@ -229,25 +229,25 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, float* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); void predict(const raft::handle_t& user_handle, const RandomForestRegressorD* forest, const double* input, int n_rows, int n_cols, double* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestRegressorF* forest, const float* ref_labels, int n_rows, const float* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); RF_metrics score(const raft::handle_t& user_handle, const RandomForestRegressorD* forest, const double* ref_labels, int n_rows, const double* predictions, - int verbosity = CUML_LEVEL_INFO); + level_enum verbosity = ML::level_enum::info); }; // namespace ML diff --git a/cpp/include/cuml/manifold/tsne.h b/cpp/include/cuml/manifold/tsne.h index 8c658b3c69..1e1222e2e3 100644 --- a/cpp/include/cuml/manifold/tsne.h +++ b/cpp/include/cuml/manifold/tsne.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -94,7 +94,7 @@ struct TSNEParams { long long random_state = -1; // verbosity level for logging messages during execution - int verbosity = CUML_LEVEL_INFO; + level_enum verbosity = ML::level_enum::info; // Embedding initializer algorithm TSNE_INIT init = TSNE_INIT::RANDOM; diff --git a/cpp/include/cuml/manifold/umapparams.h b/cpp/include/cuml/manifold/umapparams.h index a337c6cf64..a3086e6e13 100644 --- a/cpp/include/cuml/manifold/umapparams.h +++ b/cpp/include/cuml/manifold/umapparams.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -117,7 +117,7 @@ class UMAPParams { /** * Control logging level during algorithm execution */ - int verbosity = CUML_LEVEL_INFO; + level_enum verbosity = level_enum::info; /** * More specific parameters controlling the embedding. If None these diff --git a/cpp/include/cuml/solvers/lars.hpp b/cpp/include/cuml/solvers/lars.hpp index c740b64672..5f795bc735 100644 --- a/cpp/include/cuml/solvers/lars.hpp +++ b/cpp/include/cuml/solvers/lars.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ #pragma once +#include + #include namespace ML { @@ -66,7 +68,7 @@ void larsFit(const raft::handle_t& handle, math_t* Gram, int max_iter, math_t* coef_path, - int verbosity, + level_enum verbosity, idx_t ld_X, idx_t ld_G, math_t eps); diff --git a/cpp/include/cuml/svm/linear.hpp b/cpp/include/cuml/svm/linear.hpp index 9cefdcc883..a3564d43ed 100644 --- a/cpp/include/cuml/svm/linear.hpp +++ b/cpp/include/cuml/svm/linear.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. 
+ * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ #pragma once +#include + #include namespace ML { @@ -64,7 +66,7 @@ struct LinearSVMParams { */ int lbfgs_memory = 5; /** Triggers extra output when greater than zero. */ - int verbose = 0; + level_enum verbose = level_enum::off; /** * The constant scaling factor of the main term in the loss function. * (You can also think of that as the inverse factor of the penalty term). diff --git a/cpp/include/cuml/svm/svc.hpp b/cpp/include/cuml/svm/svc.hpp index 426a049483..652f8c1b88 100644 --- a/cpp/include/cuml/svm/svc.hpp +++ b/cpp/include/cuml/svm/svc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -236,10 +236,10 @@ class SVC { math_t tol = 1.0e-3, raft::distance::kernels::KernelParams kernel_params = raft::distance::kernels::KernelParams{raft::distance::kernels::LINEAR, 3, 1, 0}, - math_t cache_size = 200, - int max_iter = -1, - int nochange_steps = 1000, - int verbosity = CUML_LEVEL_INFO); + math_t cache_size = 200, + int max_iter = -1, + int nochange_steps = 1000, + level_enum verbosity = ML::level_enum::info); ~SVC(); diff --git a/cpp/include/cuml/svm/svm_api.h b/cpp/include/cuml/svm/svm_api.h index 5da03f903a..381b4ddf56 100644 --- a/cpp/include/cuml/svm/svm_api.h +++ b/cpp/include/cuml/svm/svm_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -43,7 +43,7 @@ typedef enum cumlSvmKernelType { LINEAR, POLYNOMIAL, RBF, TANH } cumlSvmKernelTy * @param [in] tol tolerance to stop fitting * @param [in] verbosity Fine grained control over logging of useful information * as algorithm executes. Currently passing anything greater than or equal to - * CUML_LEVEL_INFO will make it execute quietly + * ML::level_enum::info will make it execute quietly * @param [in] kernel type of kernel (LINEAR, POLYNOMIAL, RBF or TANH) * @param [in] degree of polynomial kernel (ignored by others) * @param [in] gamma multiplier in the RBF, POLYNOMIAL and TANH kernels diff --git a/cpp/include/cuml/svm/svm_parameter.h b/cpp/include/cuml/svm/svm_parameter.h index c5fc4ef2d0..b73ff90e23 100644 --- a/cpp/include/cuml/svm/svm_parameter.h +++ b/cpp/include/cuml/svm/svm_parameter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ */ #pragma once +#include + namespace ML { namespace SVM { @@ -37,10 +39,10 @@ struct SvmParameter { //! maximum number of outer SMO iterations. Use -1 to let the SMO solver set //! a default value (100*n_rows). int max_iter; - int nochange_steps; // -#include - -#include // NOLINT -#include // NOLINT - -#include -#include - -namespace ML { - -std::string format(const char* fmt, va_list& vl) -{ - char buf[4096]; - vsnprintf(buf, sizeof(buf), fmt, vl); - return std::string(buf); -} - -std::string format(const char* fmt, ...) 
-{ - va_list vl; - va_start(vl, fmt); - std::string str = format(fmt, vl); - va_end(vl); - return str; -} - -int convert_level_to_spdlog(int level) -{ - level = std::max(CUML_LEVEL_OFF, std::min(CUML_LEVEL_TRACE, level)); - return CUML_LEVEL_TRACE - level; -} - -const std::string Logger::DefaultPattern("[%L] [%H:%M:%S.%f] %v"); - -Logger& Logger::get() -{ - static Logger logger; - return logger; -} - -Logger::Logger() - : sink{std::make_shared()}, - logger{std::make_shared("cuml", sink)}, - currPattern() -{ - setPattern(DefaultPattern); - setLevel(CUML_LEVEL_INFO); -} - -void Logger::setLevel(int level) -{ - level = convert_level_to_spdlog(level); - logger->set_level(static_cast(level)); -} - -void Logger::setPattern(const std::string& pattern) -{ - currPattern = pattern; - logger->set_pattern(pattern); -} - -void Logger::setCallback(spdlog::sinks::LogCallback callback) { sink->set_callback(callback); } - -void Logger::setFlush(void (*flush)()) { sink->set_flush(flush); } - -bool Logger::shouldLogFor(int level) const -{ - level = convert_level_to_spdlog(level); - auto level_e = static_cast(level); - return logger->should_log(level_e); -} - -int Logger::getLevel() const -{ - auto level_e = logger->level(); - return CUML_LEVEL_TRACE - static_cast(level_e); -} - -void Logger::log(int level, const char* fmt, ...) 
-{ - level = convert_level_to_spdlog(level); - auto level_e = static_cast(level); - // explicit check to make sure that we only expand messages when required - if (logger->should_log(level_e)) { - va_list vl; - va_start(vl, fmt); - auto msg = format(fmt, vl); - va_end(vl); - logger->log(level_e, msg); - } -} - -void Logger::flush() { logger->flush(); } - -PatternSetter::PatternSetter(const std::string& pattern) : prevPattern() -{ - prevPattern = Logger::get().getPattern(); - Logger::get().setPattern(pattern); -} - -PatternSetter::~PatternSetter() { Logger::get().setPattern(prevPattern); } - -} // namespace ML diff --git a/cpp/src/dbscan/dbscan.cu b/cpp/src/dbscan/dbscan.cu index 43c130297e..c36416a4f2 100644 --- a/cpp/src/dbscan/dbscan.cu +++ b/cpp/src/dbscan/dbscan.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -35,7 +35,7 @@ void fit(const raft::handle_t& handle, float* sample_weight, size_t max_bytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) { if (opg) @@ -82,7 +82,7 @@ void fit(const raft::handle_t& handle, double* sample_weight, size_t max_bytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) { if (opg) @@ -129,7 +129,7 @@ void fit(const raft::handle_t& handle, float* sample_weight, size_t max_bytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) { if (opg) @@ -176,7 +176,7 @@ void fit(const raft::handle_t& handle, double* sample_weight, size_t max_bytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) { if (opg) diff --git a/cpp/src/dbscan/dbscan.cuh b/cpp/src/dbscan/dbscan.cuh index a8962010a4..a56f62ed7e 100644 --- a/cpp/src/dbscan/dbscan.cuh +++ b/cpp/src/dbscan/dbscan.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -108,10 +108,10 @@ void dbscanFitImpl(const raft::handle_t& handle, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, cudaStream_t stream, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("ML::Dbscan::Fit"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); // XXX: for algo_vd and algo_adj, 0 (naive) is no longer an option and has // been removed. int algo_vd = (metric == cuvs::distance::DistanceType::Precomputed) ? 2 : 1; diff --git a/cpp/src/dbscan/dbscan_api.cpp b/cpp/src/dbscan/dbscan_api.cpp index a052b4e5b2..3ed494751e 100644 --- a/cpp/src/dbscan/dbscan_api.cpp +++ b/cpp/src/dbscan/dbscan_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. 
+ * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,7 +50,7 @@ cumlError_t cumlSpDbscanFit(cumlHandle_t handle, NULL, max_bytes_per_batch, ML::Dbscan::EpsNnMethod::BRUTE_FORCE, - verbosity); + static_cast<ML::level_enum>(verbosity)); } // TODO: Implement this // catch (const MLCommon::Exception& e) @@ -93,7 +93,7 @@ cumlError_t cumlDpDbscanFit(cumlHandle_t handle, NULL, max_bytes_per_batch, ML::Dbscan::EpsNnMethod::BRUTE_FORCE, - verbosity); + static_cast<ML::level_enum>(verbosity)); } // TODO: Implement this // catch (const MLCommon::Exception& e) @@ -107,4 +107,4 @@ cumlError_t cumlDpDbscanFit(cumlHandle_t handle, } return status; } -} \ No newline at end of file +} diff --git a/cpp/src/decisiontree/decisiontree.cuh b/cpp/src/decisiontree/decisiontree.cuh index e5514ffa71..7cc6ea353b 100644 --- a/cpp/src/decisiontree/decisiontree.cuh +++ b/cpp/src/decisiontree/decisiontree.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -364,9 +364,9 @@ class DecisionTree { std::size_t n_cols, DataT* predictions, int num_outputs, - int verbosity) + level_enum verbosity) { - if (verbosity >= 0) { ML::Logger::get().setLevel(verbosity); } + default_logger().set_level(verbosity); ASSERT(is_host_ptr(rows) && is_host_ptr(predictions), "DT Error: Current impl. expects both input and predictions to be CPU " "pointers.\n"); diff --git a/cpp/src/glm/qn/mg/qn_mg.cuh b/cpp/src/glm/qn/mg/qn_mg.cuh index d594890f1e..76cb06c9fe 100644 --- a/cpp/src/glm/qn/mg/qn_mg.cuh +++ b/cpp/src/glm/qn/mg/qn_mg.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, NVIDIA CORPORATION. + * Copyright (c) 2023-2025, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -66,7 +66,7 @@ int qn_fit_mg(const raft::handle_t& handle, auto obj_function = GLMWithDataMG(handle, rank, n_ranks, n_samples, &regularizer_obj, X, y, Z, stder_p); return ML::GLM::detail::qn_minimize( - handle, w0, fx, num_iters, obj_function, l1, opt_param, pams.verbose); + handle, w0, fx, num_iters, obj_function, l1, opt_param, static_cast<ML::level_enum>(pams.verbose)); } template @@ -126,4 +126,4 @@ inline void qn_fit_x_mg(const raft::handle_t& handle, }; // namespace opg }; // namespace GLM -}; // namespace ML \ No newline at end of file +}; // namespace ML diff --git a/cpp/src/glm/qn/qn.cuh b/cpp/src/glm/qn/qn.cuh index 5be5abd9a7..ff85761995 100644 --- a/cpp/src/glm/qn/qn.cuh +++ b/cpp/src/glm/qn/qn.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,14 +61,16 @@ int qn_fit(const raft::handle_t& handle, if (l2 == 0) { GLMWithData lossWith(&loss, X, y, Z); - return qn_minimize(handle, w0, fx, num_iters, lossWith, l1, opt_param, pams.verbose); + return qn_minimize( + handle, w0, fx, num_iters, lossWith, l1, opt_param, static_cast<ML::level_enum>(pams.verbose)); } else { Tikhonov reg(l2); RegularizedGLM obj(&loss, &reg); GLMWithData lossWith(&obj, X, y, Z); - return qn_minimize(handle, w0, fx, num_iters, lossWith, l1, opt_param, pams.verbose); + return qn_minimize( + handle, w0, fx, num_iters, lossWith, l1, opt_param, static_cast<ML::level_enum>(pams.verbose)); } } diff --git a/cpp/src/glm/qn/qn_solvers.cuh b/cpp/src/glm/qn/qn_solvers.cuh index 6f43c7eef0..a438f5da4f 100644 --- a/cpp/src/glm/qn/qn_solvers.cuh +++ b/cpp/src/glm/qn/qn_solvers.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * Copyright (c) 2018-2025, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -148,7 +148,7 @@ inline OPT_RETCODE min_lbfgs(const LBFGSParam& param, int* k, // output iterations SimpleVec& workspace, // scratch space cudaStream_t stream, - int verbosity = 0) + level_enum verbosity = ML::level_enum::off) { int n = x.len; const int workspace_size = lbfgs_workspace_size(param, n); @@ -179,7 +179,7 @@ inline OPT_RETCODE min_lbfgs(const LBFGSParam& param, std::vector fx_hist(param.past > 0 ? param.past : 0); *k = 0; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Running L-BFGS"); // Evaluate function and compute gradient @@ -278,7 +278,7 @@ inline OPT_RETCODE min_owlqn(const LBFGSParam& param, int* k, SimpleVec& workspace, // scratch space cudaStream_t stream, - const int verbosity = 0) + const level_enum verbosity = ML::level_enum::off) { int n = x.len; const int workspace_size = owlqn_workspace_size(param, n); @@ -305,7 +305,7 @@ inline OPT_RETCODE min_owlqn(const LBFGSParam& param, p_ws += vec_size; T* dev_scalar = p_ws; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); SimpleVec svec, yvec; // mask vectors @@ -419,7 +419,7 @@ inline int qn_minimize(const raft::handle_t& handle, LossFunction& loss, const T l1, const LBFGSParam& opt_param, - const int verbosity = 0) + const level_enum verbosity = ML::level_enum::off) { // TODO should the worksapce allocation happen outside? cudaStream_t stream = handle.get_stream(); diff --git a/cpp/src/randomforest/randomforest.cu b/cpp/src/randomforest/randomforest.cu index 14e76c8f38..6ba9470018 100644 --- a/cpp/src/randomforest/randomforest.cu +++ b/cpp/src/randomforest/randomforest.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -122,11 +122,11 @@ void print(const RF_metrics rf_metrics) void preprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity) + level_enum verbosity) { std::pair::iterator, bool> ret; int n_unique_labels = 0; - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Preprocessing labels"); for (int i = 0; i < n_rows; i++) { @@ -149,9 +149,9 @@ void preprocess_labels(int n_rows, void postprocess_labels(int n_rows, std::vector& labels, std::map& labels_map, - int verbosity) + level_enum verbosity) { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); CUML_LOG_DEBUG("Postrocessing labels"); std::map::iterator it; int n_unique_cnt = labels_map.size(); @@ -166,7 +166,7 @@ void postprocess_labels(int n_rows, labels[i] = reverse_map[prev]; CUML_LOG_DEBUG("Mapping %d back to %d", prev, labels[i]); } - CUML_LOG_DEBUG("Finished postrocessing labels"); + CUML_LOG_DEBUG("Finished postprocessing labels"); } /** @@ -182,10 +182,10 @@ void delete_rf_metadata(RandomForestMetaData* forest) template std::string _get_rf_text(const RandomForestMetaData* forest, bool summary) { - ML::PatternSetter _("%v"); if (!forest) { return "Empty forest"; } else { + default_logger().set_pattern("%v"); std::ostringstream oss; oss << "Forest has " << forest->rf_params.n_trees << " trees, " << "max_depth " << forest->rf_params.tree_params.max_depth << ", and max_leaves " @@ -198,6 +198,7 @@ std::string _get_rf_text(const RandomForestMetaData* forest, bool summary) oss << DT::get_tree_text(forest->trees[i].get()) << "\n"; } } + default_logger().set_pattern(default_pattern()); return oss.str(); } } @@ -385,10 +386,10 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + 
ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -406,10 +407,10 @@ void fit(const raft::handle_t& user_handle, int* labels, int n_unique_labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -440,7 +441,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, int* predictions, - int verbosity) + level_enum verbosity) { ASSERT(!forest->trees.empty(), "Cannot predict! No trees in the forest."); std::shared_ptr> rf_classifier = @@ -454,7 +455,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, int* predictions, - int verbosity) + level_enum verbosity) { ASSERT(!forest->trees.empty(), "Cannot predict! 
No trees in the forest."); std::shared_ptr> rf_classifier = @@ -482,7 +483,7 @@ RF_metrics score(const raft::handle_t& user_handle, const int* ref_labels, int n_rows, const int* predictions, - int verbosity) + level_enum verbosity) { RF_metrics classification_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::CLASSIFICATION); @@ -494,7 +495,7 @@ RF_metrics score(const raft::handle_t& user_handle, const int* ref_labels, int n_rows, const int* predictions, - int verbosity) + level_enum verbosity) { RF_metrics classification_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::CLASSIFICATION); @@ -575,10 +576,10 @@ void fit(const raft::handle_t& user_handle, int n_cols, float* labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -595,10 +596,10 @@ void fit(const raft::handle_t& user_handle, int n_cols, double* labels, RF_params rf_params, - int verbosity) + level_enum verbosity) { raft::common::nvtx::range fun_scope("RF::fit @randomforest.cu"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); ASSERT(forest->trees.empty(), "Cannot fit an existing forest."); forest->trees.resize(rf_params.n_trees); forest->rf_params = rf_params; @@ -628,7 +629,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, float* predictions, - int verbosity) + level_enum verbosity) { std::shared_ptr> rf_regressor = std::make_shared>(forest->rf_params, RF_type::REGRESSION); @@ -641,7 +642,7 @@ void predict(const raft::handle_t& user_handle, int n_rows, int n_cols, double* predictions, - int verbosity) + level_enum verbosity) { std::shared_ptr> 
rf_regressor = std::make_shared>(forest->rf_params, RF_type::REGRESSION); @@ -670,7 +671,7 @@ RF_metrics score(const raft::handle_t& user_handle, const float* ref_labels, int n_rows, const float* predictions, - int verbosity) + level_enum verbosity) { RF_metrics regression_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::REGRESSION); @@ -683,7 +684,7 @@ RF_metrics score(const raft::handle_t& user_handle, const double* ref_labels, int n_rows, const double* predictions, - int verbosity) + level_enum verbosity) { RF_metrics regression_score = RandomForest::score( user_handle, ref_labels, n_rows, predictions, verbosity, RF_type::REGRESSION); diff --git a/cpp/src/randomforest/randomforest.cuh b/cpp/src/randomforest/randomforest.cuh index 0233db736c..7f82e2877b 100644 --- a/cpp/src/randomforest/randomforest.cuh +++ b/cpp/src/randomforest/randomforest.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -211,9 +211,9 @@ class RandomForest { int n_cols, L* predictions, const RandomForestMetaData* forest, - int verbosity) const + level_enum verbosity) const { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); this->error_checking(input, predictions, n_rows, n_cols, true); std::vector h_predictions(n_rows); cudaStream_t stream = user_handle.get_stream(); @@ -224,7 +224,7 @@ class RandomForest { int row_size = n_cols; - ML::PatternSetter _("%v"); + default_logger().set_pattern("%v"); for (int row_id = 0; row_id < n_rows; row_id++) { std::vector row_prediction(forest->trees[0]->num_outputs); for (int i = 0; i < this->rf_params.n_trees; i++) { @@ -258,6 +258,7 @@ class RandomForest { raft::update_device(predictions, h_predictions.data(), n_rows, stream); user_handle.sync_stream(stream); + default_logger().set_pattern(default_pattern()); } /** @@ -276,16 +277,16 @@ class RandomForest { const L* ref_labels, int n_rows, const L* predictions, - int verbosity, + level_enum verbosity, int rf_type = RF_type::CLASSIFICATION) { - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); cudaStream_t stream = user_handle.get_stream(); RF_metrics stats; if (rf_type == RF_type::CLASSIFICATION) { // task classifiation: get classification metrics float accuracy = raft::stats::accuracy(predictions, ref_labels, n_rows, stream); stats = set_rf_metrics_classification(accuracy); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) print(stats); + if (ML::default_logger().should_log(ML::level_enum::debug)) print(stats); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg. 
@@ -300,7 +301,7 @@ class RandomForest { mean_squared_error, median_abs_error); stats = set_rf_metrics_regression(mean_abs_error, mean_squared_error, median_abs_error); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) print(stats); + if (ML::default_logger().should_log(ML::level_enum::debug)) print(stats); } return stats; diff --git a/cpp/src/solver/lars.cu b/cpp/src/solver/lars.cu index b7a478e098..7302ec2957 100644 --- a/cpp/src/solver/lars.cu +++ b/cpp/src/solver/lars.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ template void larsFit(const raft::handle_t& handle, float* Gram, int max_iter, float* coef_path, - int verbosity, + level_enum verbosity, int ld_X, int ld_G, float eps); @@ -54,7 +54,7 @@ template void larsFit(const raft::handle_t& handle, double* Gram, int max_iter, double* coef_path, - int verbosity, + level_enum verbosity, int ld_X, int ld_G, double eps); diff --git a/cpp/src/solver/lars_impl.cuh b/cpp/src/solver/lars_impl.cuh index 37e7c76df5..383a0c9110 100644 --- a/cpp/src/solver/lars_impl.cuh +++ b/cpp/src/solver/lars_impl.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -884,17 +884,17 @@ void larsFit(const raft::handle_t& handle, idx_t* active_idx, math_t* alphas, idx_t* n_active, - math_t* Gram = nullptr, - int max_iter = 500, - math_t* coef_path = nullptr, - int verbosity = 0, - idx_t ld_X = 0, - idx_t ld_G = 0, - math_t eps = -1) + math_t* Gram = nullptr, + int max_iter = 500, + math_t* coef_path = nullptr, + level_enum verbosity = ML::level_enum::off, + idx_t ld_X = 0, + idx_t ld_G = 0, + math_t eps = -1) { ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one"); ASSERT(n_rows > 0, "Parameter n_rows: number of rows cannot be less than one"); - ML::Logger::get().setLevel(verbosity); + ML::default_logger().set_level(verbosity); // Set default ld parameters if needed. if (ld_X == 0) ld_X = n_rows; diff --git a/cpp/src/svm/linear.cu b/cpp/src/svm/linear.cu index ac1d561ed0..ebe4521c31 100644 --- a/cpp/src/svm/linear.cu +++ b/cpp/src/svm/linear.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -416,7 +416,7 @@ LinearSVMModel LinearSVMModel::fit(const raft::handle_t& handle, qn_pams.change_tol = params.change_tol; qn_pams.linesearch_max_iter = params.linesearch_max_iter; qn_pams.lbfgs_memory = params.lbfgs_memory; - qn_pams.verbose = params.verbose; + qn_pams.verbose = static_cast(params.verbose); ML::GLM::qn_params qn_pams_logistic = qn_pams; qn_pams_logistic.loss = ML::GLM::QN_LOSS_LOGISTIC; diff --git a/cpp/src/svm/smosolver.h b/cpp/src/svm/smosolver.h index d2355d68a5..c21d45cfbc 100644 --- a/cpp/src/svm/smosolver.h +++ b/cpp/src/svm/smosolver.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -87,7 +87,7 @@ class SmoSolver { f(0, stream), y_label(0, stream) { - ML::Logger::get().setLevel(param.verbosity); + ML::default_logger().set_level(param.verbosity); } void GetNonzeroDeltaAlpha(const math_t* vec, diff --git a/cpp/src/svm/svc.cu b/cpp/src/svm/svc.cu index 7c90f0214b..56f7656626 100644 --- a/cpp/src/svm/svc.cu +++ b/cpp/src/svm/svc.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -139,7 +139,7 @@ SVC::SVC(raft::handle_t& handle, math_t cache_size, int max_iter, int nochange_steps, - int verbosity) + level_enum verbosity) : handle(handle), param(SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity}), kernel_params(kernel_params) diff --git a/cpp/src/svm/svm_api.cpp b/cpp/src/svm/svm_api.cpp index 2f6f2b6efc..392c15bedf 100644 --- a/cpp/src/svm/svm_api.cpp +++ b/cpp/src/svm/svm_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -54,7 +54,7 @@ cumlError_t cumlSpSvcFit(cumlHandle_t handle, param.max_iter = max_iter; param.nochange_steps = nochange_steps; param.tol = tol; - param.verbosity = verbosity; + param.verbosity = static_cast<ML::level_enum>(verbosity); raft::distance::kernels::KernelParams kernel_param; kernel_param.kernel = (raft::distance::kernels::KernelType)kernel; @@ -128,7 +128,7 @@ cumlError_t cumlDpSvcFit(cumlHandle_t handle, param.max_iter = max_iter; param.nochange_steps = nochange_steps; param.tol = tol; - param.verbosity = verbosity; + param.verbosity = static_cast<ML::level_enum>(verbosity); raft::distance::kernels::KernelParams kernel_param; kernel_param.kernel = (raft::distance::kernels::KernelType)kernel; diff --git a/cpp/src/svm/workingset.cuh b/cpp/src/svm/workingset.cuh index 318ee5e14d..ad707dc27a 100644 --- a/cpp/src/svm/workingset.cuh +++ b/cpp/src/svm/workingset.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -83,7 +83,7 @@ inline void WorkingSet::SimpleSelect( (int)8 * sizeof(math_t), stream); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("idx_sorted", f_idx_sorted.data(), n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -236,7 +236,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, available, n_train, idx.data(), n_already_selected); RAFT_CUDA_TRY(cudaPeekAtLastError()); } - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("avail", available, n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -250,7 +250,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, thrust::make_permutation_iterator(av_ptr, idx_ptr), thrust::make_permutation_iterator(av_ptr, idx_ptr + n_train), av_sorted_ptr); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("avail_sorted", available_sorted.data(), n_train, ss); CUML_LOG_DEBUG(ss.str().c_str()); @@ -276,7 +276,7 @@ inline int WorkingSet::GatherAvailable(int n_already_selected, raft::copy( idx.data() + n_already_selected, idx_tmp.data() + n_selected - n_copy, n_copy, stream); } - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) { + if (ML::default_logger().should_log(ML::level_enum::debug) && n_train < 20) { std::stringstream ss; raft::print_device_vector("selected", idx.data(), n_already_selected + n_copy, ss); CUML_LOG_DEBUG(ss.str().c_str()); diff --git a/cpp/src/tsne/tsne_runner.cuh b/cpp/src/tsne/tsne_runner.cuh index b735be0e63..cdbfdd2674 100644 --- a/cpp/src/tsne/tsne_runner.cuh +++ b/cpp/src/tsne/tsne_runner.cuh @@ -1,5 +1,5 @@ 
/* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ class TSNE_runner { this->p = input.d; this->Y = input.y; - ML::Logger::get().setLevel(params.verbosity); + ML::default_logger().set_level(params.verbosity); if (params.dim > 2 and params.algorithm != TSNE_ALGORITHM::EXACT) { params.algorithm = TSNE_ALGORITHM::EXACT; CUML_LOG_WARN( diff --git a/cpp/src/tsne/utils.cuh b/cpp/src/tsne/utils.cuh index 895fe412d2..0ad23bae11 100644 --- a/cpp/src/tsne/utils.cuh +++ b/cpp/src/tsne/utils.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -82,20 +82,20 @@ double SymmetrizeTime = 0, DistancesTime = 0, NormalizeTime = 0, PerplexityTime // To silence warnings #define START_TIMER \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ gettimeofday(&timecheck, NULL); \ start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ } #define END_TIMER(add_onto) \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ gettimeofday(&timecheck, NULL); \ end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ add_onto += (end - start); \ } #define PRINT_TIMES \ - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ + if (ML::default_logger().should_log(ML::level_enum::debug)) { \ double total = (SymmetrizeTime + DistancesTime + NormalizeTime + PerplexityTime + \ BoundingBoxKernel_time + ClearKernel1_time + TreeBuildingKernel_time + \ ClearKernel2_time + SummarizationKernel_time + SortKernel_time + \ diff 
--git a/cpp/src/umap/fuzzy_simpl_set/naive.cuh b/cpp/src/umap/fuzzy_simpl_set/naive.cuh index f872b80c4b..41e54f1f63 100644 --- a/cpp/src/umap/fuzzy_simpl_set/naive.cuh +++ b/cpp/src/umap/fuzzy_simpl_set/naive.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -314,7 +314,7 @@ void launcher(int n, raft::sparse::COO in(stream, n * n_neighbors, n, n); // check for logging in order to avoid the potentially costly `arr2Str` call! - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Smooth kNN Distances"); auto str = raft::arr2Str(sigmas.data(), 25, "sigmas", stream); CUML_LOG_DEBUG("%s", str.c_str()); @@ -342,7 +342,7 @@ void launcher(int n, n_neighbors); RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Compute Membership Strength"); std::stringstream ss; ss << in; diff --git a/cpp/src/umap/runner.cuh b/cpp/src/umap/runner.cuh index 0ceeb3acaa..01aa6f62c7 100644 --- a/cpp/src/umap/runner.cuh +++ b/cpp/src/umap/runner.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -102,7 +102,7 @@ void _get_graph(const raft::handle_t& handle, int k = params->n_neighbors; - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); CUML_LOG_DEBUG("n_neighbors=%d", params->n_neighbors); @@ -159,7 +159,7 @@ void _get_graph_supervised(const raft::handle_t& handle, int k = params->n_neighbors; - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); if (params->target_n_neighbors == -1) params->target_n_neighbors = params->n_neighbors; @@ -247,7 +247,7 @@ void _refine(const raft::handle_t& handle, value_t* embeddings) { cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); /** * Run simplicial set embedding to approximate low-dimensional representation @@ -263,7 +263,7 @@ void _init_and_refine(const raft::handle_t& handle, value_t* embeddings) { cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); // Initialize embeddings InitEmbed::run(handle, inputs.n, inputs.d, graph, params, embeddings, stream, params->init); @@ -282,7 +282,7 @@ void _fit(const raft::handle_t& handle, raft::common::nvtx::range fun_scope("umap::unsupervised::fit"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); UMAPAlgo::_get_graph(handle, inputs, params, graph); @@ -318,7 +318,7 @@ void _fit_supervised(const raft::handle_t& handle, raft::common::nvtx::range fun_scope("umap::supervised::fit"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); UMAPAlgo::_get_graph_supervised( handle, inputs, params, graph); @@ -360,7 +360,7 @@ void _transform(const raft::handle_t& handle, raft::common::nvtx::range 
fun_scope("umap::transform"); cudaStream_t stream = handle.get_stream(); - ML::Logger::get().setLevel(params->verbosity); + ML::default_logger().set_level(params->verbosity); CUML_LOG_DEBUG("Running transform"); diff --git a/cpp/src/umap/simpl_set_embed/algo.cuh b/cpp/src/umap/simpl_set_embed/algo.cuh index b6f2b5286d..6be8b0235b 100644 --- a/cpp/src/umap/simpl_set_embed/algo.cuh +++ b/cpp/src/umap/simpl_set_embed/algo.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -341,7 +341,7 @@ void launcher( make_epochs_per_sample(out.vals(), out.nnz, n_epochs, epochs_per_sample.data(), stream); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { std::stringstream ss; ss << raft::arr2Str(epochs_per_sample.data(), out.nnz, "epochs_per_sample", stream); CUML_LOG_DEBUG(ss.str().c_str()); diff --git a/cpp/src/umap/supervised.cuh b/cpp/src/umap/supervised.cuh index 21ed42f157..1a9739f280 100644 --- a/cpp/src/umap/supervised.cuh +++ b/cpp/src/umap/supervised.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -301,7 +301,7 @@ void perform_general_intersection(const raft::handle_t& handle, handle, y_inputs, y_inputs, knn_graph, params->target_n_neighbors, params, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Target kNN Graph"); std::stringstream ss1, ss2; ss1 << raft::arr2Str( @@ -326,7 +326,7 @@ void perform_general_intersection(const raft::handle_t& handle, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); - if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { + if (ML::default_logger().should_log(ML::level_enum::debug)) { CUML_LOG_DEBUG("Target Fuzzy Simplicial Set"); std::stringstream ss; ss << ygraph_coo; diff --git a/cpp/test/sg/genetic/evolution_test.cu b/cpp/test/sg/genetic/evolution_test.cu index 526acb5280..27d45645fc 100644 --- a/cpp/test/sg/genetic/evolution_test.cu +++ b/cpp/test/sg/genetic/evolution_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -58,7 +58,7 @@ class GeneticEvolutionTest : public ::testing::Test { protected: void SetUp() override { - ML::Logger::get().setLevel(CUML_LEVEL_INFO); + ML::default_logger().set_level(ML::level_enum::info); // Set training param vals hyper_params.population_size = 5000; diff --git a/cpp/test/sg/genetic/program_test.cu b/cpp/test/sg/genetic/program_test.cu index 1205baf9d9..b254b3ed01 100644 --- a/cpp/test/sg/genetic/program_test.cu +++ b/cpp/test/sg/genetic/program_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -642,7 +642,7 @@ TEST_F(GeneticProgramTest, ProgramExecution) MLCommon::CompareApprox compApprox(tolerance); // Enable debug logging - ML::Logger::get().setLevel(CUML_LEVEL_INFO); + ML::default_logger().set_level(ML::level_enum::info); // Allocate memory std::vector h_ypred(n_progs * n_samples, 0.0f); diff --git a/cpp/test/sg/hdbscan_test.cu b/cpp/test/sg/hdbscan_test.cu index d90e9f4314..3a86571f00 100644 --- a/cpp/test/sg/hdbscan_test.cu +++ b/cpp/test/sg/hdbscan_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ class HDBSCANTest : public ::testing::TestWithParam> { rmm::device_uvector out_probabilities(params.n_row, handle.get_stream()); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + default_logger().set_level(ML::level_enum::debug); HDBSCAN::Common::hdbscan_output out(handle, params.n_row, @@ -167,7 +167,7 @@ class ClusterCondensingTest : public ::testing::TestWithParam out_delta(params.n_row, handle.get_stream()); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + default_logger().set_level(ML::level_enum::debug); raft::sparse::op::coo_sort_by_weight( mst_src.data(), mst_dst.data(), mst_data.data(), (IdxT)mst_src.size(), handle.get_stream()); @@ -257,7 +257,7 @@ class ClusterSelectionTest : public ::testing::TestWithParam>::GetParam(); - Logger::get().setLevel(CUML_LEVEL_DEBUG); + default_logger().set_level(ML::level_enum::debug); rmm::device_uvector condensed_parents(params.condensed_parents.size(), handle.get_stream()); diff --git a/cpp/test/sg/lars_test.cu b/cpp/test/sg/lars_test.cu index d5d276eab8..85657eafea 100644 --- a/cpp/test/sg/lars_test.cu +++ b/cpp/test/sg/lars_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -481,9 +481,9 @@ class LarsTestFitPredict : public ::testing::Test { void testFitGram() { - auto stream = handle.get_stream(); - int max_iter = 10; - int verbosity = 0; + auto stream = handle.get_stream(); + int max_iter = 10; + ML::level_enum verbosity = ML::level_enum::off; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), @@ -512,9 +512,9 @@ class LarsTestFitPredict : public ::testing::Test { void testFitX() { - auto stream = handle.get_stream(); - int max_iter = 10; - int verbosity = 0; + auto stream = handle.get_stream(); + int max_iter = 10; + ML::level_enum verbosity = ML::level_enum::off; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), @@ -593,11 +593,11 @@ class LarsTestFitPredict : public ::testing::Test { void testFitLarge() { - auto stream = handle.get_stream(); - int n_rows = 65536; - int n_cols = 10; - int max_iter = n_cols; - int verbosity = 0; + auto stream = handle.get_stream(); + int n_rows = 65536; + int n_cols = 10; + int max_iter = n_cols; + ML::level_enum verbosity = ML::level_enum::off; int n_active; rmm::device_uvector X(n_rows * n_cols, stream); rmm::device_uvector y(n_rows, stream); diff --git a/cpp/test/sg/logger.cpp b/cpp/test/sg/logger.cpp index 1a286c4597..73278447c2 100644 --- a/cpp/test/sg/logger.cpp +++ b/cpp/test/sg/logger.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -29,15 +29,15 @@ TEST(Logger, Test) CUML_LOG_WARN("This is a warning message"); CUML_LOG_INFO("This is an info message"); - Logger::get().setLevel(CUML_LEVEL_WARN); - ASSERT_EQ(CUML_LEVEL_WARN, Logger::get().getLevel()); - Logger::get().setLevel(CUML_LEVEL_INFO); - ASSERT_EQ(CUML_LEVEL_INFO, Logger::get().getLevel()); + default_logger().set_level(ML::level_enum::warn); + ASSERT_EQ(ML::level_enum::warn, default_logger().level()); + default_logger().set_level(ML::level_enum::info); + ASSERT_EQ(ML::level_enum::info, default_logger().level()); - ASSERT_FALSE(Logger::get().shouldLogFor(CUML_LEVEL_TRACE)); - ASSERT_FALSE(Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)); - ASSERT_TRUE(Logger::get().shouldLogFor(CUML_LEVEL_INFO)); - ASSERT_TRUE(Logger::get().shouldLogFor(CUML_LEVEL_WARN)); + ASSERT_FALSE(default_logger().should_log(ML::level_enum::trace)); + ASSERT_FALSE(default_logger().should_log(ML::level_enum::debug)); + ASSERT_TRUE(default_logger().should_log(ML::level_enum::info)); + ASSERT_TRUE(default_logger().should_log(ML::level_enum::warn)); } std::string logged = ""; @@ -52,21 +52,20 @@ class LoggerTest : public ::testing::Test { { flushCount = 0; logged = ""; - Logger::get().setLevel(CUML_LEVEL_TRACE); + default_logger().set_level(ML::level_enum::trace); } void TearDown() override { - Logger::get().setCallback(nullptr); - Logger::get().setFlush(nullptr); - Logger::get().setLevel(CUML_LEVEL_INFO); + default_logger().sinks().pop_back(); + default_logger().set_level(ML::level_enum::info); } }; TEST_F(LoggerTest, callback) { std::string testMsg; - Logger::get().setCallback(exampleCallback); + default_logger().sinks().push_back(std::make_shared(exampleCallback)); testMsg = "This is a critical message"; CUML_LOG_CRITICAL(testMsg.c_str()); @@ -91,8 +90,9 @@ TEST_F(LoggerTest, callback) TEST_F(LoggerTest, flush) { - Logger::get().setFlush(exampleFlush); - Logger::get().flush(); + default_logger().sinks().push_back( + std::make_shared(exampleCallback, exampleFlush)); + 
default_logger().flush(); ASSERT_EQ(1, flushCount); } diff --git a/cpp/test/sg/quasi_newton.cu b/cpp/test/sg/quasi_newton.cu index bc2120af4e..a0930740fa 100644 --- a/cpp/test/sg/quasi_newton.cu +++ b/cpp/test/sg/quasi_newton.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -111,7 +111,7 @@ T run(const raft::handle_t& handle, T l2, T* w, SimpleDenseMat& z, - int verbosity, + level_enum verbosity, cudaStream_t stream) { qn_params pams; @@ -122,7 +122,7 @@ T run(const raft::handle_t& handle, pams.lbfgs_memory = 5; pams.penalty_l1 = l1; pams.penalty_l2 = l2; - pams.verbose = verbosity; + pams.verbose = static_cast(verbosity); int num_iters = 0; @@ -144,7 +144,7 @@ T run_api(const raft::handle_t& cuml_handle, T l2, T* w, SimpleDenseMat& z, - int verbosity, + level_enum verbosity, cudaStream_t stream) { qn_params pams; @@ -156,7 +156,7 @@ T run_api(const raft::handle_t& cuml_handle, pams.lbfgs_memory = 5; pams.penalty_l1 = l1; pams.penalty_l2 = l2; - pams.verbose = verbosity; + pams.verbose = static_cast(verbosity); pams.fit_intercept = fit_intercept; pams.loss = loss_type; @@ -226,7 +226,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = alpha; l2 = 0.0; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); @@ -240,7 +240,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -250,7 +250,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = 0; l2 = alpha; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 
0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); @@ -265,7 +265,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -274,7 +274,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = alpha; l2 = 0.0; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -289,7 +289,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -298,7 +298,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l1 = 0; l2 = alpha; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -313,7 +313,7 @@ TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -346,7 +346,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = 0.0; double obj_l1_b = 0.5407911382311313; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); fx = run_api(cuml_handle, @@ -359,7 +359,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, 
stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -367,7 +367,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = alpha; double obj_l2_b = 0.5721784062720949; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); fx = run_api(cuml_handle, @@ -380,7 +380,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -388,7 +388,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2 = 0.0; double obj_l1_no_b = 0.6606929813245878; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); fx = run_api(cuml_handle, @@ -401,7 +401,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -410,7 +410,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) double obj_l2_no_b = 0.6597171282106854; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); fx = run_api(cuml_handle, @@ -423,7 +423,7 @@ TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -457,7 +457,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231}; double b_l1_b = -0.08140861819001188; double obj_l1_b = 0.011136986298775138; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); 
ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); @@ -471,7 +471,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); @@ -481,7 +481,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double b_l2_b = -0.08062397391797513; double obj_l2_b = 0.004268621967866347; - fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); @@ -495,7 +495,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); @@ -504,7 +504,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813}; double obj_l1_no_b = 0.013981355746112447; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -519,7 +519,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); @@ -528,7 +528,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560}; double obj_l2_no_b = 0.007061261366969662; - fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); + fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE( checkParamsEqual(handle, 
&w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); @@ -543,7 +543,7 @@ TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) l2, w0.data, z, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } @@ -667,8 +667,10 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) pams.fit_intercept = loss.fit_intercept; double f_dense, f_sparse; - f_dense = run(handle, loss, *Xdev, *ydev, l1, l2, w0_dense.data, z_dense, 0, stream); - f_sparse = run(handle, loss, X_sparse, *ydev, l1, l2, w0_sparse.data, z_sparse, 0, stream); + f_dense = + run(handle, loss, *Xdev, *ydev, l1, l2, w0_dense.data, z_dense, level_enum::off, stream); + f_sparse = + run(handle, loss, X_sparse, *ydev, l1, l2, w0_sparse.data, z_sparse, level_enum::off, stream); ASSERT_TRUE(compApprox(f_dense, f_sparse)); qnPredict( @@ -702,7 +704,7 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) l2, w0_dense.data, z_dense, - 0, + level_enum::off, stream); f_sparse = run_api(cuml_handle, QN_LOSS_SOFTMAX, @@ -714,7 +716,7 @@ TEST_F(QuasiNewtonTest, dense_vs_sparse_logistic) l2, w0_sparse.data, z_sparse, - 0, + level_enum::off, stream); ASSERT_TRUE(compApprox(f_dense, f_sparse)); }; diff --git a/cpp/test/sg/svc_test.cu b/cpp/test/sg/svc_test.cu index 0caad107d5..e8ecb297c1 100644 --- a/cpp/test/sg/svc_test.cu +++ b/cpp/test/sg/svc_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -590,7 +590,7 @@ SvmParameter getDefaultSvmParameter() param.cache_size = 200; param.max_iter = -1; param.nochange_steps = 1000; - param.verbosity = CUML_LEVEL_INFO; + param.verbosity = ML::level_enum::info; param.epsilon = 0.1; param.svmType = C_SVC; return param; @@ -1381,7 +1381,7 @@ TYPED_TEST(SmoSolverTest, BlobPredict) rmm::device_uvector y_pred(n_pred, stream); make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2, centers.data()); - SVC svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50, CUML_LEVEL_INFO); + SVC svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50, ML::level_enum::info); svc.fit(x.data(), p.n_rows, p.n_cols, y.data()); // Create a different dataset for prediction @@ -1500,7 +1500,7 @@ TYPED_TEST(SmoSolverTest, DISABLED_MillionRows) make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2, centers.data()); const int max_iter = 2; SVC svc( - this->handle, p.C, p.tol, p.kernel_params, 0, max_iter, 50, CUML_LEVEL_DEBUG); + this->handle, p.C, p.tol, p.kernel_params, 0, max_iter, 50, ML::level_enum::debug); svc.fit(x.data(), p.n_rows, p.n_cols, y.data()); // predict on the same dataset svc.predict(x.data(), p.n_rows, p.n_cols, y_pred.data()); @@ -1955,7 +1955,7 @@ class SvrTest : public ::testing::Test { auto stream = this->handle.get_stream(); std::vector, smoOutput2>> data{ {SvrInput{ - SvmParameter{1, 0, 1, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 1, 10, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 1, // n_cols @@ -1965,7 +1965,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 2.1, {0.8}, {0, 1}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 10, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 10, 1, 1, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 1, // n_cols @@ -1975,7 +1975,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 
1.3, {0.8}, {1, 2}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 0, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 1, 1, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 2, // n_rows 2, // n_cols @@ -1985,7 +1985,7 @@ class SvrTest : public ::testing::Test { smoOutput2{2, {-0.8, 0.8}, 1.3, {0.8, 0.0}, {1, 2, 5, 5}, {0, 1}, {2.1, 2.9}}}, {SvrInput{ - SvmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-6, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols @@ -2001,7 +2001,7 @@ class SvrTest : public ::testing::Test { {0.7, 1.8, 2.9, 4, 5.1, 6.2, 7.3}}}, // Almost same as above, but with sample weights {SvrInput{ - SvmParameter{1, 0, 100, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-3, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols @@ -2012,7 +2012,7 @@ class SvrTest : public ::testing::Test { smoOutput2{ 6, {}, -15.5, {3.9}, {1.0, 2.0, 3.0, 4.0, 6.0, 7.0}, {0, 1, 2, 3, 5, 6}, {}}}, {SvrInput{ - SvmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR}, + SvmParameter{1, 0, 100, 10, 1e-6, ML::level_enum::info, 0.1, EPSILON_SVR}, KernelParams{LINEAR, 3, 1, 0}, 7, // n_rows 1, // n_cols diff --git a/cpp/test/sg/tsne_test.cu b/cpp/test/sg/tsne_test.cu index f1e3d47703..628d2da144 100644 --- a/cpp/test/sg/tsne_test.cu +++ b/cpp/test/sg/tsne_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, NVIDIA CORPORATION. + * Copyright (c) 2019-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -124,7 +124,7 @@ class TSNETest : public ::testing::TestWithParam { model_params.dim = 2; model_params.n_neighbors = 90; model_params.min_grad_norm = 1e-12; - model_params.verbosity = CUML_LEVEL_DEBUG; + model_params.verbosity = ML::level_enum::debug; model_params.metric = DEFAULT_DISTANCE_METRIC; // Allocate memory diff --git a/docs/source/api.rst b/docs/source/api.rst index a3a2ab73cc..44a29563f3 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -91,25 +91,25 @@ they are: - cuml.common.logger value - Verbosity level * - 0 - - cuml.common.logger.level_off + - cuml.common.logger.level_enum.off - Disables all log messages * - 1 - - cuml.common.logger.level_critical + - cuml.common.logger.level_enum.critical - Enables only critical messages * - 2 - - cuml.common.logger.level_error + - cuml.common.logger.level_enum.error - Enables all messages up to and including errors. * - 3 - - cuml.common.logger.level_warn + - cuml.common.logger.level_enum.warn - Enables all messages up to and including warnings. * - 4 or False - - cuml.common.logger.level_info + - cuml.common.logger.level_enum.info - Enables all messages up to and including information messages. * - 5 or True - - cuml.common.logger.level_debug + - cuml.common.logger.level_enum.debug - Enables all messages up to and including debug messages. * - 6 - - cuml.common.logger.level_trace + - cuml.common.logger.level_enum.trace - Enables all messages up to and including trace messages. diff --git a/python/cuml/cuml/cluster/dbscan.pyx b/python/cuml/cuml/cluster/dbscan.pyx index 07af1d142c..7d2c9dad8b 100644 --- a/python/cuml/cuml/cluster/dbscan.pyx +++ b/python/cuml/cuml/cluster/dbscan.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,12 +23,14 @@ cp = gpu_only_import('cupy') from cuml.internals.array import CumlArray from cuml.internals.base import UniversalBase +from cuml.internals import logger from cuml.common.doc_utils import generate_docstring from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.mixins import ClusterMixin from cuml.internals.mixins import CMajorInputTagMixin from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop +from cuml.internals.logger cimport level_enum IF GPUBUILD == 1: @@ -57,7 +59,7 @@ IF GPUBUILD == 1: float* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -72,7 +74,7 @@ IF GPUBUILD == 1: double* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -87,7 +89,7 @@ IF GPUBUILD == 1: float* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + cdef void fit(handle_t& handle, @@ -102,7 +104,7 @@ IF GPUBUILD == 1: double* sample_weight, size_t max_mbytes_per_batch, EpsNnMethod eps_nn_method, - int verbosity, + level_enum verbosity, bool opg) except + @@ -360,7 +362,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: fit(handle_[0], @@ -375,7 +377,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: @@ -392,7 +394,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) else: fit(handle_[0], @@ -407,7 +409,7 @@ class DBSCAN(UniversalBase, sample_weight_ptr, self.max_mbytes_per_batch, algorithm, - self.verbose, + self.verbose, opg) 
# make sure that the `fit` is complete before the following diff --git a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx index ff4df849c4..dcc5fefaeb 100644 --- a/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx +++ b/python/cuml/cuml/cluster/hdbscan/hdbscan.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -968,7 +968,7 @@ class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin): def __setstate__(self, state): super(HDBSCAN, self).__init__( handle=state["handle"], - verbose=state["verbose"] + verbose=state["_verbose"] ) if not state["fit_called_"]: diff --git a/python/cuml/cuml/common/kernel_utils.py b/python/cuml/cuml/common/kernel_utils.py index 89a861060e..652a860129 100644 --- a/python/cuml/cuml/common/kernel_utils.py +++ b/python/cuml/cuml/common/kernel_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -111,7 +111,7 @@ def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None): nvrtc_kernel_str, ) - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug(str(nvrtc_kernel_str)) return cp.RawKernel(nvrtc_kernel_str, kernel_name) diff --git a/python/cuml/cuml/dask/common/dask_df_utils.py b/python/cuml/cuml/dask/common/dask_df_utils.py index d608232ffa..f1daf83e03 100644 --- a/python/cuml/cuml/dask/common/dask_df_utils.py +++ b/python/cuml/cuml/dask/common/dask_df_utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ def to_dask_cudf(futures, client=None): c = default_client() if client is None else client # Convert a list of futures containing dfs back into a dask_cudf dfs = [d for d in futures if d.type != type(None)] # NOQA - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("to_dask_cudf dfs=%s" % str(dfs)) meta = c.submit(get_meta, dfs[0]) meta_local = meta.result() diff --git a/python/cuml/cuml/dask/common/input_utils.py b/python/cuml/cuml/dask/common/input_utils.py index 01c1a4e55f..40650f96a0 100644 --- a/python/cuml/cuml/dask/common/input_utils.py +++ b/python/cuml/cuml/dask/common/input_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -239,7 +239,7 @@ def _to_dask_cudf(futures, client=None): c = default_client() if client is None else client # Convert a list of futures containing dfs back into a dask_cudf dfs = [d for d in futures if d.type != type(None)] # NOQA - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("to_dask_cudf dfs=%s" % str(dfs)) meta_future = c.submit(_get_meta, dfs[0], pure=False) meta = meta_future.result() diff --git a/python/cuml/cuml/dask/manifold/umap.py b/python/cuml/cuml/dask/manifold/umap.py index 181bfb0728..bd3c13fd99 100644 --- a/python/cuml/cuml/dask/manifold/umap.py +++ b/python/cuml/cuml/dask/manifold/umap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -44,7 +44,7 @@ class UMAP(BaseEstimator, DelayedTransformMixin): >>> X, y = make_blobs(1000, 10, centers=42, cluster_std=0.1, ... dtype=np.float32, random_state=10) - >>> local_model = UMAP(random_state=10) + >>> local_model = UMAP(random_state=10, verbose=0) >>> selection = np.random.RandomState(10).choice(1000, 100) >>> X_train = X[selection] diff --git a/python/cuml/cuml/ensemble/randomforestclassifier.pyx b/python/cuml/cuml/ensemble/randomforestclassifier.pyx index 45bc4ce2e8..5198d60b28 100644 --- a/python/cuml/cuml/ensemble/randomforestclassifier.pyx +++ b/python/cuml/cuml/ensemble/randomforestclassifier.pyx @@ -1,6 +1,6 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,6 +34,7 @@ from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array +from cuml.internals.logger cimport level_enum from cuml.ensemble.randomforest_common import BaseRandomForestModel from cuml.ensemble.randomforest_common import _obtain_fil_model from cuml.ensemble.randomforest_shared cimport * @@ -61,7 +62,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int*, int, RF_params, - int) except + + level_enum) except + cdef void fit(handle_t& handle, RandomForestMetaData[double, int]*, @@ -71,7 +72,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int*, int, RF_params, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[float, int] *, @@ -79,7 +80,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, int*, - bool) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[double, int]*, @@ -87,21 +88,21 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace 
"ML": int, int, int*, - bool) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[float, int]*, int*, int, int*, - bool) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[double, int]*, int*, int, int*, - bool) except + + level_enum) except + class RandomForestClassifier(BaseRandomForestModel, @@ -285,7 +286,7 @@ class RandomForestClassifier(BaseRandomForestModel, state["rf_params64"] = rf_forest64.rf_params state["n_cols"] = self.n_cols - state["verbose"] = self.verbose + state["_verbose"] = self._verbose state["treelite_serialized_model"] = self.treelite_serialized_model state["treelite_handle"] = None state["split_criterion"] = self.split_criterion @@ -296,7 +297,7 @@ class RandomForestClassifier(BaseRandomForestModel, super(RandomForestClassifier, self).__init__( split_criterion=state["split_criterion"], handle=state["handle"], - verbose=state["verbose"]) + verbose=state["_verbose"]) cdef RandomForestMetaData[float, int] *rf_forest = \ new RandomForestMetaData[float, int]() cdef RandomForestMetaData[double, int] *rf_forest64 = \ @@ -476,7 +477,7 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, self.num_classes, rf_params, - self.verbose) + self.verbose) elif self.dtype == np.float64: rf_params64 = rf_params @@ -488,7 +489,7 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, self.num_classes, rf_params64, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," @@ -528,7 +529,7 @@ class RandomForestClassifier(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: predict(handle_[0], @@ -537,7 +538,7 @@ class RandomForestClassifier(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," " but input of type '%s' passed." 
@@ -765,14 +766,14 @@ class RandomForestClassifier(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: self.stats = score(handle_[0], rf_forest64, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only np.float32 and np.float64 input," " but input of type '%s' passed." diff --git a/python/cuml/cuml/ensemble/randomforestregressor.pyx b/python/cuml/cuml/ensemble/randomforestregressor.pyx index 96a197e5c5..6e3a13d0fb 100644 --- a/python/cuml/cuml/ensemble/randomforestregressor.pyx +++ b/python/cuml/cuml/ensemble/randomforestregressor.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,6 +30,7 @@ from cuml.internals.array import CumlArray import cuml.internals from cuml.internals.mixins import RegressorMixin +from cuml.internals.logger cimport level_enum from cuml.common.doc_utils import generate_docstring from cuml.common.doc_utils import insert_into_docstring from cuml.common import input_to_cuml_array @@ -59,7 +60,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, float*, RF_params, - int) except + + level_enum) except + cdef void fit(handle_t& handle, RandomForestMetaData[double, double]*, @@ -68,7 +69,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, double*, RF_params, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[float, float] *, @@ -76,7 +77,7 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, float*, - int) except + + level_enum) except + cdef void predict(handle_t& handle, RandomForestMetaData[double, double]*, @@ -84,21 +85,21 @@ cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML": int, int, double*, - int) except + 
+ level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[float, float]*, float*, int, float*, - int) except + + level_enum) except + cdef RF_metrics score(handle_t& handle, RandomForestMetaData[double, double]*, double*, int, double*, - int) except + + level_enum) except + class RandomForestRegressor(BaseRandomForestModel, @@ -289,7 +290,7 @@ class RandomForestRegressor(BaseRandomForestModel, state["rf_params64"] = rf_forest64.rf_params state['n_cols'] = self.n_cols - state["verbose"] = self.verbose + state["_verbose"] = self._verbose state["treelite_serialized_model"] = self.treelite_serialized_model state['handle'] = self.handle state["treelite_handle"] = None @@ -300,7 +301,7 @@ class RandomForestRegressor(BaseRandomForestModel, def __setstate__(self, state): super(RandomForestRegressor, self).__init__( split_criterion=state["split_criterion"], - handle=state["handle"], verbose=state['verbose']) + handle=state["handle"], verbose=state['_verbose']) cdef RandomForestMetaData[float, float] *rf_forest = \ new RandomForestMetaData[float, float]() cdef RandomForestMetaData[double, double] *rf_forest64 = \ @@ -462,7 +463,7 @@ class RandomForestRegressor(BaseRandomForestModel, self.n_cols, y_ptr, rf_params, - self.verbose) + self.verbose) else: rf_params64 = rf_params @@ -473,7 +474,7 @@ class RandomForestRegressor(BaseRandomForestModel, self.n_cols, y_ptr, rf_params64, - self.verbose) + self.verbose) # make sure that the `fit` is complete before the following delete # call happens self.handle.sync() @@ -508,7 +509,7 @@ class RandomForestRegressor(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: predict(handle_[0], @@ -517,7 +518,7 @@ class RandomForestRegressor(BaseRandomForestModel, n_rows, n_cols, preds_ptr, - self.verbose) + self.verbose) else: raise TypeError("supports only float32 and float64 input," " but input of type '%s' passed." 
@@ -685,7 +686,7 @@ class RandomForestRegressor(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) elif self.dtype == np.float64: self.temp_stats = score(handle_[0], @@ -693,7 +694,7 @@ class RandomForestRegressor(BaseRandomForestModel, y_ptr, n_rows, preds_ptr, - self.verbose) + self.verbose) if self.accuracy_metric == 'median_ae': stats = self.temp_stats['median_abs_error'] diff --git a/python/cuml/cuml/experimental/accel/__init__.py b/python/cuml/cuml/experimental/accel/__init__.py index cd3c6abf51..9781f88fcc 100644 --- a/python/cuml/cuml/experimental/accel/__init__.py +++ b/python/cuml/cuml/experimental/accel/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2024, NVIDIA CORPORATION. +# Copyright (c) 2024-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ def _install_for_library(library_name): def install(): """Enable cuML Accelerator Mode.""" - logger.set_level(logger.level_info) + logger.set_level(logger.level_enum.info) logger.set_pattern("%v") logger.info("cuML: Installing experimental accelerator...") diff --git a/python/cuml/cuml/experimental/linear_model/lars.pyx b/python/cuml/cuml/experimental/linear_model/lars.pyx index 4a836740c7..6efbe2d50f 100644 --- a/python/cuml/cuml/experimental/linear_model/lars.pyx +++ b/python/cuml/cuml/experimental/linear_model/lars.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,7 +23,8 @@ from cuml.internals.safe_imports import cpu_only_import np = cpu_only_import('numpy') from cuml.internals.safe_imports import gpu_only_import cp = gpu_only_import('cupy') -import cuml.internals.logger as logger +from cuml.internals import logger +from cuml.internals cimport logger import cuml.internals from libcpp cimport nullptr @@ -43,7 +44,7 @@ cdef extern from "cuml/solvers/lars.hpp" namespace "ML::Solver::Lars": const handle_t& handle, math_t* X, int n_rows, int n_cols, const math_t* y, math_t* beta, int* active_idx, math_t* alphas, int* n_active, math_t* Gram, int max_iter, math_t* coef_path, - int verbosity, int ld_X, int ld_G, math_t epsilon) except + + logger.level_enum verbosity, int ld_X, int ld_G, math_t epsilon) except + cdef void larsPredict[math_t]( const handle_t& handle, const math_t* X, int n_rows, int n_cols, @@ -270,13 +271,13 @@ class Lars(Base, RegressorMixin): larsFit(handle_[0], X_ptr, n_rows, self.n_cols, y_ptr, beta_ptr, active_idx_ptr, alphas_ptr, &n_active, Gram_ptr, - max_iter, coef_path_ptr, self.verbose, ld_X, + max_iter, coef_path_ptr, self.verbose, ld_X, ld_G, self.eps) else: larsFit(handle_[0], X_ptr, n_rows, self.n_cols, y_ptr, beta_ptr, active_idx_ptr, alphas_ptr, &n_active, Gram_ptr, - max_iter, coef_path_ptr, self.verbose, + max_iter, coef_path_ptr, self.verbose, ld_X, ld_G, self.eps) self.n_active = n_active self.n_iter_ = n_active diff --git a/python/cuml/cuml/explainer/base.pyx b/python/cuml/cuml/explainer/base.pyx index 33dd8da68f..3aae1d02a7 100644 --- a/python/cuml/cuml/explainer/base.pyx +++ b/python/cuml/cuml/explainer/base.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -125,13 +125,13 @@ class SHAPBase(): output_type=None): if verbose is True: - self.verbose = logger.level_debug + self.verbose = logger.level_enum.debug elif verbose is False: - self.verbose = logger.level_error + self.verbose = logger.level_enum.error else: self.verbose = verbose - if self.verbose >= logger.level_debug: + if self.verbose >= logger.level_enum.debug: self.time_performance = True else: self.time_performance = False diff --git a/python/cuml/cuml/internals/base.pyx b/python/cuml/cuml/internals/base.pyx index 94fc5fa105..a2a7374a1f 100644 --- a/python/cuml/cuml/internals/base.pyx +++ b/python/cuml/cuml/internals/base.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,6 +40,7 @@ import cuml.common from cuml.common.sparse_utils import is_sparse import cuml.internals.logger as logger import cuml.internals +from cuml.internals import api_context_managers import cuml.internals.input_utils from cuml.internals.available_devices import is_cuda_available from cuml.internals.device_type import DeviceType @@ -74,6 +75,39 @@ IF GPUBUILD == 1: import cuml.common.cuda +class VerbosityDescriptor: + """Descriptor for ensuring correct type is used for verbosity + + This descriptor ensures that when the 'verbose' attribute of a cuML + estimator is accessed external to the cuML API, an integer is returned + (consistent with Scikit-Learn's API for verbosity). Internal to the API, an + enum is used. Scikit-Learn's numerical values for verbosity are the inverse + of those used by spdlog, so the numerical value is also inverted internal + to the cuML API. This ensures that cuML code treats verbosity values as + expected for an spdlog-based codebase. 
+ """ + def __get__(self, obj, cls=None): + if api_context_managers.in_internal_api(): + return logger.level_enum(6 - obj._verbose) + else: + return obj._verbose + + def __set__(self, obj, value): + if api_context_managers.in_internal_api(): + assert isinstance(value, logger.level_enum), ( + "The log level should always be provided as a level_enum, " + "not an integer" + ) + obj._verbose = 6 - int(value) + else: + if isinstance(value, logger.level_enum): + raise ValueError( + "The log level should always be provided as an integer, " + "not using the enum" + ) + obj._verbose = value + + class Base(TagsMixin, metaclass=cuml.internals.BaseMetaClass): """ @@ -223,18 +257,30 @@ class Base(TagsMixin, ELSE: self.handle = None + # The following manipulation of the root_cm ensures that the verbose + # descriptor sees any set or get of the verbose attribute as happening + # internal to the cuML API. Currently, __init__ calls do not take place + # within an api context manager, so setting "verbose" here would + # otherwise appear to be external to the cuML API. This behavior will + # be corrected with the update of cuML's API context manager + # infrastructure in https://github.com/rapidsai/cuml/pull/6189. + GlobalSettings().prev_root_cm = GlobalSettings().root_cm + GlobalSettings().root_cm = True IF GPUBUILD == 1: # Internally, self.verbose follows the spdlog/c++ standard of # 0 is most logging, and logging decreases from there. # So if the user passes an int value for logging, we convert it. if verbose is True: - self.verbose = logger.level_debug + self.verbose = logger.level_enum.debug elif verbose is False: - self.verbose = logger.level_info + self.verbose = logger.level_enum.info else: - self.verbose = verbose + self.verbose = logger.level_enum(6 - verbose) ELSE: - self.verbose = verbose + self.verbose = logger.level_enum(6 - verbose) + # Please see above note on manipulation of the root_cm. 
This should be + # rendered unnecessary with https://github.com/rapidsai/cuml/pull/6189. + GlobalSettings().root_cm = GlobalSettings().prev_root_cm self.output_type = _check_output_type_str( cuml.global_settings.output_type @@ -252,6 +298,8 @@ class Base(TagsMixin, if nvtx_benchmark and nvtx_benchmark.lower() == 'true': self.set_nvtx_annotations() + verbose = VerbosityDescriptor() + def __repr__(self): """ Pretty prints the arguments of a class using Scikit-learn standard :) @@ -298,6 +346,14 @@ class Base(TagsMixin, variables = self._get_param_names() for key in variables: var_value = getattr(self, key, None) + # We are currently internal to the cuML API, but the value we + # return will immediately be returned external to the API, so we + # must perform the translation from enum to integer before + # returning the value. Ordinarily, this is handled by + # VerbosityDescriptor for direct access to the verbose + # attribute. + if key == "verbose": + var_value = 6 - int(var_value) params[key] = var_value return params @@ -315,6 +371,9 @@ class Base(TagsMixin, if key not in variables: raise ValueError("Bad param '%s' passed to set_params" % key) else: + # Switch verbose to enum since we are now internal to cuML API + if key == "verbose": + value = logger.level_enum(6 - int(value)) setattr(self, key, value) return self diff --git a/python/cuml/cuml/internals/logger.pxd b/python/cuml/cuml/internals/logger.pxd new file mode 100644 index 0000000000..6556cb0505 --- /dev/null +++ b/python/cuml/cuml/internals/logger.pxd @@ -0,0 +1,97 @@ +# +# Copyright (c) 2020-2025, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# distutils: language = c++ + + +from libcpp.string cimport string + +IF GPUBUILD == 1: + import sys + from libcpp.memory cimport make_shared, shared_ptr + from libcpp cimport bool + + cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: + + cpdef enum class level_enum: + trace + debug + info + warn + error + critical + off + n_levels + + cdef cppclass sink: + pass + + ctypedef shared_ptr[sink] sink_ptr + + # Spoof the logger as a namespace to get the sink_vector generated correctly. + cdef extern from "cuml/common/logger.hpp" namespace "ML::logger" nogil: + + cdef cppclass sink_vector: + void push_back(const sink_ptr& sink) except + + void pop_back() except + + + cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: + cdef cppclass logger: + logger(string name, string filename) except + + void set_level(level_enum log_level) except + + void set_pattern(const string& pattern) + level_enum level() except + + void flush() except + + void flush_on(level_enum level) except + + level_enum flush_level() except + + bool should_log(level_enum msg_level) except + + void log(level_enum lvl, const string& fmt, ...) 
+ const sink_vector& sinks() const + # string getPattern() const + # void flush() + + cdef logger& default_logger() except + + cdef string default_pattern() except + + + ctypedef void(*log_callback_t)(int, const char*) except * with gil + ctypedef void(*flush_callback_t)() except * with gil + + cdef cppclass callback_sink_mt: + callback_sink_mt(log_callback_t callback, flush_callback_t flush) except + + + cdef void _log_callback(int lvl, const char * msg) with gil + cdef void _log_flush() with gil + +ELSE: + cpdef enum class level_enum: + trace = 0 + debug = 1 + info = 2 + warn = 3 + error = 4 + critical = 5 + off = 6 + n_levels = 7 + + +cdef class LogLevelSetter: + """Internal "context manager" object for restoring previous log level""" + cdef level_enum prev_log_level + + +cdef class PatternSetter: + """Internal "context manager" object for restoring previous log pattern""" + cdef string prev_pattern diff --git a/python/cuml/cuml/internals/logger.pyx b/python/cuml/cuml/internals/logger.pyx index 6f76f924d6..0bc09126bd 100644 --- a/python/cuml/cuml/internals/logger.pyx +++ b/python/cuml/cuml/internals/logger.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,66 +17,11 @@ # distutils: language = c++ -IF GPUBUILD == 0: - import logging +import logging IF GPUBUILD == 1: import sys - from libcpp.string cimport string - from libcpp cimport bool - - cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil: - cdef cppclass Logger: - @staticmethod - Logger& get() - void setLevel(int level) - void setPattern(const string& pattern) - void setCallback(void(*callback)(int, char*)) - void setFlush(void(*flush)()) - void setCallback(void(*callback)(int, const char*) except *) - void setFlush(void(*flush)() except *) - bool shouldLogFor(int level) const - int getLevel() const - string getPattern() const - void flush() - - cdef extern from "cuml/common/logger.hpp" nogil: - void CUML_LOG_TRACE(const char* fmt, ...) - void CUML_LOG_DEBUG(const char* fmt, ...) - void CUML_LOG_INFO(const char* fmt, ...) - void CUML_LOG_WARN(const char* fmt, ...) - void CUML_LOG_ERROR(const char* fmt, ...) - void CUML_LOG_CRITICAL(const char* fmt, ...) - - cdef int CUML_LEVEL_TRACE - cdef int CUML_LEVEL_DEBUG - cdef int CUML_LEVEL_INFO - cdef int CUML_LEVEL_WARN - cdef int CUML_LEVEL_ERROR - cdef int CUML_LEVEL_CRITICAL - cdef int CUML_LEVEL_OFF - - """Enables all log messages upto and including `trace()`""" - level_trace = CUML_LEVEL_TRACE - - """Enables all log messages upto and including `debug()`""" - level_debug = CUML_LEVEL_DEBUG - - """Enables all log messages upto and including `info()`""" - level_info = CUML_LEVEL_INFO - - """Enables all log messages upto and including `warn()`""" - level_warn = CUML_LEVEL_WARN - - """Enables all log messages upto and include `error()`""" - level_error = CUML_LEVEL_ERROR - - """Enables only `critical()` messages""" - level_critical = CUML_LEVEL_CRITICAL - - """Disables all log messages""" - level_off = CUML_LEVEL_OFF cdef void _log_callback(int lvl, const char * msg) with gil: """ @@ -99,10 +44,10 @@ IF GPUBUILD == 1: sys.stdout.flush() -class LogLevelSetter: +cdef class LogLevelSetter: """Internal 
"context manager" object for restoring previous log level""" - def __init__(self, prev_log_level): + def __cinit__(self, level_enum prev_log_level): self.prev_log_level = prev_log_level def __enter__(self): @@ -110,7 +55,7 @@ class LogLevelSetter: def __exit__(self, a, b, c): IF GPUBUILD == 1: - Logger.get().setLevel(self.prev_log_level) + default_logger().set_level(self.prev_log_level) def set_level(level): @@ -125,17 +70,16 @@ def set_level(level): # regular usage of setting a logging level for all subsequent logs # in this case, it will enable all logs upto and including `info()` - logger.set_level(logger.level_info) + logger.set_level(logger.level_enum.info) # in case one wants to temporarily set the log level for a code block - with logger.set_level(logger.level_debug) as _: + with logger.set_level(logger.level_enum.debug) as _: logger.debug("Hello world!") Parameters ---------- - level : int - Logging level to be set. \ - It must be one of cuml.internals.logger.LEVEL_* + level : level_enum + Logging level to be set. Returns ------- @@ -144,13 +88,13 @@ def set_level(level): level for a code section, as described in the example section above. """ IF GPUBUILD == 1: - cdef int prev = Logger.get().getLevel() + cdef level_enum prev = default_logger().level() context_object = LogLevelSetter(prev) - Logger.get().setLevel(level) + default_logger().set_level(level) return context_object -class PatternSetter: +cdef class PatternSetter: """Internal "context manager" object for restoring previous log pattern""" def __init__(self, prev_pattern): @@ -161,8 +105,7 @@ class PatternSetter: def __exit__(self, a, b, c): IF GPUBUILD == 1: - cdef string s = self.prev_pattern.encode("utf-8") - Logger.get().setPattern(s) + default_logger().set_pattern(self.prev_pattern) def set_pattern(pattern): @@ -195,10 +138,16 @@ def set_pattern(pattern): pattern for a code section, as described in the example section above. 
""" IF GPUBUILD == 1: - cdef string prev = Logger.get().getPattern() - context_object = PatternSetter(prev.decode("UTF-8")) + # TODO: We probably can't implement this exact API because you can't + # get the pattern from a spdlog logger since it could be different for + # every sink (conversely, you could set because it forces every sink to + # be the same). The best we can probably do is revert to the default + # pattern. + cdef string prev = default_pattern() + # TODO: Need to cast to a Python string? + context_object = PatternSetter(prev) cdef string s = pattern.encode("UTF-8") - Logger.get().setPattern(s) + default_logger().set_pattern(s) return context_object @@ -212,19 +161,38 @@ def should_log_for(level): .. code-block:: python - if logger.should_log_for(level_info): + if logger.should_log_for(level_enum.info): # which could waste precious CPU cycles my_message = construct_message() logger.info(my_message) Parameters ---------- - level : int - Logging level to be set. \ - It must be one of cuml.common.logger.level_* + level : level_enum + Logging level to be set. """ IF GPUBUILD == 1: - return Logger.get().shouldLogFor(level) + return default_logger().should_log(level) + + +def _log(level_enum lvl, msg, default_func): + """ + Internal function to log a message at a given level. + + Parameters + ---------- + lvl : level_enum + Logging level to be set. + msg : str + Message to be logged. + default_func : function + Default logging function to be used if GPU build is disabled. + """ + IF GPUBUILD == 1: + cdef string s = msg.encode("UTF-8") + default_logger().log(lvl, s) + ELSE: + default_func(msg) def trace(msg): @@ -243,11 +211,8 @@ def trace(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_TRACE(s.c_str()) - ELSE: - logging.debug(msg) + # No trace level in Python so we use the closest thing, debug. 
+ _log(level_enum.trace, msg, logging.debug) def debug(msg): @@ -266,11 +231,7 @@ def debug(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_DEBUG(s.c_str()) - ELSE: - logging.debug(msg) + _log(level_enum.debug, msg, logging.debug) def info(msg): @@ -289,11 +250,7 @@ def info(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_INFO(s.c_str()) - ELSE: - logging.info(msg) + _log(level_enum.info, msg, logging.info) def warn(msg): @@ -312,11 +269,7 @@ def warn(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_WARN(s.c_str()) - ELSE: - logging.warning(msg) + _log(level_enum.warn, msg, logging.warn) def error(msg): @@ -335,11 +288,7 @@ def error(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_ERROR(s.c_str()) - ELSE: - logging.error(msg) + _log(level_enum.error, msg, logging.error) def critical(msg): @@ -358,11 +307,7 @@ def critical(msg): msg : str Message to be logged. """ - IF GPUBUILD == 1: - cdef string s = msg.encode("UTF-8") - CUML_LOG_CRITICAL(s.c_str()) - ELSE: - logging.critical(msg) + _log(level_enum.critical, msg, logging.critical) def flush(): @@ -370,10 +315,9 @@ def flush(): Flush the logs. 
""" IF GPUBUILD == 1: - Logger.get().flush() + default_logger().flush() IF GPUBUILD == 1: # Set callback functions to handle redirected sys.stdout in Python - Logger.get().setCallback(_log_callback) - Logger.get().setFlush(_log_flush) + default_logger().sinks().push_back( make_shared[callback_sink_mt](_log_callback, _log_flush)) diff --git a/python/cuml/cuml/linear_model/logistic_regression.pyx b/python/cuml/cuml/linear_model/logistic_regression.pyx index 667cc66a5d..e968093c8e 100644 --- a/python/cuml/cuml/linear_model/logistic_regression.pyx +++ b/python/cuml/cuml/linear_model/logistic_regression.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ from cuml.internals.mixins import ClassifierMixin, FMajorInputTagMixin, SparseIn from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.array import CumlArray from cuml.common.doc_utils import generate_docstring -import cuml.internals.logger as logger +from cuml.internals import logger from cuml.common import input_to_cuml_array from cuml.common import using_output_type from cuml.internals.api_decorators import device_interop_preparation @@ -278,7 +278,7 @@ class LogisticRegression(UniversalBase, handle=self.handle, ) - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): self.verb_prefix = "CY::" logger.debug(self.verb_prefix + "Estimator parameters:") logger.debug(pprint.pformat(self.__dict__)) @@ -354,24 +354,24 @@ class LogisticRegression(UniversalBase, else: loss = "sigmoid" - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug(self.verb_prefix + "Setting loss to " + str(loss)) self.solver_model.loss = loss - if logger.should_log_for(logger.level_debug): + if 
logger.should_log_for(logger.level_enum.debug): logger.debug(self.verb_prefix + "Calling QN fit " + str(loss)) self.solver_model.fit(X, y_m, sample_weight=sample_weight, convert_dtype=convert_dtype) # coefficients and intercept are contained in the same array - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug( self.verb_prefix + "Setting coefficients " + str(loss) ) - if logger.should_log_for(logger.level_trace): + if logger.should_log_for(logger.level_enum.trace): with using_output_type("cupy"): logger.trace(self.verb_prefix + "Coefficients: " + str(self.solver_model.coef_)) @@ -566,7 +566,7 @@ class LogisticRegression(UniversalBase, def __setstate__(self, state): super().__init__(handle=None, - verbose=state["verbose"]) + verbose=state["_verbose"]) self.__dict__.update(state) def get_attr_names(self): diff --git a/python/cuml/cuml/manifold/simpl_set.pyx b/python/cuml/cuml/manifold/simpl_set.pyx index b0be2d5de7..ffd1d13463 100644 --- a/python/cuml/cuml/manifold/simpl_set.pyx +++ b/python/cuml/cuml/manifold/simpl_set.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -165,7 +165,7 @@ def fuzzy_simplicial_set(X, umap_params.p = 2.0 else: umap_params.p = metric_kwds.get("p", 2.0) - umap_params.verbosity = verbose + umap_params.verbosity = verbose X_m, _, _, _ = \ input_to_cuml_array(X, @@ -366,7 +366,7 @@ def simplicial_set_embedding( umap_params.target_metric = MetricType.CATEGORICAL umap_params.target_weight = output_metric_kwds['p'] \ if 'p' in output_metric_kwds else 0.5 - umap_params.verbosity = verbose + umap_params.verbosity = verbose X_m, _, _, _ = \ input_to_cuml_array(data, diff --git a/python/cuml/cuml/manifold/t_sne.pyx b/python/cuml/cuml/manifold/t_sne.pyx index 01ea7c0957..7ff8702a2c 100644 --- a/python/cuml/cuml/manifold/t_sne.pyx +++ b/python/cuml/cuml/manifold/t_sne.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -31,7 +31,8 @@ from cuml.internals.base import UniversalBase from pylibraft.common.handle cimport handle_t from cuml.internals.api_decorators import device_interop_preparation from cuml.internals.api_decorators import enable_device_interop -import cuml.internals.logger as logger +from cuml.internals import logger +from cuml.internals cimport logger from cuml.internals.array import CumlArray @@ -82,7 +83,7 @@ cdef extern from "cuml/manifold/tsne.h" namespace "ML": float pre_momentum, float post_momentum, long long random_state, - int verbosity, + logger.level_enum verbosity, TSNE_INIT init, bool square_distances, DistanceType metric, @@ -512,7 +513,7 @@ class TSNE(UniversalBase, self.pre_learning_rate = max(n / 3.0, 1) self.post_learning_rate = self.pre_learning_rate self.early_exaggeration = 24.0 if n > 10000 else 12.0 - if logger.should_log_for(logger.level_debug): + if logger.should_log_for(logger.level_enum.debug): logger.debug("New n_neighbors = {}, learning_rate = {}, " "exaggeration = {}" 
.format(self.n_neighbors, self.pre_learning_rate, @@ -616,7 +617,7 @@ class TSNE(UniversalBase, params.pre_momentum = self.pre_momentum params.post_momentum = self.post_momentum params.random_state = seed - params.verbosity = self.verbose + params.verbosity = self.verbose params.square_distances = self.square_distances params.algorithm = algo @@ -693,7 +694,7 @@ class TSNE(UniversalBase, def __setstate__(self, state): super(TSNE, self).__init__(handle=None, - verbose=state['verbose']) + verbose=state['_verbose']) self.__dict__.update(state) return state diff --git a/python/cuml/cuml/manifold/umap.pyx b/python/cuml/cuml/manifold/umap.pyx index 7af507eb08..079b270d0a 100644 --- a/python/cuml/cuml/manifold/umap.pyx +++ b/python/cuml/cuml/manifold/umap.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,6 +39,7 @@ import cuml.internals from cuml.internals.base import UniversalBase from cuml.common.doc_utils import generate_docstring from cuml.internals import logger +from cuml.internals.logger cimport level_enum from cuml.internals.available_devices import is_cuda_available from cuml.internals.input_utils import input_to_cuml_array from cuml.internals.array import CumlArray @@ -434,7 +435,18 @@ class UMAP(UniversalBase, self.precomputed_knn = extract_knn_infos(precomputed_knn, n_neighbors) - logger.set_level(verbose) + # We need to set this log level here so that it is propagated in time + # for the logger.info call below. We cannot use the verbose parameter + # directly because Base.__init__ contains the logic for converting + # boolean values to suitable integers. 
We access self._verbose instead + # of self.verbose because due to the same issues described in + # Base.__init__'s logic for setting verbose, this code is not + # considered to be within a root context and therefore considered + # external. Rather than mucking with the decorator, for this specific + # case since we're trying to set the properties of the underlying + # logger we may as well access our underlying value directly and + # perform the necessary arithmetic. + logger.set_level(logger.level_enum(6 - self._verbose)) if build_algo == "auto" or build_algo == "brute_force_knn" or build_algo == "nn_descent": if self.deterministic and build_algo == "auto": @@ -470,7 +482,7 @@ class UMAP(UniversalBase, umap_params.repulsion_strength = cls.repulsion_strength umap_params.negative_sample_rate = cls.negative_sample_rate umap_params.transform_queue_size = cls.transform_queue_size - umap_params.verbosity = cls.verbose + umap_params.verbosity = cls.verbose umap_params.a = cls.a umap_params.b = cls.b if cls.init == "spectral": diff --git a/python/cuml/cuml/manifold/umap_utils.pxd b/python/cuml/cuml/manifold/umap_utils.pxd index edf8039053..498e495733 100644 --- a/python/cuml/cuml/manifold/umap_utils.pxd +++ b/python/cuml/cuml/manifold/umap_utils.pxd @@ -1,5 +1,5 @@ # -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -24,6 +24,7 @@ from libc.stdint cimport uint64_t, uintptr_t, int64_t from libcpp cimport bool from libcpp.memory cimport shared_ptr from cuml.metrics.distance_type cimport DistanceType +from cuml.internals.logger cimport level_enum cdef extern from "cuml/manifold/umapparams.h" namespace "ML::UMAPParams": @@ -61,7 +62,7 @@ cdef extern from "cuml/manifold/umapparams.h" namespace "ML": float repulsion_strength, int negative_sample_rate, float transform_queue_size, - int verbosity, + level_enum verbosity, float a, float b, float initial_alpha, diff --git a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx index f319a6adac..129ffa9ca7 100644 --- a/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_classifier_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -153,7 +153,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG): cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) knn_classify( handle_[0], out_result_local_parts, @@ -265,7 +265,7 @@ class KNeighborsClassifierMG(NearestNeighborsMG): p_cai.ptr) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn_classify( diff --git a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx index 48accf5730..ff7a1bc193 100644 --- a/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx +++ b/python/cuml/cuml/neighbors/kneighbors_regressor_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -127,7 +127,7 @@ class KNeighborsRegressorMG(NearestNeighborsMG): o_cai.ptr, n_rows * n_outputs)) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn_regress( diff --git a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx index 5596c1e2f6..fff40af0e2 100644 --- a/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx +++ b/python/cuml/cuml/neighbors/nearest_neighbors_mg.pyx @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -125,7 +125,7 @@ class NearestNeighborsMG(NearestNeighbors): result = type(self).alloc_local_output(local_query_rows, self.n_neighbors) cdef handle_t* handle_ = self.handle.getHandle() - is_verbose = logger.should_log_for(logger.level_debug) + is_verbose = logger.should_log_for(logger.level_enum.debug) # Launch distributed operations knn( diff --git a/python/cuml/cuml/svm/linear.pyx b/python/cuml/cuml/svm/linear.pyx index c5ff47cde9..9a0efedbc3 100644 --- a/python/cuml/cuml/svm/linear.pyx +++ b/python/cuml/cuml/svm/linear.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,6 +30,8 @@ from cuml.internals.base_helpers import BaseMetaClass from cuml.common.array_descriptor import CumlArrayDescriptor from cuml.internals.array import CumlArray from cuml.internals.base import Base +from cuml.internals.logger cimport level_enum +from cuml.internals.logger import level_enum as py_level_enum from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from cuml.common import input_to_cuml_array @@ -69,7 +71,7 @@ cdef extern from "cuml/svm/linear.hpp" namespace "ML::SVM" nogil: int max_iter int linesearch_max_iter int lbfgs_memory - int verbose + level_enum verbose double C double grad_tol double change_tol @@ -204,6 +206,18 @@ class LSVMPWrapper(LSVMPWrapper_): else: raise ValueError(f"Unknown loss string value: {loss}") + @property + def verbose(self): + # Reverse ordering of log levels to convert spdlog level values to + # Scikit-Learn log level values + return 6 - int(self._getparam('verbose')) + + @verbose.setter + def verbose(self, level: int): + # Reverse ordering of log levels to convert spdlog level values to + # Scikit-Learn log level values + self._setparam('verbose', py_level_enum(6 - level)) + # Add properties 
for parameters with a trivial conversion def __add_prop(prop_name): diff --git a/python/cuml/cuml/svm/svc.pyx b/python/cuml/cuml/svm/svc.pyx index 290f5bc2a2..bafe84e505 100644 --- a/python/cuml/cuml/svm/svc.pyx +++ b/python/cuml/cuml/svm/svc.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ from cuml.internals.array import CumlArray from cuml.internals.mixins import ClassifierMixin from cuml.common.doc_utils import generate_docstring from cuml.internals.logger import warn +from cuml.internals.logger cimport level_enum from pylibraft.common.handle cimport handle_t from pylibraft.common.interruptible import cuda_interruptible from cuml.common import input_to_cuml_array, input_to_host_array, input_to_host_array_with_sparse_support @@ -76,7 +77,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + level_enum verbosity double epsilon SvmType svmType diff --git a/python/cuml/cuml/svm/svm_base.pyx b/python/cuml/cuml/svm/svm_base.pyx index 9b68147f2b..7970e99338 100644 --- a/python/cuml/cuml/svm/svm_base.pyx +++ b/python/cuml/cuml/svm/svm_base.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -35,6 +35,7 @@ from cuml.common import input_to_cuml_array from cuml.internals.input_utils import determine_array_type_full from cuml.common import using_output_type from cuml.internals.logger import warn +from cuml.internals.logger cimport level_enum from cuml.internals.mixins import FMajorInputTagMixin from cuml.internals.array_sparse import SparseCumlArray, SparseCumlArrayInput from libcpp cimport bool @@ -68,7 +69,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + level_enum verbosity double epsilon SvmType svmType @@ -684,7 +685,7 @@ class SVMBase(Base, def __setstate__(self, state): super(SVMBase, self).__init__(handle=None, - verbose=state['verbose']) + verbose=state['_verbose']) self.__dict__.update(state) self._model = self._get_svm_model() self._freeSvmBuffers = False diff --git a/python/cuml/cuml/svm/svr.pyx b/python/cuml/cuml/svm/svr.pyx index a2527f4358..51f3110f77 100644 --- a/python/cuml/cuml/svm/svr.pyx +++ b/python/cuml/cuml/svm/svr.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -34,6 +34,7 @@ from pylibraft.common.handle cimport handle_t from cuml.common import input_to_cuml_array from libcpp cimport nullptr from cuml.svm.svm_base import SVMBase +from cuml.internals.logger cimport level_enum cdef extern from "cuml/matrix/kernelparams.h" namespace "MLCommon::Matrix": enum KernelType: @@ -56,7 +57,7 @@ cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM": int max_iter int nochange_steps double tol - int verbosity + level_enum verbosity double epsilon SvmType svmType diff --git a/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py b/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py index 94ebecf6b0..0a3d47ea3f 100644 --- a/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py +++ b/python/cuml/cuml/tests/dask/test_dask_logistic_regression.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression as skLR from cuml.internals.safe_imports import cpu_only_import +from cuml.internals import logger from cuml.testing.utils import array_equal from scipy.sparse import csr_matrix, load_npz, save_npz import random @@ -197,8 +198,10 @@ def assert_params( assert qnpams["fit_intercept"] == fit_intercept assert qnpams["max_iter"] == max_iter assert qnpams["linesearch_max_iter"] == linesearch_max_iter - assert ( - qnpams["verbose"] == 5 if verbose is True else 4 + assert qnpams["verbose"] == ( + logger.level_enum.debug + if verbose is True + else logger.level_enum.info ) # cuml Verbosity Levels assert ( lr.output_type == "input" if output_type is None else output_type diff --git a/python/cuml/cuml/tests/test_hdbscan.py b/python/cuml/cuml/tests/test_hdbscan.py index 0a9a3a6382..a00ed73761 100644 --- a/python/cuml/cuml/tests/test_hdbscan.py +++ b/python/cuml/cuml/tests/test_hdbscan.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -185,7 +185,7 @@ def test_hdbscan_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -251,7 +251,7 @@ def test_hdbscan_sklearn_datasets( X = test_datasets.data cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, gen_min_span_tree=True, min_samples=min_samples, @@ -309,7 +309,7 @@ def test_hdbscan_sklearn_extract_clusters( ): X = test_datasets.data cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, gen_min_span_tree=True, min_samples=min_samples, @@ -365,7 +365,7 @@ def test_hdbscan_cluster_patterns( X, y = get_pattern(dataset, nrows)[0] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -428,7 +428,7 @@ def test_hdbscan_cluster_patterns_extract_clusters( X, y = get_pattern(dataset, nrows)[0] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -572,7 +572,7 @@ def test_all_points_membership_vectors_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -628,7 +628,7 @@ def test_all_points_membership_vectors_moons( X, y = datasets.make_moons(n_samples=nrows, noise=0.05, random_state=42) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -686,7 +686,7 @@ def test_all_points_membership_vectors_circles( ) cuml_agg = 
HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -762,7 +762,7 @@ def test_approximate_predict_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -823,7 +823,7 @@ def test_approximate_predict_moons( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -890,7 +890,7 @@ def test_approximate_predict_circles( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -958,7 +958,7 @@ def test_approximate_predict_digits( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, min_samples=min_samples, max_cluster_size=max_cluster_size, @@ -1032,7 +1032,7 @@ def test_membership_vector_blobs( ) cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size, @@ -1098,7 +1098,7 @@ def test_membership_vector_moons( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, @@ -1164,7 +1164,7 @@ def test_membership_vector_circles( X_test = X[nrows:] cuml_agg = HDBSCAN( - verbose=logger.level_info, + verbose=logger.level_enum.info, min_samples=min_samples, allow_single_cluster=allow_single_cluster, max_cluster_size=max_cluster_size, diff --git 
a/python/cuml/cuml/tests/test_kmeans.py b/python/cuml/cuml/tests/test_kmeans.py index ec5a2e0a3a..9c01486ffe 100644 --- a/python/cuml/cuml/tests/test_kmeans.py +++ b/python/cuml/cuml/tests/test_kmeans.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -62,7 +62,7 @@ def get_data_consistency_test(): @pytest.fixture def random_state(): random_state = random.randint(0, 10**6) - with logger.set_level(logger.level_debug): + with logger.set_level(logger.level_enum.debug): logger.debug("Random seed: {}".format(random_state)) return random_state diff --git a/python/cuml/cuml/tests/test_logger.py b/python/cuml/cuml/tests/test_logger.py index c478a9fdc1..dc6c1285e3 100644 --- a/python/cuml/cuml/tests/test_logger.py +++ b/python/cuml/cuml/tests/test_logger.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,9 +27,9 @@ def test_logger(): logger.error("This is a error message") logger.critical("This is a critical message") - with logger.set_level(logger.level_warn): - assert logger.should_log_for(logger.level_warn) - assert not logger.should_log_for(logger.level_info) + with logger.set_level(logger.level_enum.warn): + assert logger.should_log_for(logger.level_enum.warn) + assert not logger.should_log_for(logger.level_enum.info) with logger.set_pattern("%v"): logger.info("This is an info message") @@ -38,7 +38,7 @@ def test_logger(): def test_redirected_logger(): new_stdout = StringIO() - with logger.set_level(logger.level_trace): + with logger.set_level(logger.level_enum.trace): # We do not test trace because CUML_LOG_TRACE is not compiled by # default test_msg = "This is a debug message" @@ -76,7 +76,7 @@ def test_log_flush(): stdout_buffer = BytesIO() new_stdout = TextIOWrapper(stdout_buffer) - with logger.set_level(logger.level_trace): + with logger.set_level(logger.level_enum.trace): test_msg = "This is a debug message" with redirect_stdout(new_stdout): logger.debug(test_msg) diff --git a/python/cuml/cuml/tests/test_metrics.py b/python/cuml/cuml/tests/test_metrics.py index 2189dcdc41..5886ff68d4 100644 --- a/python/cuml/cuml/tests/test_metrics.py +++ b/python/cuml/cuml/tests/test_metrics.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -109,7 +109,7 @@ @pytest.fixture(scope="module") def random_state(): random_state = random.randint(0, 10**6) - with logger.set_level(logger.level_debug): + with logger.set_level(logger.level_enum.debug): logger.debug("Random seed: {}".format(random_state)) return random_state diff --git a/python/cuml/cuml/tests/test_nearest_neighbors.py b/python/cuml/cuml/tests/test_nearest_neighbors.py index aa612b7763..d997ee578f 100644 --- a/python/cuml/cuml/tests/test_nearest_neighbors.py +++ b/python/cuml/cuml/tests/test_nearest_neighbors.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -617,14 +617,14 @@ def test_nearest_neighbors_sparse( a = a.astype("bool").astype("float32") b = b.astype("bool").astype("float32") - logger.set_level(logger.level_debug) + logger.set_level(logger.level_enum.debug) nn = cuKNN( metric=metric, p=2.0, n_neighbors=n_neighbors, algorithm="brute", output_type="numpy", - verbose=logger.level_debug, + verbose=logger.level_enum.debug, algo_params={ "batch_size_index": batch_size_index, "batch_size_query": batch_size_query, diff --git a/python/cuml/cuml/tests/test_umap.py b/python/cuml/cuml/tests/test_umap.py index 41f47bdaee..6d91012177 100644 --- a/python/cuml/cuml/tests/test_umap.py +++ b/python/cuml/cuml/tests/test_umap.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -197,7 +197,7 @@ def test_umap_transform_on_digits_sparse( fitter = cuUMAP( n_neighbors=15, - verbose=logger.level_info, + verbose=logger.level_enum.info, init="random", n_epochs=0, min_dist=0.01, @@ -236,7 +236,7 @@ def test_umap_transform_on_digits(target_metric): fitter = cuUMAP( n_neighbors=15, - verbose=logger.level_debug, + verbose=logger.level_enum.debug, init="random", n_epochs=0, min_dist=0.01, diff --git a/wiki/python/ESTIMATOR_GUIDE.md b/wiki/python/ESTIMATOR_GUIDE.md index 5413bfd6be..45ca84373f 100644 --- a/wiki/python/ESTIMATOR_GUIDE.md +++ b/wiki/python/ESTIMATOR_GUIDE.md @@ -52,7 +52,7 @@ At a high level, all cuML Estimators must: ```python class MyEstimator(Base): - def __init__(self, *, extra_arg=True, handle=None, verbose=False, output_type=None): + def __init__(self, *, extra_arg=True, handle=None, verbose=logger.level_enum.info, output_type=None): super().__init__(handle=handle, verbose=verbose, output_type=output_type) ... ```