diff --git a/opennn/adaptive_moment_estimation.h b/opennn/adaptive_moment_estimation.h index add052bdb..9dacdb578 100644 --- a/opennn/adaptive_moment_estimation.h +++ b/opennn/adaptive_moment_estimation.h @@ -25,7 +25,7 @@ class AdaptiveMomentEstimation : public OptimizationAlgorithm public: - explicit AdaptiveMomentEstimation(LossIndex* = nullptr); + AdaptiveMomentEstimation(LossIndex* = nullptr); const type& get_learning_rate() const; const type& get_beta_1() const; @@ -41,7 +41,7 @@ class AdaptiveMomentEstimation : public OptimizationAlgorithm void set_batch_samples_number(const Index& new_batch_samples_number); - void set_default() final; + void set_default() override; // Get @@ -67,17 +67,17 @@ class AdaptiveMomentEstimation : public OptimizationAlgorithm // Training - TrainingResults perform_training() final; + TrainingResults perform_training() override; - string write_optimization_algorithm_type() const final; + string write_optimization_algorithm_type() const override; // Serialization - Tensor to_string_matrix() const final; + Tensor to_string_matrix() const override; - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; void update_parameters(BackPropagation&, AdaptiveMomentEstimationData&) const; @@ -120,7 +120,7 @@ class AdaptiveMomentEstimation : public OptimizationAlgorithm struct AdaptiveMomentEstimationData : public OptimizationAlgorithmData { - explicit AdaptiveMomentEstimationData(AdaptiveMomentEstimation* = nullptr); + AdaptiveMomentEstimationData(AdaptiveMomentEstimation* = nullptr); void set(AdaptiveMomentEstimation* = nullptr); diff --git a/opennn/addition_layer_3d.h b/opennn/addition_layer_3d.h index 6be6f000f..c94703596 100644 --- a/opennn/addition_layer_3d.h +++ b/opennn/addition_layer_3d.h @@ -24,18 +24,18 @@ class AdditionLayer3D : public Layer public: - explicit AdditionLayer3D(const Index& = 0, const Index& = 0); + 
AdditionLayer3D(const Index& = 0, const Index& = 0); Index get_inputs_number_xxx() const; Index get_inputs_depth() const; // @todo - dimensions get_input_dimensions() const final + dimensions get_input_dimensions() const override { throw runtime_error("XXX"); } - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; void set(const Index& = 0, const Index& = 0); @@ -44,15 +44,15 @@ class AdditionLayer3D : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/addition_layer_3d_cuda.h" @@ -68,13 +68,13 @@ class AdditionLayer3D : public Layer struct AdditionLayer3DForwardPropagation : LayerForwardPropagation { - explicit AdditionLayer3DForwardPropagation(const Index& = 0, Layer* new_layer = nullptr); + AdditionLayer3DForwardPropagation(const Index& = 0, Layer* new_layer = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; - void print() const; + void print() const override; Tensor outputs; }; @@ -82,13 +82,13 @@ struct AdditionLayer3DForwardPropagation : LayerForwardPropagation struct AdditionLayer3DBackPropagation : LayerBackPropagation { - explicit AdditionLayer3DBackPropagation(const Index& = 0, Layer* = nullptr); + AdditionLayer3DBackPropagation(const Index& = 0, Layer* = nullptr); - vector> get_input_derivative_pairs() const; + vector> get_input_derivative_pairs() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, 
Layer* = nullptr) override; - void print() const; + void print() const override; Tensor input_1_derivatives; Tensor input_2_derivatives; diff --git a/opennn/auto_association_data_set.h b/opennn/auto_association_data_set.h index ac0130f91..ce68657c2 100644 --- a/opennn/auto_association_data_set.h +++ b/opennn/auto_association_data_set.h @@ -19,7 +19,7 @@ class AutoAssociativeDataSet : public DataSet public: - explicit AutoAssociativeDataSet(); + AutoAssociativeDataSet(); vector get_associative_raw_variables() const; const Tensor& get_associative_data() const; diff --git a/opennn/auto_associative_neural_network.h b/opennn/auto_associative_neural_network.h index 975fde8d0..d73276959 100644 --- a/opennn/auto_associative_neural_network.h +++ b/opennn/auto_associative_neural_network.h @@ -21,7 +21,7 @@ class AutoAssociativeNeuralNetwork : public NeuralNetwork public: - explicit AutoAssociativeNeuralNetwork(); + AutoAssociativeNeuralNetwork(); BoxPlot get_auto_associative_distances_box_plot() const; Descriptives get_distance_descriptives() const; diff --git a/opennn/bounding_layer.h b/opennn/bounding_layer.h index 69dccd3e9..4915838b2 100644 --- a/opennn/bounding_layer.h +++ b/opennn/bounding_layer.h @@ -19,12 +19,12 @@ class BoundingLayer : public Layer public: - explicit BoundingLayer(const dimensions& = {0}, const string& = "bounding_layer"); + BoundingLayer(const dimensions& = {0}, const string& = "bounding_layer"); enum class BoundingMethod{NoBounding, Bounding}; - dimensions get_input_dimensions() const final; - dimensions get_output_dimensions() const final; + dimensions get_input_dimensions() const override; + dimensions get_output_dimensions() const override; const BoundingMethod& get_bounding_method() const; @@ -40,8 +40,8 @@ class BoundingLayer : public Layer void set(const dimensions & = { 0 }, const string & = "bounding_layer"); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void 
set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; void set_bounding_method(const BoundingMethod&); void set_bounding_method(const string&); @@ -56,19 +56,19 @@ class BoundingLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; // Expression - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; // Serialization void print() const; - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: @@ -82,11 +82,11 @@ class BoundingLayer : public Layer struct BoundingLayerForwardPropagation : LayerForwardPropagation { - explicit BoundingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + BoundingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/box_plot.h b/opennn/box_plot.h index 51507e699..ee8a6e9e7 100644 --- a/opennn/box_plot.h +++ b/opennn/box_plot.h @@ -8,11 +8,11 @@ namespace opennn struct BoxPlot { - explicit BoxPlot(const type& = type(NAN), - const type& = type(NAN), - const type& = type(NAN), - const type& = type(NAN), - const type& = type(NAN)); + BoxPlot(const type& = type(NAN), + const type& = type(NAN), + const type& = type(NAN), + const type& = type(NAN), + const type& = type(NAN)); void set(const type& = type(NAN), const type& = type(NAN), diff --git a/opennn/conjugate_gradient.h b/opennn/conjugate_gradient.h index b49741cee..5892b533a 100644 --- a/opennn/conjugate_gradient.h +++ b/opennn/conjugate_gradient.h @@ -24,7 +24,7 @@ class 
ConjugateGradient : public OptimizationAlgorithm enum class TrainingDirectionMethod{PR, FR}; - explicit ConjugateGradient(LossIndex* = nullptr); + ConjugateGradient(LossIndex* = nullptr); // Get @@ -46,9 +46,9 @@ class ConjugateGradient : public OptimizationAlgorithm // Set - void set_default() final; + void set_default() override; - void set_loss_index(LossIndex*) final; + void set_loss_index(LossIndex*) override; // Training operators @@ -81,17 +81,17 @@ class ConjugateGradient : public OptimizationAlgorithm // Training - TrainingResults perform_training() final; + TrainingResults perform_training() override; - string write_optimization_algorithm_type() const final; + string write_optimization_algorithm_type() const override; // Serialization - Tensor to_string_matrix() const final; + Tensor to_string_matrix() const override; - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; void update_parameters( const Batch&, @@ -123,7 +123,7 @@ class ConjugateGradient : public OptimizationAlgorithm struct ConjugateGradientData : public OptimizationAlgorithmData { - explicit ConjugateGradientData(ConjugateGradient* = nullptr); + ConjugateGradientData(ConjugateGradient* = nullptr); void set(ConjugateGradient* = nullptr); diff --git a/opennn/convolutional_layer.h b/opennn/convolutional_layer.h index 1c13b468a..edb720cc6 100644 --- a/opennn/convolutional_layer.h +++ b/opennn/convolutional_layer.h @@ -37,12 +37,12 @@ class ConvolutionalLayer : public Layer enum class ConvolutionType{Valid, Same}; - explicit ConvolutionalLayer(const dimensions& = {3, 3, 1}, // Input dimensions {height,width,channels} - const dimensions& = {3, 3, 1, 1}, // Kernel dimensions {kernel_height,kernel_width,channels,kernels_number} - const ActivationFunction& = ActivationFunction::Linear, - const dimensions& = { 1, 1 }, // Stride dimensions {row_stride,column_stride} - const 
ConvolutionType& = ConvolutionType::Valid, // Convolution type (Valid || Same) - const string = "convolutional_layer"); + ConvolutionalLayer(const dimensions& = {3, 3, 1}, // Input dimensions {height,width,channels} + const dimensions& = {3, 3, 1, 1}, // Kernel dimensions {kernel_height,kernel_width,channels,kernels_number} + const ActivationFunction& = ActivationFunction::Linear, + const dimensions& = { 1, 1 }, // Stride dimensions {row_stride,column_stride} + const ConvolutionType& = ConvolutionType::Valid, // Convolution type (Valid || Same) + const string = "convolutional_layer"); bool get_batch_normalization() const; @@ -81,8 +81,8 @@ class ConvolutionalLayer : public Layer Index get_input_height() const; Index get_input_width() const; - Tensor get_parameters() const final; - Index get_parameters_number() const final; + Tensor get_parameters() const override; + Index get_parameters_number() const override; // Set @@ -131,21 +131,21 @@ class ConvolutionalLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; // Back propagation void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; void print() const override; @@ -197,11 +197,11 @@ class ConvolutionalLayer : public Layer struct ConvolutionalLayerForwardPropagation : LayerForwardPropagation { - explicit ConvolutionalLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + ConvolutionalLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, 
Layer* = nullptr) override; void print() const; @@ -218,11 +218,11 @@ struct ConvolutionalLayerForwardPropagation : LayerForwardPropagation struct ConvolutionalLayerBackPropagation : LayerBackPropagation { - explicit ConvolutionalLayerBackPropagation(const Index& = 0, Layer* = nullptr); + ConvolutionalLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/correlation.h b/opennn/correlation.h index 915445ff0..332230361 100644 --- a/opennn/correlation.h +++ b/opennn/correlation.h @@ -12,7 +12,7 @@ struct Correlation enum class Form{Linear, Logistic, Logarithmic, Exponential, Power}; - explicit Correlation() {} + Correlation() {} void set_perfect() { diff --git a/opennn/cross_entropy_error.h b/opennn/cross_entropy_error.h index abac701a8..d04cc85f9 100644 --- a/opennn/cross_entropy_error.h +++ b/opennn/cross_entropy_error.h @@ -19,11 +19,11 @@ class CrossEntropyError : public LossIndex public: - explicit CrossEntropyError(NeuralNetwork* = nullptr, DataSet* = nullptr); + CrossEntropyError(NeuralNetwork* = nullptr, DataSet* = nullptr); void calculate_error(const Batch&, const ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_binary_error(const Batch&, const ForwardPropagation&, @@ -37,7 +37,7 @@ class CrossEntropyError : public LossIndex void calculate_output_delta(const Batch&, ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_binary_output_delta(const Batch&, ForwardPropagation&, @@ -47,14 +47,14 @@ class CrossEntropyError : public LossIndex ForwardPropagation&, BackPropagation&) const; - string get_loss_method() const final; - string get_error_type_text() const final; + string get_loss_method() const override; + string get_error_type_text() const override; // 
Serialization virtual void from_XML(const XMLDocument&); - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/cross_entropy_error_cuda.h" diff --git a/opennn/cross_entropy_error_3d.h b/opennn/cross_entropy_error_3d.h index 187cc6274..6069b0043 100644 --- a/opennn/cross_entropy_error_3d.h +++ b/opennn/cross_entropy_error_3d.h @@ -19,22 +19,22 @@ class CrossEntropyError3D : public LossIndex public: - explicit CrossEntropyError3D(NeuralNetwork* = nullptr, DataSet* = nullptr); + CrossEntropyError3D(NeuralNetwork* = nullptr, DataSet* = nullptr); void calculate_error(const Batch&, const ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_output_delta(const Batch&, ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; - string get_loss_method() const final; - string get_error_type_text() const final; + string get_loss_method() const override; + string get_error_type_text() const override; virtual void from_XML(const XMLDocument&); - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/cross_entropy_error_3d_cuda.h" diff --git a/opennn/data_set.cpp b/opennn/data_set.cpp index 102469981..a02665a3e 100755 --- a/opennn/data_set.cpp +++ b/opennn/data_set.cpp @@ -2044,7 +2044,7 @@ vector DataSet::unuse_uncorrelated_raw_variables(const type& minimum_cor } -vector DataSet::unuse_multicollinear_raw_variables(Tensor& original_variable_indices, Tensor& final_variable_indices) +vector DataSet::unuse_multicollinear_raw_variables(Tensor& original_variable_indices, Tensor& override_variable_indices) { vector unused_raw_variables; @@ -2054,9 +2054,9 @@ vector DataSet::unuse_multicollinear_raw_variables(Tensor& ori bool found = false; - for(Index j = 0; j < final_variable_indices.size(); j++) + for(Index j = 0; j < 
override_variable_indices.size(); j++) { - if(original_raw_variable_index == final_variable_indices(j)) + if(original_raw_variable_index == override_variable_indices(j)) { found = true; break; diff --git a/opennn/data_set.h b/opennn/data_set.h index 795373c25..b1cf1c36a 100755 --- a/opennn/data_set.h +++ b/opennn/data_set.h @@ -27,15 +27,15 @@ class DataSet enum class Codification { UTF8, SHIFT_JIS }; - explicit DataSet(const Index& = 0, - const dimensions& = {0}, - const dimensions& = {0}); - - explicit DataSet(const filesystem::path&, - const string&, - const bool& = true, - const bool& = false, - const Codification& = Codification::UTF8); + DataSet(const Index& = 0, + const dimensions& = {0}, + const dimensions& = {0}); + + DataSet(const filesystem::path&, + const string&, + const bool& = true, + const bool& = false, + const Codification& = Codification::UTF8); // Enumerations diff --git a/opennn/descriptives.h b/opennn/descriptives.h index dea085bbb..ed3700f39 100644 --- a/opennn/descriptives.h +++ b/opennn/descriptives.h @@ -8,7 +8,7 @@ namespace opennn struct Descriptives { - explicit Descriptives(const type& = type(NAN), const type& = type(NAN), const type& = type(NAN), const type& = type(NAN)); + Descriptives(const type& = type(NAN), const type& = type(NAN), const type& = type(NAN), const type& = type(NAN)); Tensor to_tensor() const; diff --git a/opennn/embedding_layer.cpp b/opennn/embedding_layer.cpp index 404e87892..62576295c 100644 --- a/opennn/embedding_layer.cpp +++ b/opennn/embedding_layer.cpp @@ -128,7 +128,9 @@ void EmbeddingLayer::set_input_dimensions_xxx(const Index& new_inputs_dimension) { input_dimensions_xxx = new_inputs_dimension; - set_embedding_weights(); + embedding_weights.resize(input_dimensions_xxx, depth); + set_parameters_random(); + } @@ -142,7 +144,8 @@ void EmbeddingLayer::set_depth(const Index& new_depth) { depth = new_depth; - set_embedding_weights(); + embedding_weights.resize(input_dimensions_xxx, depth); + 
set_parameters_random(); } @@ -158,14 +161,6 @@ void EmbeddingLayer::set_dropout_rate(const type& new_dropout_rate) } -void EmbeddingLayer::set_embedding_weights() -{ - embedding_weights.resize(input_dimensions_xxx, depth); - - set_parameters_random(); -} - - void EmbeddingLayer::set_parameters(const Tensor& new_parameters, const Index& index) { memcpy(embedding_weights.data(), new_parameters.data() + index, embedding_weights.size()*sizeof(type)); @@ -211,11 +206,11 @@ void EmbeddingLayer::lookup_embedding(const Tensor& inputs, Tensor>& input Tensor& embedding_weights_derivatives = embedding_layer_back_propagation->embedding_weights_derivatives; embedding_weights_derivatives.setZero(); - + for(Index i = 0; i < batch_samples_number; i++) { - if(positional_encoding) - sample_deltas.device(*thread_pool_device) - = deltas.chip(i, 0) * sample_deltas.constant(sqrt(depth)); - else - sample_deltas.device(*thread_pool_device) = deltas.chip(i, 0); + positional_encoding + ? sample_deltas.device(*thread_pool_device) = deltas.chip(i, 0) * sample_deltas.constant(sqrt(depth)) + : sample_deltas.device(*thread_pool_device) = deltas.chip(i, 0); for(Index j = 0; j < inputs_number; j++) embedding_weights_derivatives.chip(Index(inputs(i, j)), 0).device(*thread_pool_device) @@ -315,8 +308,6 @@ void EmbeddingLayer::insert_gradient(unique_ptr& back_prop void EmbeddingLayer::from_XML(const XMLDocument& document) { - // Embedding layer - const XMLElement* embedding_layer_element = document.FirstChildElement("Embedding"); if(!embedding_layer_element) diff --git a/opennn/embedding_layer.h b/opennn/embedding_layer.h index a6c6fbff2..cc8fb5cb2 100644 --- a/opennn/embedding_layer.h +++ b/opennn/embedding_layer.h @@ -25,7 +25,7 @@ class EmbeddingLayer : public Layer public: - explicit EmbeddingLayer(const Index& = 0, + EmbeddingLayer(const Index& = 0, const Index& = 0, const Index& = 0, const bool& = false); @@ -36,10 +36,10 @@ class EmbeddingLayer : public Layer bool get_positional_encoding() 
const; dimensions get_input_dimensions() const; - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; void set(const Index& = 0, const Index& = 0, const Index& = 0, const bool& = false); @@ -50,11 +50,9 @@ class EmbeddingLayer : public Layer void set_dropout_rate(const type&); - void set_embedding_weights(); - - void set_parameters(const Tensor&, const Index& index = 0) final; - void set_parameters_random() final; - void set_parameters_constant(const type&) final; + void set_parameters(const Tensor&, const Index& index = 0) override; + void set_parameters_random() override; + void set_parameters_constant(const type&) override; void dropout(Tensor&) const; @@ -62,12 +60,12 @@ class EmbeddingLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void add_deltas(const vector>&) const; @@ -75,8 +73,8 @@ class EmbeddingLayer : public Layer const Index& index, Tensor& gradient) const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/embedding_layer_cuda.h" @@ -102,11 +100,11 @@ class EmbeddingLayer : public Layer struct EmbeddingLayerForwardPropagation : LayerForwardPropagation { - explicit EmbeddingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + EmbeddingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 
0, Layer* = nullptr) override; void print() const; @@ -122,11 +120,11 @@ struct EmbeddingLayerForwardPropagation : LayerForwardPropagation struct EmbeddingLayerBackPropagation : LayerBackPropagation { - explicit EmbeddingLayerBackPropagation(const Index& = 0, Layer* = nullptr); + EmbeddingLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/flatten_layer.h b/opennn/flatten_layer.h index 3c7f98460..bc1b6c0f8 100644 --- a/opennn/flatten_layer.h +++ b/opennn/flatten_layer.h @@ -27,10 +27,10 @@ class FlattenLayer : public Layer public: - explicit FlattenLayer(const dimensions& = {0,0,0}); + FlattenLayer(const dimensions& = {0,0,0}); dimensions get_input_dimensions() const; - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; Index get_input_height() const; Index get_input_width() const; @@ -42,20 +42,20 @@ class FlattenLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; // Back-propagation void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; // Serialization - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; void print() const; @@ -71,11 +71,11 @@ class FlattenLayer : public Layer struct FlattenLayerForwardPropagation : LayerForwardPropagation { - explicit FlattenLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + FlattenLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = 
nullptr) override; void print() const; @@ -85,11 +85,11 @@ struct FlattenLayerForwardPropagation : LayerForwardPropagation struct FlattenLayerBackPropagation : LayerBackPropagation { - explicit FlattenLayerBackPropagation(const Index& = 0, Layer* = nullptr); + FlattenLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/genetic_algorithm.h b/opennn/genetic_algorithm.h index ee3db477b..59e7fc084 100644 --- a/opennn/genetic_algorithm.h +++ b/opennn/genetic_algorithm.h @@ -19,7 +19,7 @@ class GeneticAlgorithm : public InputsSelection public: - explicit GeneticAlgorithm(TrainingStrategy* = nullptr); + GeneticAlgorithm(TrainingStrategy* = nullptr); enum class InitializationMethod{Random,Correlations}; @@ -43,7 +43,7 @@ class GeneticAlgorithm : public InputsSelection const InitializationMethod& get_initialization_method() const; - virtual void set_default() final; + virtual void set_default() override; void set_population(const Tensor&); @@ -93,7 +93,7 @@ class GeneticAlgorithm : public InputsSelection const vector& get_original_unused_raw_variables(); - InputsSelectionResults perform_inputs_selection () final; + InputsSelectionResults perform_inputs_selection () override; Tensor to_string_matrix() const; diff --git a/opennn/growing_inputs.h b/opennn/growing_inputs.h index 04348a698..0dafed1f9 100644 --- a/opennn/growing_inputs.h +++ b/opennn/growing_inputs.h @@ -19,7 +19,7 @@ class GrowingInputs : public InputsSelection public: - explicit GrowingInputs(TrainingStrategy* = nullptr); + GrowingInputs(TrainingStrategy* = nullptr); const Index& get_maximum_inputs_number() const; @@ -27,7 +27,7 @@ class GrowingInputs : public InputsSelection const Index& get_maximum_selection_failures() const; - virtual void set_default() final; + virtual void set_default() override; void 
set_maximum_inputs_number(const Index&); @@ -35,7 +35,7 @@ class GrowingInputs : public InputsSelection void set_maximum_selection_failures(const Index&); - InputsSelectionResults perform_inputs_selection() final; + InputsSelectionResults perform_inputs_selection() override; Tensor to_string_matrix() const; diff --git a/opennn/growing_neurons.h b/opennn/growing_neurons.h index fc58071a2..62356dfa9 100644 --- a/opennn/growing_neurons.h +++ b/opennn/growing_neurons.h @@ -21,7 +21,7 @@ class GrowingNeurons : public NeuronsSelection public: - explicit GrowingNeurons(TrainingStrategy* = nullptr); + GrowingNeurons(TrainingStrategy* = nullptr); const Index& get_step() const; @@ -33,7 +33,7 @@ class GrowingNeurons : public NeuronsSelection void set_maximum_selection_failures(const Index&); - NeuronsSelectionResults perform_neurons_selection() final; + NeuronsSelectionResults perform_neurons_selection() override; Tensor to_string_matrix() const; diff --git a/opennn/histogram.h b/opennn/histogram.h index 7afe6fa0c..9dbeaec45 100644 --- a/opennn/histogram.h +++ b/opennn/histogram.h @@ -8,15 +8,15 @@ namespace opennn struct Histogram { - explicit Histogram(const Index& = 0); + Histogram(const Index& = 0); - explicit Histogram(const Tensor&, const Tensor&); + Histogram(const Tensor&, const Tensor&); - explicit Histogram(const Tensor&, const Tensor&, const Tensor&, const Tensor&); + Histogram(const Tensor&, const Tensor&, const Tensor&, const Tensor&); - explicit Histogram(const Tensor&, const Index&); + Histogram(const Tensor&, const Index&); - explicit Histogram(const Tensor&); + Histogram(const Tensor&); // Methods diff --git a/opennn/image_data_set.h b/opennn/image_data_set.h index 28c04debe..714c9b5ca 100644 --- a/opennn/image_data_set.h +++ b/opennn/image_data_set.h @@ -19,7 +19,7 @@ class ImageDataSet : public DataSet public: - explicit ImageDataSet(const Index& = 0, const dimensions& = {0, 0, 0}, const dimensions& = {0}); + ImageDataSet(const Index& = 0, const 
dimensions& = {0, 0, 0}, const dimensions& = {0}); Index get_channels_number() const; Index get_image_width() const; @@ -55,12 +55,12 @@ class ImageDataSet : public DataSet void set_random_vertical_translation_minimum(const type&); void set_random_vertical_translation_maximum(const type&); - vector scale_variables(const VariableUse&) final; + vector scale_variables(const VariableUse&) override; void read_bmp(); - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; private: diff --git a/opennn/inputs_selection.h b/opennn/inputs_selection.h index d0ce56fa8..6166b8ce5 100644 --- a/opennn/inputs_selection.h +++ b/opennn/inputs_selection.h @@ -31,7 +31,7 @@ class InputsSelection }; - explicit InputsSelection(TrainingStrategy* = nullptr); + InputsSelection(TrainingStrategy* = nullptr); TrainingStrategy* get_training_strategy() const; @@ -100,7 +100,7 @@ class InputsSelection struct InputsSelectionResults { - explicit InputsSelectionResults(const Index& = 0); + InputsSelectionResults(const Index& = 0); Index get_epochs_number() const; diff --git a/opennn/kmeans.cpp b/opennn/kmeans.cpp index 10427fd89..52b5896cc 100755 --- a/opennn/kmeans.cpp +++ b/opennn/kmeans.cpp @@ -167,8 +167,8 @@ Index KMeans::find_optimal_clusters(const Tensor& sum_squared_error_val Tensor initial_endpoint(2); initial_endpoint.setValues({ type(1), type(sum_squared_error_values(0)) }); - Tensor final_endpoint(2); - final_endpoint.setValues({ type(clusters_number), sum_squared_error_values(clusters_number - 1) }); + Tensor override_endpoint(2); + override_endpoint.setValues({ type(clusters_number), sum_squared_error_values(clusters_number - 1) }); type max_distance = type(0); Index optimal_clusters_number = 1; @@ -181,10 +181,10 @@ Index KMeans::find_optimal_clusters(const Tensor& sum_squared_error_val current_point.setValues({ type(cluster_index), 
sum_squared_error_values(cluster_index - 1) }); perpendicular_distance - = type(abs((final_endpoint(1) - initial_endpoint(1)) * current_point(0) - - (final_endpoint(0) - initial_endpoint(0)) * current_point(1) + - final_endpoint(0) * initial_endpoint(1) - final_endpoint(1) * initial_endpoint(0))) / - type(sqrt(pow(final_endpoint(1) - initial_endpoint(1), 2) + pow(final_endpoint(0) - initial_endpoint(0), 2))); + = type(abs((override_endpoint(1) - initial_endpoint(1)) * current_point(0) - + (override_endpoint(0) - initial_endpoint(0)) * current_point(1) + + override_endpoint(0) * initial_endpoint(1) - override_endpoint(1) * initial_endpoint(0))) / + type(sqrt(pow(override_endpoint(1) - initial_endpoint(1), 2) + pow(override_endpoint(0) - initial_endpoint(0), 2))); if(perpendicular_distance > max_distance) { diff --git a/opennn/language_data_set.cpp b/opennn/language_data_set.cpp index c1bacc1d8..cae70d02d 100644 --- a/opennn/language_data_set.cpp +++ b/opennn/language_data_set.cpp @@ -980,7 +980,7 @@ vector> filter_inputs(const vector>& trimmed } -vector generate_final_vocabulary(const vector& reserved_tokens, +vector generate_override_vocabulary(const vector& reserved_tokens, const set& character_tokens, const map& current_tokens) { @@ -1006,18 +1006,18 @@ vector generate_final_vocabulary(const vector& reserved_tokens, vocabulary.push_back(token); set seen_tokens; - vector final_vocabulary; + vector override_vocabulary; for(const string& word : vocabulary) { if(seen_tokens.find(word) == seen_tokens.end()) { seen_tokens.insert(word); - final_vocabulary.push_back(word); + override_vocabulary.push_back(word); } } - return final_vocabulary; + return override_vocabulary; } @@ -1109,7 +1109,7 @@ vector calculate_vocabulary_with_threshold(const vector context_vocabulary_map; - for(size_t i = 0; i < context_vocabulary.size(); i++) - context_vocabulary_map[context_vocabulary[i]] = type(i); + const unordered_map context_vocabulary_map(context_vocabulary.begin(), 
context_vocabulary.end()); + const unordered_map completion_vocabulary_map(completion_vocabulary.begin(), completion_vocabulary.end()); - unordered_map completion_vocabulary_map; - for(size_t i = 0; i < completion_vocabulary.size(); i++) - completion_vocabulary_map[completion_vocabulary[i]] = type(i); + // unordered_map context_vocabulary_map; + // for(size_t i = 0; i < context_vocabulary.size(); i++) + // context_vocabulary_map[context_vocabulary[i]] = type(i); + + // unordered_map completion_vocabulary_map; + // for(size_t i = 0; i < completion_vocabulary.size(); i++) + // completion_vocabulary_map[completion_vocabulary[i]] = type(i); // const Index context_vocabulary_size = context_vocabulary.size(); // const Index completion_vocabulary_size = completion_vocabulary.size(); diff --git a/opennn/language_data_set.h b/opennn/language_data_set.h index a4b796f02..a6f42b244 100644 --- a/opennn/language_data_set.h +++ b/opennn/language_data_set.h @@ -19,7 +19,7 @@ class LanguageDataSet : public DataSet public: - explicit LanguageDataSet(); + LanguageDataSet(); const vector& get_context_vocabulary() const; const vector& get_completion_vocabulary() const; diff --git a/opennn/layer.h b/opennn/layer.h index 590fae28e..a30b950dc 100644 --- a/opennn/layer.h +++ b/opennn/layer.h @@ -49,7 +49,7 @@ class Layer MultiheadAttention, Embedding}; - explicit Layer(); + Layer(); string get_name() const; diff --git a/opennn/layer_back_propagation.h b/opennn/layer_back_propagation.h index f668a8684..62856febb 100644 --- a/opennn/layer_back_propagation.h +++ b/opennn/layer_back_propagation.h @@ -10,7 +10,7 @@ class Layer; struct LayerBackPropagation { - explicit LayerBackPropagation() {} + LayerBackPropagation() {} virtual vector> get_input_derivative_pairs() const = 0; diff --git a/opennn/layer_back_propagation_lm.h b/opennn/layer_back_propagation_lm.h index 7465bd944..029f31ecc 100644 --- a/opennn/layer_back_propagation_lm.h +++ b/opennn/layer_back_propagation_lm.h @@ -10,7 +10,7 @@ 
class Layer; struct LayerBackPropagationLM { - explicit LayerBackPropagationLM() {} + LayerBackPropagationLM() {} virtual vector> get_input_derivative_pairs() const = 0; diff --git a/opennn/layer_forward_propagation.h b/opennn/layer_forward_propagation.h index 57b28fc29..4de0bf76e 100644 --- a/opennn/layer_forward_propagation.h +++ b/opennn/layer_forward_propagation.h @@ -10,7 +10,7 @@ class Layer; struct LayerForwardPropagation { - explicit LayerForwardPropagation() + LayerForwardPropagation() { } diff --git a/opennn/learning_rate_algorithm.h b/opennn/learning_rate_algorithm.h index f764d3a2d..82538cd25 100644 --- a/opennn/learning_rate_algorithm.h +++ b/opennn/learning_rate_algorithm.h @@ -23,7 +23,7 @@ class LearningRateAlgorithm enum class LearningRateMethod{GoldenSection, BrentMethod}; - explicit LearningRateAlgorithm(LossIndex* = nullptr); + LearningRateAlgorithm(LossIndex* = nullptr); struct Triplet { diff --git a/opennn/levenberg_marquardt_algorithm.h b/opennn/levenberg_marquardt_algorithm.h index cc5c08e1c..235507074 100644 --- a/opennn/levenberg_marquardt_algorithm.h +++ b/opennn/levenberg_marquardt_algorithm.h @@ -21,7 +21,7 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm public: - explicit LevenbergMarquardtAlgorithm(LossIndex* = nullptr); + LevenbergMarquardtAlgorithm(LossIndex* = nullptr); const type& get_minimum_loss_decrease() const; const type& get_loss_goal() const; @@ -40,7 +40,7 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm // Set - void set_default() final; + void set_default() override; void set_damping_parameter(const type&); @@ -61,9 +61,9 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm // Training - void check() const final; + void check() const override; - TrainingResults perform_training() final; + TrainingResults perform_training() override; void update_parameters( const Batch&, @@ -71,15 +71,15 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm 
BackPropagationLM&, LevenbergMarquardtAlgorithmData&); - string write_optimization_algorithm_type() const final; + string write_optimization_algorithm_type() const override; // Serialization - Tensor to_string_matrix() const final; + Tensor to_string_matrix() const override; - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: @@ -108,7 +108,7 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm struct LevenbergMarquardtAlgorithmData : public OptimizationAlgorithmData { - explicit LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method = nullptr) + LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method = nullptr) { set(new_Levenberg_Marquardt_method); } diff --git a/opennn/long_short_term_memory_layer.h b/opennn/long_short_term_memory_layer.h index 3074a1154..12e1f642b 100755 --- a/opennn/long_short_term_memory_layer.h +++ b/opennn/long_short_term_memory_layer.h @@ -34,15 +34,15 @@ class LongShortTermMemoryLayer : public Layer SoftSign, HardSigmoid}; - explicit LongShortTermMemoryLayer(const Index& = 0, const Index& = 0, const Index& = 0); + LongShortTermMemoryLayer(const Index& = 0, const Index& = 0, const Index& = 0); - dimensions get_input_dimensions() const final; - dimensions get_output_dimensions() const final; + dimensions get_input_dimensions() const override; + dimensions get_output_dimensions() const override; Index get_timesteps() const; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; const LongShortTermMemoryLayer::ActivationFunction& get_activation_function() const; const LongShortTermMemoryLayer::ActivationFunction& get_recurrent_activation_function() const; @@ -52,10 +52,10 @@ class LongShortTermMemoryLayer : 
public Layer void set(const Index& = 0, const Index& = 0, const Index& = 0); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; - void set_parameters(const Tensor&, const Index&) final; + void set_parameters(const Tensor&, const Index&) override; // Activation functions @@ -69,9 +69,9 @@ class LongShortTermMemoryLayer : public Layer // Parameters initialization - void set_parameters_constant(const type&) final; + void set_parameters_constant(const type&) override; - void set_parameters_random() final; + void set_parameters_random() override; // Forward propagation @@ -90,18 +90,18 @@ class LongShortTermMemoryLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; // Back propagation void insert_gradient(unique_ptr&, const Index& , - Tensor&) const final; + Tensor&) const override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void calculate_forget_parameter_derivatives(const Tensor&, const Tensor&, @@ -129,13 +129,13 @@ class LongShortTermMemoryLayer : public Layer string get_activation_function_string_expression() const; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; // Serialization - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: @@ -170,11 +170,11 @@ class LongShortTermMemoryLayer : public Layer struct LongShortTermMemoryLayerForwardPropagation : LayerForwardPropagation { - explicit LongShortTermMemoryLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + 
LongShortTermMemoryLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -217,11 +217,11 @@ struct LongShortTermMemoryLayerForwardPropagation : LayerForwardPropagation struct LongShortTermMemoryLayerBackPropagation : LayerBackPropagation { - explicit LongShortTermMemoryLayerBackPropagation(const Index& = 0, Layer* = nullptr); + LongShortTermMemoryLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void set_derivatives_zero(); diff --git a/opennn/loss_index.h b/opennn/loss_index.h index 220c49a6c..82e20528b 100644 --- a/opennn/loss_index.h +++ b/opennn/loss_index.h @@ -30,7 +30,7 @@ class LossIndex public: - explicit LossIndex(NeuralNetwork* = nullptr, DataSet* = nullptr); + LossIndex(NeuralNetwork* = nullptr, DataSet* = nullptr); enum class RegularizationMethod{L1, L2, NoRegularization}; @@ -197,7 +197,7 @@ class LossIndex struct BackPropagationLM { - explicit BackPropagationLM(const Index& = 0, LossIndex* = nullptr); + BackPropagationLM(const Index& = 0, LossIndex* = nullptr); void set(const Index& = 0, LossIndex* = nullptr); diff --git a/opennn/mean_squared_error.h b/opennn/mean_squared_error.h index fac5916cb..a28edd96d 100644 --- a/opennn/mean_squared_error.h +++ b/opennn/mean_squared_error.h @@ -19,38 +19,38 @@ class MeanSquaredError : public LossIndex public: - explicit MeanSquaredError(NeuralNetwork* = nullptr, DataSet* = nullptr); + MeanSquaredError(NeuralNetwork* = nullptr, DataSet* = nullptr); void calculate_error(const Batch&, const ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_output_delta(const Batch&, 
ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; // Back propagation LM void calculate_error_lm(const Batch&, const ForwardPropagation&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_output_delta_lm(const Batch&, ForwardPropagation&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_gradient_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_hessian_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; // Serialization - void to_XML(XMLPrinter &) const final; + void to_XML(XMLPrinter &) const override; - string get_loss_method() const final; - string get_error_type_text() const final; + string get_loss_method() const override; + string get_error_type_text() const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/mean_squared_error_cuda.h" diff --git a/opennn/minkowski_error.h b/opennn/minkowski_error.h index 3bf76ef4c..831feb518 100644 --- a/opennn/minkowski_error.h +++ b/opennn/minkowski_error.h @@ -19,7 +19,7 @@ class MinkowskiError : public LossIndex public: - explicit MinkowskiError(NeuralNetwork* = nullptr, DataSet* = nullptr); + MinkowskiError(NeuralNetwork* = nullptr, DataSet* = nullptr); type get_Minkowski_parameter() const; @@ -29,18 +29,18 @@ class MinkowskiError : public LossIndex void calculate_error(const Batch& batch, const ForwardPropagation& forward_propagation, - BackPropagation& back_propagation) const final; + BackPropagation& back_propagation) const override; void calculate_output_delta(const Batch&, ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; - string get_loss_method() const final; - string get_error_type_text() const final; + string get_loss_method() const override; + string get_error_type_text() const override; virtual void from_XML(const XMLDocument&); - void 
to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: diff --git a/opennn/model_selection.h b/opennn/model_selection.h index 321dcf50e..30df84d30 100644 --- a/opennn/model_selection.h +++ b/opennn/model_selection.h @@ -24,7 +24,7 @@ class ModelSelection // Constructors - explicit ModelSelection(TrainingStrategy* = nullptr); + ModelSelection(TrainingStrategy* = nullptr); enum class NeuronsSelectionMethod{GROWING_NEURONS}; diff --git a/opennn/multihead_attention_layer.cpp b/opennn/multihead_attention_layer.cpp index 60051ee51..73f188cff 100644 --- a/opennn/multihead_attention_layer.cpp +++ b/opennn/multihead_attention_layer.cpp @@ -335,13 +335,15 @@ void MultiheadAttentionLayer::apply_causal_mask(Tensor& attention_score { const Index batch_samples_number = attention_scores.dimension(2); + const Index context_input_size = context_size * input_size; + + for(Index head_index = 0; head_index < heads_number; head_index++) { for(Index sample_index = 0; sample_index < batch_samples_number; sample_index++) { type* sample_attention_scores_data = attention_scores.data() - + sample_index * context_size * input_size - + head_index * context_size * input_size * batch_samples_number; + + (sample_index + head_index * batch_samples_number) * context_input_size; TensorMap> sample_attention_scores(sample_attention_scores_data, context_size, @@ -411,7 +413,6 @@ void MultiheadAttentionLayer::calculate_output_projection(const Tensor& type* head_attention_output_data = attention_outputs_data + head_index * input_size * hidden_depth * batch_size; TensorMap> head_projection_output(head_projection_output_data, batch_size, input_size, depth); - const TensorMap> head_projection_weights(head_projection_weights_data, hidden_depth, depth); for(Index sample_index = 0; sample_index < batch_size; sample_index++) @@ -1009,7 +1010,6 @@ vector> MultiheadAttentionLayerBackPropagation::get_inpu {(type*)(context_derivatives.data()), {batch_samples_number,
context_size, depth}} }; } - } // OpenNN: Open Neural Networks Library. diff --git a/opennn/multihead_attention_layer.h b/opennn/multihead_attention_layer.h index 068fbed12..343d8e601 100644 --- a/opennn/multihead_attention_layer.h +++ b/opennn/multihead_attention_layer.h @@ -24,7 +24,7 @@ class MultiheadAttentionLayer : public Layer public: - explicit MultiheadAttentionLayer(const Index& = 0, + MultiheadAttentionLayer(const Index& = 0, const Index& = 0, const Index& = 0, const Index& = 0, @@ -39,19 +39,19 @@ class MultiheadAttentionLayer : public Layer Index get_weights_depth() const; // @todo - dimensions get_input_dimensions() const final + dimensions get_input_dimensions() const override { throw runtime_error("XXX"); } - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; void set(const Index& = 0, const Index& = 0, const Index& = 0, const Index& = 0); - void set_parameters(const Tensor&, const Index& index = 0) final; + void set_parameters(const Tensor&, const Index& index = 0) override; void set_input_size(const Index&); void set_context_size(const Index&); @@ -59,9 +59,9 @@ class MultiheadAttentionLayer : public Layer void set_heads_number(const Index&); void set_weights(); - void set_parameters_random() final; + void set_parameters_random() override; void set_parameters_glorot(); - void set_parameters_constant(const type&) final; + void set_parameters_constant(const type&) override; void set_dropout_rate(const type&); void set_causal_mask(const bool&); @@ -81,19 +81,19 @@ class MultiheadAttentionLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; 
void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/multihead_attention_layer_cuda.h" @@ -143,12 +143,12 @@ class MultiheadAttentionLayer : public Layer struct MultiheadAttentionLayerForwardPropagation : LayerForwardPropagation { - explicit MultiheadAttentionLayerForwardPropagation(const Index& new_batch_samples_number = 0, + MultiheadAttentionLayerForwardPropagation(const Index& new_batch_samples_number = 0, Layer* new_layer = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -170,11 +170,11 @@ struct MultiheadAttentionLayerForwardPropagation : LayerForwardPropagation struct MultiheadAttentionLayerBackPropagation : LayerBackPropagation { - explicit MultiheadAttentionLayerBackPropagation(const Index& = 0, Layer* = nullptr); + MultiheadAttentionLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/neural_network.h b/opennn/neural_network.h index 3094d3b19..3cf03f041 100644 --- a/opennn/neural_network.h +++ b/opennn/neural_network.h @@ -41,12 +41,12 @@ class NeuralNetwork JavaScript, PHP}; - explicit NeuralNetwork(const NeuralNetwork::ModelType& = NeuralNetwork::ModelType::Default, + NeuralNetwork(const NeuralNetwork::ModelType& = NeuralNetwork::ModelType::Default, const dimensions& = {}, const dimensions& = {}, const dimensions& = {}); - explicit NeuralNetwork(const filesystem::path&); + NeuralNetwork(const filesystem::path&); 
void add_layer(unique_ptr, const vector& = vector()); diff --git a/opennn/neurons_selection.h b/opennn/neurons_selection.h index 6f82c1092..a34d9062c 100644 --- a/opennn/neurons_selection.h +++ b/opennn/neurons_selection.h @@ -22,7 +22,7 @@ class NeuronsSelection enum class StoppingCondition { MaximumTime, SelectionErrorGoal, MaximumEpochs, MaximumSelectionFailures, MaximumNeurons }; - explicit NeuronsSelection(TrainingStrategy* = nullptr); + NeuronsSelection(TrainingStrategy* = nullptr); TrainingStrategy* get_training_strategy() const; @@ -92,7 +92,7 @@ class NeuronsSelection struct NeuronsSelectionResults { - explicit NeuronsSelectionResults(const Index& maximum_epochs_number = 0); + NeuronsSelectionResults(const Index& maximum_epochs_number = 0); void resize_history(const Index& new_size); diff --git a/opennn/normalization_layer_3d.cpp b/opennn/normalization_layer_3d.cpp index 7313000bc..d4b342161 100644 --- a/opennn/normalization_layer_3d.cpp +++ b/opennn/normalization_layer_3d.cpp @@ -14,7 +14,7 @@ namespace opennn { NormalizationLayer3D::NormalizationLayer3D(const Index& new_inputs_number, - const Index& new_inputs_depth) : Layer() + const Index& new_inputs_depth) : Layer() { set(new_inputs_number, new_inputs_depth); @@ -152,9 +152,6 @@ void NormalizationLayer3D::forward_propagate(const vector(layer_forward_propagation.get()); - // @todo Can we avoid normalized_inputs - - Tensor& normalized_inputs = normalization_layer_3d_forward_propagation->normalized_inputs; Tensor& outputs = normalization_layer_3d_forward_propagation->outputs; Tensor& means = normalization_layer_3d_forward_propagation->means; @@ -170,12 +167,6 @@ void NormalizationLayer3D::forward_propagate(const vector>& const NormalizationLayer3DForwardPropagation* normalization_layer_3d_forward_propagation = static_cast(forward_propagation.get()); - const Tensor& normalized_inputs = normalization_layer_3d_forward_propagation->normalized_inputs; + const Tensor& outputs = 
normalization_layer_3d_forward_propagation->outputs; const Tensor& standard_deviations = normalization_layer_3d_forward_propagation->standard_deviations; @@ -227,19 +218,19 @@ void NormalizationLayer3D::back_propagate(const vector>& // Parameters derivatives - gammas_derivatives.device(*thread_pool_device) = (normalized_inputs * deltas).sum(sum_dimensions_2); + gammas_derivatives.device(*thread_pool_device) = (outputs * deltas).sum(sum_dimensions_2); betas_derivatives.device(*thread_pool_device) = deltas.sum(sum_dimensions_2); // Input derivatives - standard_deviation_derivatives.device(*thread_pool_device) = normalized_inputs; + standard_deviation_derivatives.device(*thread_pool_device) = outputs; scaled_deltas.device(*thread_pool_device) = deltas; multiply_matrices(thread_pool_device.get(), scaled_deltas, gammas); - aux_2d.device(*thread_pool_device) = 1 / type(inputs_depth) * (scaled_deltas * normalized_inputs).sum(sum_dimensions_1) / (standard_deviations_matrix + epsilon); + aux_2d.device(*thread_pool_device) = 1 / type(inputs_depth) * (scaled_deltas * outputs).sum(sum_dimensions_1) / (standard_deviations_matrix + epsilon); multiply_matrices(thread_pool_device.get(), standard_deviation_derivatives, aux_2d); @@ -345,8 +336,6 @@ void NormalizationLayer3DForwardPropagation::set(const Index& new_batch_samples_ outputs.resize(batch_samples_number, inputs_number, inputs_depth); - normalized_inputs.resize(batch_samples_number, inputs_number, inputs_depth); - means.resize(batch_samples_number, inputs_number, inputs_depth); standard_deviations.resize(batch_samples_number, inputs_number, inputs_depth); } diff --git a/opennn/normalization_layer_3d.h b/opennn/normalization_layer_3d.h index 19cc5e306..bec93024c 100644 --- a/opennn/normalization_layer_3d.h +++ b/opennn/normalization_layer_3d.h @@ -24,54 +24,54 @@ class NormalizationLayer3D : public Layer public: - explicit NormalizationLayer3D(const Index& = 0, const Index& = 0); + NormalizationLayer3D(const Index& = 0, 
const Index& = 0); Index get_inputs_number_xxx() const; Index get_inputs_depth() const; // @todo - dimensions get_input_dimensions() const final + dimensions get_input_dimensions() const override { throw runtime_error("XXX"); } - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; Index get_gammas_number() const; Index get_betas_number() const; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; void set(const Index& = 0, const Index& = 0); void set_inputs_number(const Index&); void set_inputs_depth(const Index&); - void set_parameters(const Tensor&, const Index& index = 0) final; + void set_parameters(const Tensor&, const Index& index = 0) override; void set_gammas_constant(const type&); void set_betas_constant(const type&); - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void add_deltas(const vector>&) const; void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA @@ -100,16 +100,15 @@ class NormalizationLayer3D : public Layer struct NormalizationLayer3DForwardPropagation : LayerForwardPropagation { - explicit NormalizationLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); + NormalizationLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair 
get_outputs_pair() const override; - void set(const Index& = 0, Layer* = 0) final; + void set(const Index& = 0, Layer* = 0) override; void print() const; Tensor outputs; - Tensor normalized_inputs; Tensor means; Tensor standard_deviations; @@ -121,12 +120,12 @@ struct NormalizationLayer3DForwardPropagation : LayerForwardPropagation struct NormalizationLayer3DBackPropagation : LayerBackPropagation { - explicit NormalizationLayer3DBackPropagation(const Index& new_batch_samples_number = 0, + NormalizationLayer3DBackPropagation(const Index& new_batch_samples_number = 0, Layer* new_layer = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/normalized_squared_error.h b/opennn/normalized_squared_error.h index 0570564f3..c7ee12203 100644 --- a/opennn/normalized_squared_error.h +++ b/opennn/normalized_squared_error.h @@ -19,7 +19,7 @@ class NormalizedSquaredError : public LossIndex public: - explicit NormalizedSquaredError(NeuralNetwork* = nullptr, DataSet* = nullptr); + NormalizedSquaredError(NeuralNetwork* = nullptr, DataSet* = nullptr); type get_normalization_coefficient() const; type get_selection_normalization_coefficient() const; @@ -34,7 +34,7 @@ class NormalizedSquaredError : public LossIndex void set_default(); - void set_data_set(DataSet* new_data_set) final; + void set_data_set(DataSet* new_data_set) override; type calculate_normalization_coefficient(const Tensor&, const Tensor&) const; @@ -44,34 +44,34 @@ class NormalizedSquaredError : public LossIndex void calculate_error(const Batch&, const ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_output_delta(const Batch&, ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; // Back propagation LM void calculate_error_lm(const Batch&, const ForwardPropagation&, - 
BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_output_delta_lm(const Batch&, ForwardPropagation&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_gradient_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_hessian_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; - string get_loss_method() const final; - string get_error_type_text() const final; + string get_loss_method() const override; + string get_error_type_text() const override; virtual void from_XML(const XMLDocument&) const; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; //protected: diff --git a/opennn/optimization_algorithm.cpp b/opennn/optimization_algorithm.cpp index 3840d008e..736caaed7 100644 --- a/opennn/optimization_algorithm.cpp +++ b/opennn/optimization_algorithm.cpp @@ -371,14 +371,14 @@ string OptimizationAlgorithm::write_time(const type& time) const void TrainingResults::save(const filesystem::path& file_name) const { - const Tensor final_results = write_final_results(); + const Tensor final_results = write_final_results(); ofstream file(file_name); if(!file) return; - for(Index i = 0; i < final_results.dimension(0); i++) - file << final_results(i,0) << "; " << final_results(i,1) << "\n"; + for(Index i = 0; i < final_results.dimension(0); i++) + file << final_results(i,0) << "; " << final_results(i,1) << "\n"; file.close(); } @@ -400,33 +400,33 @@ void TrainingResults::print(const string &message) } -Tensor TrainingResults::write_final_results(const Index& precision) const +Tensor TrainingResults::write_final_results(const Index& precision) const { - Tensor final_results(6, 2); + Tensor final_results(6, 2); - final_results(0,0) = "Epochs number"; - final_results(1,0) = "Elapsed time"; - final_results(2,0) = "Stopping criterion"; - final_results(3,0) =
"Training error"; - final_results(4,0) = "Selection error"; + final_results(0,0) = "Epochs number"; + final_results(1,0) = "Elapsed time"; + final_results(2,0) = "Stopping criterion"; + final_results(3,0) = "Training error"; + final_results(4,0) = "Selection error"; const Index size = training_error_history.size(); if(size == 0) { - final_results(0,1) = "NA"; - final_results(1,1) = "NA"; - final_results(2,1) = "NA"; - final_results(3,1) = "NA"; - final_results(4,1) = "NA"; + final_results(0,1) = "NA"; + final_results(1,1) = "NA"; + final_results(2,1) = "NA"; + final_results(3,1) = "NA"; + final_results(4,1) = "NA"; - return final_results; + return final_results; } - final_results(0,1) = to_string(training_error_history.size()-1); - final_results(1,1) = elapsed_time; - final_results(2,1) = write_stopping_condition(); - final_results(3,1) = to_string(training_error_history(size-1)); + final_results(0,1) = to_string(training_error_history.size()-1); + final_results(1,1) = elapsed_time; + final_results(2,1) = write_stopping_condition(); + final_results(3,1) = to_string(training_error_history(size-1)); // Final selection error @@ -437,9 +437,9 @@ Tensor TrainingResults::write_final_results(const Index& precision) c ?
buffer << "NAN" : buffer << setprecision(precision) << selection_error_history(size-1); - final_results(4,1) = buffer.str(); + final_results(4,1) = buffer.str(); - return final_results; + return final_results; } diff --git a/opennn/optimization_algorithm.h b/opennn/optimization_algorithm.h index b8e6a6f64..35a400cfc 100644 --- a/opennn/optimization_algorithm.h +++ b/opennn/optimization_algorithm.h @@ -21,7 +21,7 @@ class OptimizationAlgorithm public: - explicit OptimizationAlgorithm(LossIndex* = nullptr); + OptimizationAlgorithm(LossIndex* = nullptr); enum class StoppingCondition{None, MinimumLossDecrease, @@ -122,7 +122,7 @@ class OptimizationAlgorithm struct OptimizationAlgorithmData { - explicit OptimizationAlgorithmData(); + OptimizationAlgorithmData(); void print() const; @@ -135,7 +135,7 @@ struct OptimizationAlgorithmData struct TrainingResults { - explicit TrainingResults(const Index& = 0); + TrainingResults(const Index& = 0); string write_stopping_condition() const; @@ -151,7 +151,7 @@ struct TrainingResults OptimizationAlgorithm::StoppingCondition stopping_condition = OptimizationAlgorithm::StoppingCondition::None; - Tensor write_final_results(const Index& = 3) const; + Tensor write_final_results(const Index& = 3) const; void resize_training_error_history(const Index&); diff --git a/opennn/perceptron_layer.h b/opennn/perceptron_layer.h index 0ad11b586..5dc32dce5 100644 --- a/opennn/perceptron_layer.h +++ b/opennn/perceptron_layer.h @@ -41,12 +41,12 @@ class PerceptronLayer : public Layer const ActivationFunction& = PerceptronLayer::ActivationFunction::HyperbolicTangent, const string& = "perceptron_layer"); - dimensions get_input_dimensions() const final; - dimensions get_output_dimensions() const final; + dimensions get_input_dimensions() const override; + dimensions get_output_dimensions() const override; - Tensor get_parameters() const final; + Tensor get_parameters() const override; - Index get_parameters_number() const final; + Index
get_parameters_number() const override; type get_dropout_rate() const; const PerceptronLayer::ActivationFunction& get_activation_function() const; @@ -58,12 +58,12 @@ class PerceptronLayer : public Layer const PerceptronLayer::ActivationFunction & = PerceptronLayer::ActivationFunction::HyperbolicTangent, const string& = "perceptron_layer"); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; - void set_parameters(const Tensor&, const Index& index = 0) final; - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters(const Tensor&, const Index& index = 0) override; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void set_activation_function(const ActivationFunction&); void set_activation_function(const string&); @@ -79,34 +79,34 @@ class PerceptronLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void back_propagate_lm(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; void insert_squared_errors_Jacobian_lm(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; string get_activation_function_string_expression() const; void print() const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) 
override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/perceptron_layer_cuda.h" @@ -128,11 +128,11 @@ class PerceptronLayer : public Layer struct PerceptronLayerForwardPropagation : LayerForwardPropagation { - explicit PerceptronLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + PerceptronLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -144,11 +144,11 @@ struct PerceptronLayerForwardPropagation : LayerForwardPropagation struct PerceptronLayerBackPropagation : LayerBackPropagation { - explicit PerceptronLayerBackPropagation(const Index& = 0, Layer* = nullptr); + PerceptronLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -162,11 +162,11 @@ struct PerceptronLayerBackPropagation : LayerBackPropagation struct PerceptronLayerBackPropagationLM : LayerBackPropagationLM { - explicit PerceptronLayerBackPropagationLM(const Index& = 0, Layer* = nullptr); + PerceptronLayerBackPropagationLM(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/perceptron_layer_3d.h b/opennn/perceptron_layer_3d.h index 7e9939ff0..c5c82d5dc 100644 --- a/opennn/perceptron_layer_3d.h +++ b/opennn/perceptron_layer_3d.h @@ -28,7 +28,7 @@ class PerceptronLayer3D : public Layer Linear, RectifiedLinear}; - explicit PerceptronLayer3D(const Index& = 0, + PerceptronLayer3D(const Index& = 0, const Index& = 0, const Index& = 0, const ActivationFunction& = 
PerceptronLayer3D::ActivationFunction::HyperbolicTangent); @@ -38,16 +38,16 @@ class PerceptronLayer3D : public Layer Index get_neurons_number() const; // @todo - dimensions get_input_dimensions() const final + dimensions get_input_dimensions() const override { throw runtime_error("XXX"); } - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; - Index get_parameters_number() const final; + Index get_parameters_number() const override; type get_dropout_rate() const; - Tensor get_parameters() const final; + Tensor get_parameters() const override; const PerceptronLayer3D::ActivationFunction& get_activation_function() const; @@ -58,18 +58,18 @@ class PerceptronLayer3D : public Layer const Index& = 0, const PerceptronLayer3D::ActivationFunction& = PerceptronLayer3D::ActivationFunction::HyperbolicTangent); - void set_input_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; void set_inputs_depth(const Index&); - void set_output_dimensions(const dimensions&) final; + void set_output_dimensions(const dimensions&) override; - void set_parameters(const Tensor&, const Index& index = 0) final; + void set_parameters(const Tensor&, const Index& index = 0) override; void set_activation_function(const ActivationFunction&); void set_activation_function(const string&); void set_dropout_rate(const type&); - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void set_parameters_glorot(); void calculate_combinations(const Tensor&, @@ -82,21 +82,21 @@ class PerceptronLayer3D : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void add_deltas(const vector>&) const; void 
insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/perceptron_layer_3d_cuda.h" @@ -127,11 +127,11 @@ class PerceptronLayer3D : public Layer struct PerceptronLayer3DForwardPropagation : LayerForwardPropagation { - explicit PerceptronLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); + PerceptronLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -143,11 +143,11 @@ struct PerceptronLayer3DForwardPropagation : LayerForwardPropagation struct PerceptronLayer3DBackPropagation : LayerBackPropagation { - explicit PerceptronLayer3DBackPropagation(const Index& = 0, Layer* = 0); + PerceptronLayer3DBackPropagation(const Index& = 0, Layer* = 0); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/pooling_layer.h b/opennn/pooling_layer.h index 25720055c..55081e664 100644 --- a/opennn/pooling_layer.h +++ b/opennn/pooling_layer.h @@ -28,7 +28,7 @@ class PoolingLayer : public Layer enum class PoolingMethod{MaxPooling, AveragePooling}; - explicit PoolingLayer(const dimensions& = {2, 2, 1}, // Input dimensions {height,width,channels} + PoolingLayer(const dimensions& = {2, 2, 1}, // Input dimensions {height,width,channels} const dimensions& = { 2, 2 }, // Pool dimensions {pool_height,pool_width} const dimensions& = { 2, 2 }, // Stride dimensions {row_stride, column_stride} const dimensions& = { 0, 0 }, // Padding dimensions {padding_height, 
padding_width} @@ -81,7 +81,7 @@ class PoolingLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void forward_propagate_max_pooling(const Tensor&, unique_ptr&, @@ -94,7 +94,7 @@ class PoolingLayer : public Layer void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void back_propagate_max_pooling(const Tensor&, const Tensor&, @@ -105,8 +105,8 @@ class PoolingLayer : public Layer const Tensor&, unique_ptr&) const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; void print() const; @@ -139,11 +139,11 @@ class PoolingLayer : public Layer struct PoolingLayerForwardPropagation : LayerForwardPropagation { - explicit PoolingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + PoolingLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -157,11 +157,11 @@ struct PoolingLayerForwardPropagation : LayerForwardPropagation struct PoolingLayerBackPropagation : LayerBackPropagation { - explicit PoolingLayerBackPropagation(const Index& = 0, Layer* = nullptr); + PoolingLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/probabilistic_layer.h b/opennn/probabilistic_layer.h index ec9d75a2b..442735ad2 100644 --- a/opennn/probabilistic_layer.h +++ b/opennn/probabilistic_layer.h @@ -22,11 +22,11 @@ namespace opennn struct ProbabilisticLayerForwardPropagation : LayerForwardPropagation { - explicit 
ProbabilisticLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + ProbabilisticLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -37,11 +37,11 @@ struct ProbabilisticLayerForwardPropagation : LayerForwardPropagation struct ProbabilisticLayerBackPropagation : LayerBackPropagation { - explicit ProbabilisticLayerBackPropagation(const Index& = 0, Layer* = nullptr); + ProbabilisticLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -61,12 +61,12 @@ struct ProbabilisticLayerBackPropagation : LayerBackPropagation struct ProbabilisticLayerBackPropagationLM : LayerBackPropagationLM { - explicit ProbabilisticLayerBackPropagationLM(const Index& new_batch_samples_number = 0, + ProbabilisticLayerBackPropagationLM(const Index& new_batch_samples_number = 0, Layer* new_layer = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -93,68 +93,68 @@ class ProbabilisticLayer : public Layer enum class ActivationFunction { Binary, Logistic, Competitive, Softmax }; - explicit ProbabilisticLayer(const dimensions& = {0}, + ProbabilisticLayer(const dimensions& = {0}, const dimensions& = {0}, const string& = "probabilistic_layer"); - dimensions get_input_dimensions() const final; - dimensions get_output_dimensions() const final; + dimensions get_input_dimensions() const override; + dimensions get_output_dimensions() const override; const type& get_decision_threshold() const; const ActivationFunction& get_activation_function() const; string 
get_activation_function_string() const; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; void set(const dimensions& = {0}, const dimensions & = {0}, const string& = "probabilistic_layer"); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; - void set_parameters(const Tensor&, const Index& index = 0) final; + void set_parameters(const Tensor&, const Index& index = 0) override; void set_decision_threshold(const type&); void set_activation_function(const ActivationFunction&); void set_activation_function(const string&); - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void calculate_combinations(const Tensor&, Tensor&) const; void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; void insert_squared_errors_Jacobian_lm(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; string write_binary_expression(const vector&, const vector&) const; string write_logistic_expression(const vector&, const vector&) const; string write_competitive_expression(const vector&, const vector&) const; string write_softmax_expression(const vector&, const vector&) const; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; string write_combinations(const 
vector&) const; string write_activations(const vector&) const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; void print() const; diff --git a/opennn/probabilistic_layer_3d.h b/opennn/probabilistic_layer_3d.h index b083a3f89..17eb7dde8 100644 --- a/opennn/probabilistic_layer_3d.h +++ b/opennn/probabilistic_layer_3d.h @@ -25,7 +25,7 @@ class ProbabilisticLayer3D : public Layer public: - explicit ProbabilisticLayer3D(const Index& = 0, const Index& = 0, const Index& = 0); + ProbabilisticLayer3D(const Index& = 0, const Index& = 0, const Index& = 0); enum class ActivationFunction{Softmax, Competitive}; @@ -34,13 +34,13 @@ class ProbabilisticLayer3D : public Layer Index get_neurons_number() const; // @todo - dimensions get_input_dimensions() const final + dimensions get_input_dimensions() const override { throw runtime_error("XXX"); } - dimensions get_output_dimensions() const final; + dimensions get_output_dimensions() const override; const type& get_decision_threshold() const; @@ -50,11 +50,11 @@ class ProbabilisticLayer3D : public Layer void set(const Index& = 0, const Index& = 0, const Index& = 0); - void set_input_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; void set_inputs_depth(const Index&); - void set_output_dimensions(const dimensions&) final; + void set_output_dimensions(const dimensions&) override; - void set_parameters(const Tensor&, const Index& index = 0) final; + void set_parameters(const Tensor&, const Index& index = 0) override; void set_decision_threshold(const type&); void set_activation_function(const ActivationFunction&); @@ -62,13 +62,13 @@ class ProbabilisticLayer3D : public Layer // Parameters - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; // 
Parameters initialization - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void set_parameters_glorot(); // Forward propagation @@ -82,14 +82,14 @@ class ProbabilisticLayer3D : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; // Gradient void back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; void calculate_combinations_derivatives(const Tensor&, const Tensor&, @@ -98,12 +98,12 @@ class ProbabilisticLayer3D : public Layer void insert_gradient(unique_ptr&, const Index&, - Tensor&) const final; + Tensor&) const override; // Serialization - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; #ifdef OPENNN_CUDA #include "../../opennn_cuda/opennn_cuda/probabilistic_layer_3d_cuda.h" @@ -133,11 +133,11 @@ class ProbabilisticLayer3D : public Layer struct ProbabilisticLayer3DForwardPropagation : LayerForwardPropagation { - explicit ProbabilisticLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); + ProbabilisticLayer3DForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -147,11 +147,11 @@ struct ProbabilisticLayer3DForwardPropagation : LayerForwardPropagation struct ProbabilisticLayer3DBackPropagation : LayerBackPropagation { - explicit ProbabilisticLayer3DBackPropagation(const Index& = 0, Layer* = nullptr); + ProbabilisticLayer3DBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) 
final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/quasi_newton_method.h b/opennn/quasi_newton_method.h index 8e70a09fa..99f08dd39 100644 --- a/opennn/quasi_newton_method.h +++ b/opennn/quasi_newton_method.h @@ -24,7 +24,7 @@ class QuasiNewtonMethod : public OptimizationAlgorithm enum class InverseHessianApproximationMethod{DFP, BFGS}; - explicit QuasiNewtonMethod(LossIndex* = nullptr); + QuasiNewtonMethod(LossIndex* = nullptr); const LearningRateAlgorithm& get_learning_rate_algorithm() const; LearningRateAlgorithm* get_learning_rate_algorithm(); @@ -51,9 +51,9 @@ class QuasiNewtonMethod : public OptimizationAlgorithm void set_inverse_hessian_approximation_method(const InverseHessianApproximationMethod&); void set_inverse_hessian_approximation_method(const string&); - void set_display(const bool&) final; + void set_display(const bool&) override; - void set_default() final; + void set_default() override; // Stopping criteria @@ -75,17 +75,17 @@ class QuasiNewtonMethod : public OptimizationAlgorithm void update_parameters(const Batch& , ForwardPropagation& , BackPropagation& , QuasiNewtonMehtodData&) const; - TrainingResults perform_training() final; + TrainingResults perform_training() override; - string write_optimization_algorithm_type() const final; + string write_optimization_algorithm_type() const override; // Serialization - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; - Tensor to_string_matrix() const final; + Tensor to_string_matrix() const override; private: @@ -112,7 +112,7 @@ class QuasiNewtonMethod : public OptimizationAlgorithm struct QuasiNewtonMehtodData : public OptimizationAlgorithmData { - explicit QuasiNewtonMehtodData(QuasiNewtonMethod* new_quasi_newton_method = nullptr) + QuasiNewtonMehtodData(QuasiNewtonMethod* new_quasi_newton_method = nullptr) { 
set(new_quasi_newton_method); } diff --git a/opennn/recurrent_layer.h b/opennn/recurrent_layer.h index 0d455bfab..d92202ddf 100644 --- a/opennn/recurrent_layer.h +++ b/opennn/recurrent_layer.h @@ -33,15 +33,15 @@ class RecurrentLayer : public Layer SoftSign, HardSigmoid}; - explicit RecurrentLayer(const dimensions & = {}, const dimensions& = {}); + RecurrentLayer(const dimensions & = {}, const dimensions& = {}); - dimensions get_input_dimensions() const final; - dimensions get_output_dimensions() const final; + dimensions get_input_dimensions() const override; + dimensions get_output_dimensions() const override; Index get_timesteps() const; - Index get_parameters_number() const final; - Tensor get_parameters() const final; + Index get_parameters_number() const override; + Tensor get_parameters() const override; const RecurrentLayer::ActivationFunction& get_activation_function() const; @@ -49,18 +49,18 @@ class RecurrentLayer : public Layer void set(const dimensions& = {}, const dimensions& = {}); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; void set_timesteps(const Index&); - void set_parameters(const Tensor&, const Index&) final; + void set_parameters(const Tensor&, const Index&) override; void set_activation_function(const ActivationFunction&); void set_activation_function(const string&); - void set_parameters_constant(const type&) final; - void set_parameters_random() final; + void set_parameters_constant(const type&) override; + void set_parameters_random() override; void calculate_combinations(const Tensor&, Tensor&) const; @@ -70,23 +70,23 @@ class RecurrentLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void insert_gradient(unique_ptr&, const Index& , - Tensor&) const final; + Tensor&) const override; void 
back_propagate(const vector>&, const vector>&, unique_ptr&, - unique_ptr&) const final; + unique_ptr&) const override; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; string get_activation_function_string_expression() const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; private: @@ -113,11 +113,11 @@ class RecurrentLayer : public Layer struct RecurrentLayerForwardPropagation : LayerForwardPropagation { - explicit RecurrentLayerForwardPropagation(const Index& = 0, Layer* = nullptr); + RecurrentLayerForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; @@ -132,11 +132,11 @@ struct RecurrentLayerForwardPropagation : LayerForwardPropagation struct RecurrentLayerBackPropagation : LayerBackPropagation { - explicit RecurrentLayerBackPropagation(const Index& = 0, Layer* = nullptr); + RecurrentLayerBackPropagation(const Index& = 0, Layer* = nullptr); vector> get_input_derivative_pairs() const; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/response_optimization.h b/opennn/response_optimization.h index 612126ef3..c978721e4 100644 --- a/opennn/response_optimization.h +++ b/opennn/response_optimization.h @@ -24,7 +24,7 @@ class ResponseOptimization enum class Condition { None, Between, EqualTo, LessEqualTo, GreaterEqualTo, Minimum, Maximum }; - explicit ResponseOptimization(NeuralNetwork* = nullptr, DataSet* = nullptr); + ResponseOptimization(NeuralNetwork* = nullptr, DataSet* = nullptr); // Get @@ -80,7 +80,7 
@@ class ResponseOptimization struct ResponseOptimizationResults { - explicit ResponseOptimizationResults(NeuralNetwork* new_neural_network = nullptr); + ResponseOptimizationResults(NeuralNetwork* new_neural_network = nullptr); DataSet* data_set = nullptr; diff --git a/opennn/scaling_layer_2d.h b/opennn/scaling_layer_2d.h index ed5168c65..35e9097a5 100644 --- a/opennn/scaling_layer_2d.h +++ b/opennn/scaling_layer_2d.h @@ -20,7 +20,7 @@ class ScalingLayer2D : public Layer public: - explicit ScalingLayer2D(const dimensions& = {0}); + ScalingLayer2D(const dimensions& = {0}); dimensions get_input_dimensions() const; dimensions get_output_dimensions() const; @@ -40,8 +40,8 @@ class ScalingLayer2D : public Layer void set(const dimensions& = {0}); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; void set_descriptives(const vector&); void set_item_descriptives(const Index&, const Descriptives&); @@ -65,7 +65,7 @@ class ScalingLayer2D : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; string write_no_scaling_expression(const vector&, const vector&) const; @@ -75,12 +75,12 @@ class ScalingLayer2D : public Layer string write_standard_deviation_expression(const vector&, const vector&) const; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; void print() const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; private: @@ -95,11 +95,11 @@ class ScalingLayer2D : public Layer struct ScalingLayer2DForwardPropagation : LayerForwardPropagation { - explicit ScalingLayer2DForwardPropagation(const Index& 
= 0, Layer* = nullptr); + ScalingLayer2DForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/scaling_layer_4d.h b/opennn/scaling_layer_4d.h index b72e092ea..1dab1593a 100644 --- a/opennn/scaling_layer_4d.h +++ b/opennn/scaling_layer_4d.h @@ -19,7 +19,7 @@ class ScalingLayer4D : public Layer public: - explicit ScalingLayer4D(const dimensions& = {0, 0, 0, 0}); + ScalingLayer4D(const dimensions& = {0, 0, 0, 0}); dimensions get_input_dimensions() const; dimensions get_output_dimensions() const; @@ -32,12 +32,12 @@ class ScalingLayer4D : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; void print() const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; private: @@ -51,11 +51,11 @@ class ScalingLayer4D : public Layer struct ScalingLayer4DForwardPropagation : LayerForwardPropagation { - explicit ScalingLayer4DForwardPropagation(const Index& = 0, Layer* = nullptr); + ScalingLayer4DForwardPropagation(const Index& = 0, Layer* = nullptr); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/stochastic_gradient_descent.h b/opennn/stochastic_gradient_descent.h index 2ec5788d3..9a9d26293 100644 --- a/opennn/stochastic_gradient_descent.h +++ b/opennn/stochastic_gradient_descent.h @@ -21,7 +21,7 @@ class StochasticGradientDescent : public OptimizationAlgorithm public: - explicit StochasticGradientDescent(LossIndex* = nullptr); + StochasticGradientDescent(LossIndex* = nullptr); const type& 
get_initial_learning_rate() const; const type& get_initial_decay() const; @@ -31,7 +31,7 @@ class StochasticGradientDescent : public OptimizationAlgorithm const type& get_loss_goal() const; const type& get_maximum_time() const; - void set_default() final; + void set_default() override; void set_batch_samples_number(const Index&); @@ -50,15 +50,15 @@ class StochasticGradientDescent : public OptimizationAlgorithm void update_parameters(BackPropagation& , StochasticGradientDescentData&) const; - TrainingResults perform_training() final; + TrainingResults perform_training() override; - string write_optimization_algorithm_type() const final; + string write_optimization_algorithm_type() const override; - Tensor to_string_matrix() const final; + Tensor to_string_matrix() const override; - void from_XML(const XMLDocument&) final; + void from_XML(const XMLDocument&) override; - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: @@ -90,7 +90,7 @@ class StochasticGradientDescent : public OptimizationAlgorithm struct StochasticGradientDescentData : public OptimizationAlgorithmData { - explicit StochasticGradientDescentData(StochasticGradientDescent* = nullptr); + StochasticGradientDescentData(StochasticGradientDescent* = nullptr); void set(StochasticGradientDescent* = nullptr); diff --git a/opennn/strings_utilities.cpp b/opennn/strings_utilities.cpp index 4c3b1dace..8e7d214c7 100644 --- a/opennn/strings_utilities.cpp +++ b/opennn/strings_utilities.cpp @@ -365,7 +365,7 @@ void replace_all_word_appearances(string& text, const string& to_replace, const size_t previous_position; const string underscore = "_"; - // Reserve a rough estimate of the final size of the chain + // Reserve a rough estimate of the final size of the chain buffer.reserve(text.size()); @@ -414,7 +414,7 @@ void replace_all_appearances(string& text, string const& to_replace, string cons size_t position = 0; size_t previous_position; - // Reserves rough estimate of final 
size of string + // Reserves rough estimate of final size of string buffer.reserve(text.size()); diff --git a/opennn/strings_utilities.h b/opennn/strings_utilities.h index 2a8889b65..1b7869937 100644 --- a/opennn/strings_utilities.h +++ b/opennn/strings_utilities.h @@ -269,9 +269,9 @@ class TextGenerationAlphabet { public: - explicit TextGenerationAlphabet(); + TextGenerationAlphabet(); - explicit TextGenerationAlphabet(const string&); + TextGenerationAlphabet(const string&); virtual ~TextGenerationAlphabet(); diff --git a/opennn/testing_analysis.h b/opennn/testing_analysis.h index 565896e5d..e68d59df4 100644 --- a/opennn/testing_analysis.h +++ b/opennn/testing_analysis.h @@ -20,7 +20,7 @@ class TestingAnalysis public: - explicit TestingAnalysis(NeuralNetwork* = nullptr, DataSet* = nullptr); + TestingAnalysis(NeuralNetwork* = nullptr, DataSet* = nullptr); struct GoodnessOfFitAnalysis { diff --git a/opennn/text_data_set.h b/opennn/text_data_set.h index 0d4bc6a1b..aed780697 100644 --- a/opennn/text_data_set.h +++ b/opennn/text_data_set.h @@ -19,7 +19,7 @@ class TextDataSet : public DataSet public: - explicit TextDataSet(); + TextDataSet(); const Index& get_short_words_length() const; const Index& get_long_words_length() const; diff --git a/opennn/time_series_data_set.h b/opennn/time_series_data_set.h index a84b0d1b3..c0647f137 100644 --- a/opennn/time_series_data_set.h +++ b/opennn/time_series_data_set.h @@ -19,11 +19,11 @@ class TimeSeriesDataSet : public DataSet public: - explicit TimeSeriesDataSet(const Index& = 0, + TimeSeriesDataSet(const Index& = 0, const dimensions& = {}, const dimensions& = {}); - explicit TimeSeriesDataSet(const filesystem::path&, + TimeSeriesDataSet(const filesystem::path&, const string&, const bool& = true, const bool& = false, @@ -47,10 +47,10 @@ class TimeSeriesDataSet : public DataSet Tensor calculate_autocorrelations(const Index& = 10) const; Tensor calculate_cross_correlations(const Index& = 10) const; - void print() const final; + 
void print() const override; - void to_XML(XMLPrinter&) const final; - void from_XML(const XMLDocument&) final; + void to_XML(XMLPrinter&) const override; + void from_XML(const XMLDocument&) override; Index get_time_series_time_raw_variable_index() const; diff --git a/opennn/tinyxml2.h b/opennn/tinyxml2.h index c999315f0..ca02e129a 100644 --- a/opennn/tinyxml2.h +++ b/opennn/tinyxml2.h @@ -469,7 +469,7 @@ class MemPoolT : public MemPool // false, no children of this node or its siblings will be visited. // // All flavors of Visit have a default implementation that returns 'true'(continue -// visiting). You need to only final that are interesting to you. +// visiting). You need to only override methods that are interesting to you. // // Generally Accept() is called on the XMLDocument, although all nodes support visiting. // @@ -2213,8 +2213,8 @@ class TINYXML2_LIB XMLPrinter : public XMLVisitor protected: virtual bool CompactMode(const XMLElement&) { return _compactMode; } -// Prints out the space before an element. You may final to change -// the space and tabs used. A PrintSpace() final should call Print(). +// Prints out the space before an element. You may override to change +// the space and tabs used. A PrintSpace() override should call Print(). 
virtual void PrintSpace(int depth ); void Print(const char* format, ...); diff --git a/opennn/training_strategy.h b/opennn/training_strategy.h index 3f17a07b1..b084de6e0 100644 --- a/opennn/training_strategy.h +++ b/opennn/training_strategy.h @@ -31,7 +31,7 @@ class TrainingStrategy public: - explicit TrainingStrategy(NeuralNetwork* = nullptr, DataSet* = nullptr); + TrainingStrategy(NeuralNetwork* = nullptr, DataSet* = nullptr); enum class LossMethod { diff --git a/opennn/transformer.cpp b/opennn/transformer.cpp index 40e300ea6..d56ca5d6f 100644 --- a/opennn/transformer.cpp +++ b/opennn/transformer.cpp @@ -274,11 +274,11 @@ void Transformer::set(const Index& input_length, // Output layer - unique_ptr final_layer + unique_ptr final_layer = make_unique(input_length, embedding_depth, input_dimensions); - final_layer->set_name("probabilistic"); - add_layer(std::move(final_layer)); + final_layer->set_name("probabilistic"); + add_layer(std::move(final_layer)); set_layer_inputs_indices("probabilistic", "decoder_perceptron_normalization_" + to_string(layers_number)); } diff --git a/opennn/transformer.h b/opennn/transformer.h index 28a7cadb1..fd472f974 100644 --- a/opennn/transformer.h +++ b/opennn/transformer.h @@ -24,11 +24,11 @@ class Transformer : public NeuralNetwork // Constructors - explicit Transformer(); + Transformer(); - explicit Transformer(const Tensor&); + Transformer(const Tensor&); - explicit Transformer(const initializer_list&); + Transformer(const initializer_list&); void set(const Tensor&); diff --git a/opennn/unscaling_layer.h b/opennn/unscaling_layer.h index 6eb4a8118..f2b6a4b9a 100644 --- a/opennn/unscaling_layer.h +++ b/opennn/unscaling_layer.h @@ -20,10 +20,10 @@ class UnscalingLayer : public Layer public: - explicit UnscalingLayer(const dimensions& = {0}, const string& = "unscaling_layer"); + UnscalingLayer(const dimensions& = {0}, const string& = "unscaling_layer"); dimensions get_input_dimensions() const; - dimensions 
get_output_dimensions() const final; + dimensions get_output_dimensions() const override; vector get_descriptives() const; @@ -38,8 +38,8 @@ class UnscalingLayer : public Layer void set(const Index& = 0, const string& = "unscaling_layer"); void set(const vector&, const vector&); - void set_input_dimensions(const dimensions&) final; - void set_output_dimensions(const dimensions&) final; + void set_input_dimensions(const dimensions&) override; + void set_output_dimensions(const dimensions&) override; void set_descriptives(const vector&); @@ -58,16 +58,16 @@ class UnscalingLayer : public Layer void forward_propagate(const vector>&, unique_ptr&, - const bool&) final; + const bool&) override; vector write_scalers_text() const; void print() const; - void from_XML(const XMLDocument&) final; - void to_XML(XMLPrinter&) const final; + void from_XML(const XMLDocument&) override; + void to_XML(XMLPrinter&) const override; - string get_expression(const vector& = vector(), const vector& = vector()) const final; + string get_expression(const vector& = vector(), const vector& = vector()) const override; private: @@ -82,11 +82,11 @@ class UnscalingLayer : public Layer struct UnscalingLayerForwardPropagation : LayerForwardPropagation { - explicit UnscalingLayerForwardPropagation(const Index& = 0, Layer* = 0); + UnscalingLayerForwardPropagation(const Index& = 0, Layer* = 0); - pair get_outputs_pair() const final; + pair get_outputs_pair() const override; - void set(const Index& = 0, Layer* = nullptr) final; + void set(const Index& = 0, Layer* = nullptr) override; void print() const; diff --git a/opennn/weighted_squared_error.h b/opennn/weighted_squared_error.h index 0534d5dba..3a66bff95 100644 --- a/opennn/weighted_squared_error.h +++ b/opennn/weighted_squared_error.h @@ -19,7 +19,7 @@ class WeightedSquaredError : public LossIndex public: - explicit WeightedSquaredError(NeuralNetwork* = nullptr, DataSet* = nullptr); + WeightedSquaredError(NeuralNetwork* = nullptr, DataSet* = 
nullptr); type get_positives_weight() const; type get_negatives_weight() const; @@ -35,45 +35,45 @@ class WeightedSquaredError : public LossIndex void set_weights(); - void set_normalization_coefficient() final; + void set_normalization_coefficient() override; - void set_data_set(DataSet*) final; + void set_data_set(DataSet*) override; - string get_loss_method() const final; + string get_loss_method() const override; - string get_error_type_text() const final; + string get_error_type_text() const override; // Back propagation void calculate_error(const Batch&, const ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; void calculate_output_delta(const Batch&, ForwardPropagation&, - BackPropagation&) const final; + BackPropagation&) const override; // Back propagation LM void calculate_squared_errors_lm(const Batch&, const ForwardPropagation&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_lm(const Batch&, const ForwardPropagation&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_gradient_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; void calculate_error_hessian_lm(const Batch&, - BackPropagationLM&) const final; + BackPropagationLM&) const override; // Serialization void from_XML(const XMLDocument&); - void to_XML(XMLPrinter&) const final; + void to_XML(XMLPrinter&) const override; private: diff --git a/opennn/word_bag.h b/opennn/word_bag.h index c01c4d768..fd7c36af0 100644 --- a/opennn/word_bag.h +++ b/opennn/word_bag.h @@ -8,7 +8,7 @@ namespace opennn struct WordBag { - explicit WordBag() {} + WordBag() {} vector words; Tensor frequencies;