Commit 92dd22a

clean

RoberLopez committed Dec 4, 2024
1 parent 3550fa3

Showing 35 changed files with 302 additions and 339 deletions.
2 changes: 1 addition & 1 deletion examples/airfoil_self_noise/main.cpp
@@ -65,7 +65,7 @@ int main()
//training_strategy.load("../data/training_strategy.xml");

training_strategy.perform_training();

/*
ModelSelection model_selection(&training_strategy);
model_selection.perform_inputs_selection();
39 changes: 11 additions & 28 deletions opennn/adaptive_moment_estimation.cpp
@@ -11,8 +11,6 @@
#include "adaptive_moment_estimation.h"
#include "forward_propagation.h"
#include "back_propagation.h"
#include "scaling_layer_2d.h"
#include "unscaling_layer.h"

namespace opennn
{
@@ -365,54 +363,39 @@ TrainingResults AdaptiveMomentEstimation::perform_training()
cout << "Elapsed time: " << write_time(elapsed_time) << endl;
}

// Training history
// @todo loss and error missmatch

stop_training = true;

if(epoch == maximum_epochs_number)
{
if(display) cout << "Epoch " << epoch << "\nMaximum epochs number reached: " << epoch << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumEpochsNumber;
}

if(elapsed_time >= maximum_time)
else if(elapsed_time >= maximum_time)
{
if(display) cout << "Epoch " << epoch << "\nMaximum training time reached: " << write_time(elapsed_time) << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumTime;
}

// @todo loss and error missmatch

if(results.training_error_history(epoch) < training_loss_goal)
else if(results.training_error_history(epoch) < training_loss_goal)
{
stop_training = true;

results.stopping_condition = StoppingCondition::LossGoal;

if(display) cout << "Epoch " << epoch << "\nLoss goal reached: " << results.training_error_history(epoch) << endl;
}

if(training_accuracy >= training_accuracy_goal)
else if(training_accuracy >= training_accuracy_goal)
{
stop_training = true;

results.stopping_condition = StoppingCondition::LossGoal;

if(display) cout << "Epoch " << epoch << "\nAccuracy goal reached: " << training_accuracy << endl;
}

if(selection_failures >= maximum_selection_failures)
else if(selection_failures >= maximum_selection_failures)
{
if(display) cout << "Epoch " << epoch << "\nMaximum selection failures reached: " << selection_failures << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumSelectionErrorIncreases;
}
else
{
stop_training = false;
}

if(stop_training)
{
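The main change in this file restructures the per-epoch stopping checks: independent if blocks, each of which set stop_training = true, become a single else-if chain that assumes stopping up front and resets stop_training = false when no condition fires, so only the first satisfied condition is reported. A minimal, self-contained sketch of that control flow, with simplified names rather than the actual OpenNN members:

#include <iostream>

enum class StoppingCondition { None, MaximumEpochsNumber, MaximumTime, LossGoal };

// Hypothetical stand-in for the per-epoch check; not the OpenNN signature.
StoppingCondition check_stopping(int epoch, int maximum_epochs_number,
                                 double elapsed_time, double maximum_time,
                                 double training_error, double training_loss_goal,
                                 bool& stop_training)
{
    stop_training = true; // assume we stop; reset below if no condition fires

    if(epoch == maximum_epochs_number)
        return StoppingCondition::MaximumEpochsNumber;
    else if(elapsed_time >= maximum_time)
        return StoppingCondition::MaximumTime;
    else if(training_error < training_loss_goal)
        return StoppingCondition::LossGoal;

    stop_training = false; // no condition fired; keep training
    return StoppingCondition::None;
}

int main()
{
    bool stop = false;
    check_stopping(100, 1000, 5.0, 3600.0, 0.2, 0.01, stop);
    std::cout << "stop_training: " << std::boolalpha << stop << "\n";
}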
4 changes: 2 additions & 2 deletions opennn/bounding_layer.h
@@ -64,7 +64,7 @@ class BoundingLayer : public Layer

// Serialization

void print() const;
void print() const override;

void from_XML(const XMLDocument&) override;

@@ -88,7 +88,7 @@ struct BoundingLayerForwardPropagation : LayerForwardPropagation

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

Tensor<type, 2> outputs;
};
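Most of the header edits in this commit add the override specifier to virtual functions that previously overrode implicitly. A small sketch, independent of the OpenNN classes, of what override buys:

struct Layer
{
    virtual ~Layer() = default;
    virtual void print() const {}
};

struct BoundingLayer : Layer
{
    // With override, any signature mismatch is a compile error instead of a
    // silently added overload: dropping const here would no longer override
    // Layer::print, and the compiler would reject the declaration.
    void print() const override {}
};

int main()
{
    BoundingLayer layer;
    const Layer& base = layer;
    base.print(); // dispatches to BoundingLayer::print
}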
6 changes: 3 additions & 3 deletions opennn/convolutional_layer.h
@@ -203,7 +203,7 @@ struct ConvolutionalLayerForwardPropagation : LayerForwardPropagation

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

Tensor<type, 4> outputs;

@@ -220,11 +220,11 @@ struct ConvolutionalLayerBackPropagation : LayerBackPropagation
{
ConvolutionalLayerBackPropagation(const Index& = 0, Layer* = nullptr);

vector<pair<type*, dimensions>> get_input_derivative_pairs() const;
vector<pair<type*, dimensions>> get_input_derivative_pairs() const override;

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

//Tensor<type, 3> image_convolutions_derivatives;

14 changes: 7 additions & 7 deletions opennn/embedding_layer.h
@@ -35,7 +35,7 @@ class EmbeddingLayer : public Layer
Index get_depth() const;
bool get_positional_encoding() const;

dimensions get_input_dimensions() const;
dimensions get_input_dimensions() const override;
dimensions get_output_dimensions() const override;

Index get_parameters_number() const override;
@@ -69,9 +69,9 @@ class EmbeddingLayer : public Layer

void add_deltas(const vector<pair<type*, dimensions>>&) const;

void insert_gradient(unique_ptr<LayerBackPropagation>& back_propagation,
const Index& index,
Tensor<type, 1>& gradient) const;
void insert_gradient(unique_ptr<LayerBackPropagation>&,
const Index&,
Tensor<type, 1>&) const override;

void from_XML(const XMLDocument&) override;
void to_XML(XMLPrinter&) const override;
@@ -106,7 +106,7 @@ struct EmbeddingLayerForwardPropagation : LayerForwardPropagation

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

void build_positional_encoding_matrix();

@@ -122,11 +122,11 @@ struct EmbeddingLayerBackPropagation : LayerBackPropagation
{
EmbeddingLayerBackPropagation(const Index& = 0, Layer* = nullptr);

vector<pair<type*, dimensions>> get_input_derivative_pairs() const;
vector<pair<type*, dimensions>> get_input_derivative_pairs() const override;

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

Tensor<type, 2> sample_deltas;
Tensor<type, 2> embedding_weights_derivatives;
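This hunk also drops the parameter names from the insert_gradient declaration, which C++ allows: names in a declaration are optional documentation and need not appear, or match, until the definition. A tiny illustration with hypothetical names:

// Header style: declaration with types only.
void insert_gradient(const int&, double&);

// Implementation style: names appear where they are used.
void insert_gradient(const int& index, double& gradient)
{
    gradient += index;
}

int main()
{
    double gradient = 0.5;
    insert_gradient(3, gradient); // gradient becomes 3.5
}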
10 changes: 5 additions & 5 deletions opennn/flatten_layer.cpp
@@ -64,7 +64,7 @@ void FlattenLayer::forward_propagate(const vector<pair<type*, dimensions>>& inpu
{
const Index batch_samples_number = layer_forward_propagation->batch_samples_number;

const Index neurons_number = get_output_dimensions()[0];
const Index outputs_number = get_outputs_number();

FlattenLayerForwardPropagation* flatten_layer_forward_propagation =
static_cast<FlattenLayerForwardPropagation*>(layer_forward_propagation.get());
@@ -73,9 +73,9 @@ void FlattenLayer::forward_propagate(const vector<pair<type*, dimensions>>& inpu

memcpy(outputs_data,
input_pairs[0].first,
batch_samples_number*neurons_number*sizeof(type));
batch_samples_number*outputs_number*sizeof(type));

flatten_layer_forward_propagation->outputs = TensorMap<Tensor<type, 2>>(input_pairs[0].first, batch_samples_number, neurons_number);
flatten_layer_forward_propagation->outputs = TensorMap<Tensor<type, 2>>(input_pairs[0].first, batch_samples_number, outputs_number);
}


@@ -85,7 +85,7 @@ void FlattenLayer::back_propagate(const vector<pair<type*, dimensions>>& input_p
unique_ptr<LayerBackPropagation>& back_propagation) const
{
const Index batch_samples_number = input_pairs[0].second[0];
const Index neurons_number = get_output_dimensions()[0];
const Index outputs_number = get_outputs_number();

// Back propagation

Expand All @@ -96,7 +96,7 @@ void FlattenLayer::back_propagate(const vector<pair<type*, dimensions>>& input_p

memcpy(input_derivatives.data(),
delta_pairs[0].first,
Index(batch_samples_number * neurons_number * sizeof(type)));
Index(batch_samples_number * outputs_number * sizeof(type)));
}


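The renaming aside, the copy itself relies on Eigen tensors being contiguous in memory: flattening is a memcpy of the raw buffer followed by a reinterpretation of its dimensions. A standalone sketch of the same idea, using plain Eigen rather than the OpenNN wrappers:

#include <unsupported/Eigen/CXX11/Tensor>
#include <cstring>
#include <iostream>

int main()
{
    const int batch = 2, height = 4, width = 4, channels = 3;

    Eigen::Tensor<float, 4> inputs(batch, height, width, channels);
    inputs.setRandom();

    const int outputs_number = height * width * channels;

    // Flattening copies the contiguous buffer and reinterprets it as 2D.
    Eigen::Tensor<float, 2> outputs(batch, outputs_number);
    std::memcpy(outputs.data(), inputs.data(),
                batch * outputs_number * sizeof(float));

    // Equivalently, map the same buffer without copying.
    Eigen::TensorMap<Eigen::Tensor<float, 2>> view(inputs.data(), batch, outputs_number);

    std::cout << "flattened: " << view.dimension(0) << " x "
              << view.dimension(1) << "\n";
}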
10 changes: 5 additions & 5 deletions opennn/flatten_layer.h
@@ -29,7 +29,7 @@ class FlattenLayer : public Layer

FlattenLayer(const dimensions& = {0,0,0});

dimensions get_input_dimensions() const;
dimensions get_input_dimensions() const override;
dimensions get_output_dimensions() const override;

Index get_input_height() const;
@@ -57,7 +57,7 @@ class FlattenLayer : public Layer

void to_XML(XMLPrinter&) const override;

void print() const;
void print() const override;

#ifdef OPENNN_CUDA
#include "../../opennn_cuda/opennn_cuda/flatten_layer_cuda.h"
@@ -77,7 +77,7 @@ struct FlattenLayerForwardPropagation : LayerForwardPropagation

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

Tensor<type, 2> outputs;
};
@@ -87,11 +87,11 @@ struct FlattenLayerBackPropagation : LayerBackPropagation
{
FlattenLayerBackPropagation(const Index& = 0, Layer* = nullptr);

vector<pair<type*, dimensions>> get_input_derivative_pairs() const;
vector<pair<type*, dimensions>> get_input_derivative_pairs() const override;

void set(const Index& = 0, Layer* = nullptr) override;

void print() const;
void print() const override;

Tensor<type, 4> input_derivatives;
};
2 changes: 1 addition & 1 deletion opennn/genetic_algorithm.cpp
@@ -577,7 +577,7 @@ void GeneticAlgorithm::perform_crossover()

Index descendent_index = 0;

for(Index i = 0; i < parent_1_indices.size(); i++)
for(size_t i = 0; i < parent_1_indices.size(); i++)
{
parent_1_variables = population.chip(parent_1_indices[i], 0);

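The loop index changes from OpenNN's Index (a signed type) to size_t, presumably because std::vector::size() returns an unsigned size_t, and comparing signed against unsigned draws -Wsign-compare warnings and can misbehave near the type limits. A minimal illustration:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<int> parent_1_indices{3, 1, 4};

    // Matching the container's unsigned size type avoids the
    // signed/unsigned comparison warning in the loop condition.
    for(std::size_t i = 0; i < parent_1_indices.size(); i++)
        std::cout << parent_1_indices[i] << "\n";
}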
55 changes: 24 additions & 31 deletions opennn/levenberg_marquardt_algorithm.cpp
@@ -264,11 +264,12 @@ TrainingResults LevenbergMarquardtAlgorithm::perform_training()
loss_index->back_propagate_lm(training_batch,
training_forward_propagation,
training_back_propagation_lm);
/*

results.training_error_history(epoch) = training_back_propagation_lm.error();

if(has_selection)
{
{
/*
neural_network->forward_propagate(selection_batch.get_input_pairs(),
selection_forward_propagation,
is_training);
Expand All @@ -289,9 +290,12 @@ TrainingResults LevenbergMarquardtAlgorithm::perform_training()
if(epoch != 0 && results.selection_error_history(epoch) > results.selection_error_history(epoch-1))
selection_failures++;
}
*/
}

elapsed_time = get_elapsed_time(beginning_time);
if(epoch != 0) loss_decrease = old_loss - training_back_propagation_lm.loss;
old_loss = training_back_propagation_lm.loss;

if(display && epoch%display_period == 0)
{
Expand All @@ -301,54 +305,37 @@ TrainingResults LevenbergMarquardtAlgorithm::perform_training()
cout << "Elapsed time: " << write_time(elapsed_time) << endl;
}

stop_training = true;

if(results.training_error_history(epoch) < training_loss_goal)
{
stop_training = true;

results.stopping_condition = StoppingCondition::LossGoal;

if(display) cout << "Epoch " << epoch << "\nLoss goal reached: " << results.training_error_history(epoch) << endl;
results.stopping_condition = StoppingCondition::LossGoal;
}

if(epoch != 0) loss_decrease = old_loss - training_back_propagation_lm.loss;

if(loss_decrease < minimum_loss_decrease)
else if(loss_decrease < minimum_loss_decrease)
{
if(display) cout << "Epoch " << epoch << "\nMinimum loss decrease reached: " << loss_decrease << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MinimumLossDecrease;
}

old_loss = training_back_propagation_lm.loss;

if(selection_failures >= maximum_selection_failures)
else if(selection_failures >= maximum_selection_failures)
{
if(display) cout << "Epoch " << epoch << "Maximum selection failures reached: " << selection_failures << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumSelectionErrorIncreases;
}

if(epoch == maximum_epochs_number)
else if(epoch == maximum_epochs_number)
{
if(display) cout << "Epoch " << epoch << "\nMaximum epochs number reached: " << epoch << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumEpochsNumber;
}

if(elapsed_time >= maximum_time)
else if(elapsed_time >= maximum_time)
{
if(display) cout << "Epoch " << epoch << "Maximum training time reached: " << elapsed_time << endl;

stop_training = true;

results.stopping_condition = StoppingCondition::MaximumTime;
}
else
{
stop_training = false;
}

if(stop_training)
{
@@ -534,6 +521,12 @@ void LevenbergMarquardtAlgorithm::from_XML(const XMLDocument& document)
}


LevenbergMarquardtAlgorithmData::LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm *new_Levenberg_Marquardt_method)
{
set(new_Levenberg_Marquardt_method);
}


void LevenbergMarquardtAlgorithmData::set(LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method)
{
Levenberg_Marquardt_algorithm = new_Levenberg_Marquardt_method;
6 changes: 1 addition & 5 deletions opennn/levenberg_marquardt_algorithm.h
@@ -108,10 +108,7 @@ class LevenbergMarquardtAlgorithm : public OptimizationAlgorithm
struct LevenbergMarquardtAlgorithmData : public OptimizationAlgorithmData
{

LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method = nullptr)
{
set(new_Levenberg_Marquardt_method);
}
LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method = nullptr);

void set(LevenbergMarquardtAlgorithm* = nullptr);

Expand All @@ -133,7 +130,6 @@ struct LevenbergMarquardtAlgorithmData : public OptimizationAlgorithmData
Index epoch = 0;
};


}

#endif
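Taken together with the .cpp change above, this moves the data struct's constructor from an inline definition in the header to an out-of-line definition in the implementation file, a routine cleanup that keeps header parses cheap and avoids recompiling every includer when the body changes. A condensed schematic of the split, not the full OpenNN declarations:

class LevenbergMarquardtAlgorithm; // an incomplete type suffices in the header

// Header: declaration only; the default argument stays here.
struct LevenbergMarquardtAlgorithmData
{
    LevenbergMarquardtAlgorithmData(LevenbergMarquardtAlgorithm* = nullptr);

    void set(LevenbergMarquardtAlgorithm* = nullptr);

    LevenbergMarquardtAlgorithm* Levenberg_Marquardt_algorithm = nullptr;
};

// Implementation file: the body lives out of line, so editing it
// no longer touches the header.
LevenbergMarquardtAlgorithmData::LevenbergMarquardtAlgorithmData(
    LevenbergMarquardtAlgorithm* new_Levenberg_Marquardt_method)
{
    set(new_Levenberg_Marquardt_method);
}

void LevenbergMarquardtAlgorithmData::set(LevenbergMarquardtAlgorithm* new_method)
{
    Levenberg_Marquardt_algorithm = new_method;
}

int main()
{
    LevenbergMarquardtAlgorithmData data; // default-constructed with nullptr
}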
(25 more files not shown.)