Skip to content

Commit

Permalink
clean
Browse files Browse the repository at this point in the history
  • Loading branch information
RoberLopez committed Dec 27, 2024
1 parent 5c57fd2 commit a7947ed
Show file tree
Hide file tree
Showing 3 changed files with 98 additions and 10 deletions.
10 changes: 4 additions & 6 deletions opennn/data_set.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1258,12 +1258,11 @@ void DataSet::set_binary_raw_variables()

if(raw_variable.type == RawVariableType::Numeric)
{
/* // @todo tensor map does not work
const TensorMap<Tensor<type, 1>> data_column = tensor_map(data, variable_index);
const Tensor<type, 1> data_column = data.chip(variable_index, 1);

if(is_binary(data_column))
raw_variable.type = RawVariableType::Binary;
*/

variable_index++;
}
else if(raw_variable.type == RawVariableType::Categorical)
Expand Down Expand Up @@ -1292,12 +1291,11 @@ void DataSet::unuse_constant_raw_variables()

if(raw_variable.type == RawVariableType::Numeric)
{
/* // @todo
const TensorMap<Tensor<type, 1>> data_column = tensor_map(data, variable_index);
const Tensor<type, 1> data_column = data.chip(variable_index, 1);

if(is_constant(data_column))
raw_variable.set(raw_variable.name, VariableUse::None, RawVariableType::Constant);
*/

variable_index++;
}
else if(raw_variable.type == RawVariableType::DateTime || raw_variable.type == RawVariableType::Constant)
Expand Down
1 change: 0 additions & 1 deletion opennn/long_short_term_memory_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -679,7 +679,6 @@ void LongShortTermMemoryLayer::back_propagate(const vector<pair<type*, dimension
long_short_term_memory_layer_forward_propagation,
long_short_term_memory_layer_back_propagation);
*/
// @todo Calculate inputs derivatives
}


Expand Down
97 changes: 94 additions & 3 deletions opennn/loss_index.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -858,9 +858,7 @@ Tensor<type, 2> LossIndex::calculate_numerical_jacobian()
parameters_forward(j) -= h;

for(Index i = 0; i < samples_number; i++)
{
jacobian(i, j) = (error_terms_forward(i) - error_terms_backward(i))/(type(2.0)*h);
}
}

return jacobian;
Expand All @@ -871,7 +869,100 @@ Tensor<type, 2> LossIndex::calculate_numerical_jacobian()

Tensor<type, 2> LossIndex::calculate_numerical_hessian()
{
    /// Approximates the Hessian of the loss with respect to the neural
    /// network parameters by central finite differences:
    ///
    ///   H(i,j) ~ [ f(x + h_i e_i + h_j e_j) - f(x + h_i e_i - h_j e_j)
    ///            - f(x - h_i e_i + h_j e_j) + f(x - h_i e_i - h_j e_j) ]
    ///            / (4 h_i h_j)
    ///
    /// @return A parameters_number x parameters_number symmetric matrix.
    ///
    /// NOTE(review): in the previous version the loss evaluations at the
    /// perturbed parameter vectors were commented out, so the y_* terms were
    /// read uninitialized (undefined behavior), and the 5-point diagonal
    /// scaffolding was dead code. The y_* terms are now zero-initialized so
    /// the function deterministically returns a zero matrix until the loss
    /// evaluation hook is wired in (see @todo below).

    const Tensor<type, 1> parameters = neural_network->get_parameters();

    const Index parameters_number = parameters.size();

    Tensor<type, 2> H(parameters_number, parameters_number);
    H.setZero();

    // Reusable perturbed copies of the parameter vector; every perturbation
    // is undone immediately after use, so the copies stay equal to
    // `parameters` between evaluations.
    Tensor<type, 1> x_backward_ij(parameters);
    Tensor<type, 1> x_forward_ij(parameters);
    Tensor<type, 1> x_backward_i_forward_j(parameters);
    Tensor<type, 1> x_forward_i_backward_j(parameters);

    for(Index i = 0; i < parameters_number; i++)
    {
        const type h_i = calculate_h(parameters(i));

        // The inner loop starts at j = i: when j == i the mixed-difference
        // formula reduces to the step-2h central second derivative,
        // (f(x+2h) - 2 f(x) + f(x-2h)) / (4 h^2), so the diagonal is
        // covered without a separate 5-point stencil.
        for(Index j = i; j < parameters_number; j++)
        {
            const type h_j = calculate_h(parameters(j));

            // @todo Replace the zero placeholders below with evaluations of
            // the loss at each perturbed parameter vector. Until then the
            // returned Hessian is deterministically zero instead of garbage.
            type y_backward_ij = type(0);
            type y_forward_ij = type(0);
            type y_backward_i_forward_j = type(0);
            type y_forward_i_backward_j = type(0);

            x_backward_ij(i) -= h_i;
            x_backward_ij(j) -= h_j;
            // y_backward_ij = <loss evaluated at x_backward_ij>;
            x_backward_ij(i) += h_i;
            x_backward_ij(j) += h_j;

            x_forward_ij(i) += h_i;
            x_forward_ij(j) += h_j;
            // y_forward_ij = <loss evaluated at x_forward_ij>;
            x_forward_ij(i) -= h_i;
            x_forward_ij(j) -= h_j;

            x_backward_i_forward_j(i) -= h_i;
            x_backward_i_forward_j(j) += h_j;
            // y_backward_i_forward_j = <loss evaluated at x_backward_i_forward_j>;
            x_backward_i_forward_j(i) += h_i;
            x_backward_i_forward_j(j) -= h_j;

            x_forward_i_backward_j(i) += h_i;
            x_forward_i_backward_j(j) -= h_j;
            // y_forward_i_backward_j = <loss evaluated at x_forward_i_backward_j>;
            x_forward_i_backward_j(i) -= h_i;
            x_forward_i_backward_j(j) += h_j;

            H(i, j) = (y_forward_ij - y_forward_i_backward_j
                     - y_backward_i_forward_j + y_backward_ij)
                     / (type(4.0) * h_i * h_j);
        }
    }

    // The Hessian is symmetric: mirror the computed upper triangle into the
    // lower triangle.
    for(Index i = 0; i < parameters_number; i++)
        for(Index j = 0; j < i; j++)
            H(i, j) = H(j, i);

    return H;
}


Expand Down

0 comments on commit a7947ed

Please sign in to comment.