Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added finite difference schemes for tau sensitivity #8

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 20 additions & 3 deletions adjoint_esn/esn.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from functools import partial

import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from scipy.sparse.linalg import eigs as sparse_eigs
Expand Down Expand Up @@ -914,7 +916,16 @@ def adjoint_sensitivity(self, X, Y, N, N_g):

# return dJdp

def finite_difference_sensitivity(self, X, P, N, N_g, h=1e-5):
@staticmethod
def finite_differences(J, J_right, J_left, h, method):
if method == "forward":
return (J_right - J) / (h)
elif method == "backward":
return (J - J_left) / (h)
elif method == "central":
return (J_right - J_left) / (2 * h)

def finite_difference_sensitivity(self, X, Y, P, N, N_g, h=1e-5, method="central"):
"""Sensitivity of the ESN with respect to the parameters
Calculated using FINITE DIFFERENCES
Objective is squared L2 of the 2*N_g output states, i.e. acoustic energy
Expand All @@ -934,7 +945,12 @@ def finite_difference_sensitivity(self, X, P, N, N_g, h=1e-5):
# initialize sensitivity
dJdp = np.zeros((self.N_param_dim))

# central finite difference
# compute the energy of the base
J = 1 / 4 * np.mean(np.sum(Y[1:, 0 : 2 * N_g] ** 2, axis=1))

# define which finite difference method to use
finite_difference = partial(self.finite_differences, method=method)

# perturbed by h
for i in range(self.N_param_dim):
P_left = P.copy()
Expand All @@ -945,6 +961,7 @@ def finite_difference_sensitivity(self, X, P, N, N_g, h=1e-5):
_, Y_right = self.closed_loop(X[0, :], N, P_right)
J_left = 1 / 4 * np.mean(np.sum(Y_left[1:, 0 : 2 * N_g] ** 2, axis=1))
J_right = 1 / 4 * np.mean(np.sum(Y_right[1:, 0 : 2 * N_g] ** 2, axis=1))
dJdp[i] = (J_right - J_left) / (2 * h)

dJdp[i] = finite_difference(J, J_right, J_left, h)

return dJdp
69 changes: 45 additions & 24 deletions adjoint_esn/rijke_esn.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from functools import partial

import numpy as np
from sklearn.linear_model import ElasticNet, Lasso, Ridge

Expand Down Expand Up @@ -368,7 +370,7 @@ def reset_grad_attrs(self):
if hasattr(self, attr):
delattr(self, attr)

def direct_sensitivity(self, X, Y, N, X_past):
def direct_sensitivity(self, X, Y, N, X_past, method="central"):
"""Sensitivity of the ESN with respect to the parameters
Calculated using DIRECT method
Objective is squared L2 of the 2*N_g output states, i.e. acoustic energy
Expand Down Expand Up @@ -417,9 +419,19 @@ def direct_sensitivity(self, X, Y, N, X_past):
u_f_tau = Rijke.toVelocity(
N_g=self.N_g, eta=eta_tau, x=np.array([self.x_f])
)
# gradient of the delayed velocity with respect to tau
# backward finite difference
du_f_tau_dtau = (u_f_tau - u_f_tau_left) / self.dt
if method == "central" and i > 1:
x_aug_right = self.before_readout(XX[i - 2, :])
eta_tau_right = np.dot(x_aug_right, self.W_out)[0 : self.N_g]
u_f_tau_right = Rijke.toVelocity(
N_g=self.N_g, eta=eta_tau_right, x=np.array([self.x_f])
)
# gradient of the delayed velocity with respect to tau
# central finite difference
du_f_tau_dtau = (u_f_tau_right - u_f_tau_left) / (2 * self.dt)
else:
# gradient of the delayed velocity with respect to tau
# backward finite difference
du_f_tau_dtau = (u_f_tau - u_f_tau_left) / self.dt

dfdu_f_tau = self.dfdu_f_tau(dtanh, u_f_tau)

Expand Down Expand Up @@ -454,7 +466,7 @@ def direct_sensitivity(self, X, Y, N, X_past):

return dJdp

def adjoint_sensitivity(self, X, Y, N, X_past):
def adjoint_sensitivity(self, X, Y, N, X_past, method="central"):
"""Sensitivity of the ESN with respect to the parameters
Calculated using ADJOINT method
Objective is squared L2 of the 2*N_g output states, i.e. acoustic energy
Expand Down Expand Up @@ -510,9 +522,20 @@ def adjoint_sensitivity(self, X, Y, N, X_past):
u_f_tau = Rijke.toVelocity(
N_g=self.N_g, eta=eta_tau, x=np.array([self.x_f])
)
# gradient of the delayed velocity with respect to tau
# backward finite difference
du_f_tau_dtau = (u_f_tau - u_f_tau_left) / self.dt

if method == "central" and i > 1:
x_aug_right = self.before_readout(XX[i - 2, :])
eta_tau_right = np.dot(x_aug_right, self.W_out)[0 : self.N_g]
u_f_tau_right = Rijke.toVelocity(
N_g=self.N_g, eta=eta_tau_right, x=np.array([self.x_f])
)
# gradient of the delayed velocity with respect to tau
# central finite difference
du_f_tau_dtau = (u_f_tau_right - u_f_tau_left) / (2 * self.dt)
else:
# gradient of the delayed velocity with respect to tau
# backward finite difference
du_f_tau_dtau = (u_f_tau - u_f_tau_left) / self.dt

dfdu_f_tau = self.dfdu_f_tau(dtanh, u_f_tau)

Expand Down Expand Up @@ -554,9 +577,11 @@ def adjoint_sensitivity(self, X, Y, N, X_past):

return dJdp

def finite_difference_sensitivity(self, X, Y, X_tau, P, N, h=1e-5):
def finite_difference_sensitivity(
self, X, Y, X_tau, P, N, h=1e-5, method="central"
):
"""Sensitivity of the ESN with respect to the parameters
Calculated using CENTRAL FINITE DIFFERENCES
Calculated using FINITE DIFFERENCES
Objective is squared L2 of the 2*N_g output states, i.e. acoustic energy

Args:
Expand All @@ -568,13 +593,20 @@ def finite_difference_sensitivity(self, X, Y, X_tau, P, N, h=1e-5):
assuming outputs are ordered such that the first 2*N_g correspond to the
Galerkin amplitudes
h: perturbation
method: finite difference method, "forward","backward" or "central"

Returns:
dJdp: numerical sensitivity to parameters
"""
# initialize sensitivity
dJdp = np.zeros((self.N_param_dim + 1))

# compute the energy of the base
J = 1 / 4 * np.mean(np.sum(Y[1:, 0 : 2 * self.N_g] ** 2, axis=1))

# define which finite difference method to use
finite_difference = partial(self.finite_differences, method=method)

# central finite difference
# perturbed by h
for i in range(self.N_param_dim):
Expand All @@ -588,7 +620,7 @@ def finite_difference_sensitivity(self, X, Y, X_tau, P, N, h=1e-5):
J_right = (
1 / 4 * np.mean(np.sum(Y_right[1:, 0 : 2 * self.N_g] ** 2, axis=1))
)
dJdp[i] = (J_right - J_left) / (2 * h)
dJdp[i] = finite_difference(J, J_right, J_left, h)

# tau sensitivity
orig_tau = self.tau
Expand All @@ -604,20 +636,9 @@ def finite_difference_sensitivity(self, X, Y, X_tau, P, N, h=1e-5):
J_tau_right = (
1 / 4 * np.mean(np.sum(Y_tau_right[1:, 0 : 2 * self.N_g] ** 2, axis=1))
)
dJdp[-1] = finite_difference(J, J_tau_right, J_tau_left, h_tau)

J = 1 / 4 * np.mean(np.sum(Y[1:, 0 : 2 * self.N_g] ** 2, axis=1))
dJdp_backward = (J - J_tau_left) / (h_tau)
dJdp_forward = (J_tau_right - J) / (h_tau)
dJdp_central = (J_tau_right - J_tau_left) / (2 * h_tau)

dJdp[-1] = dJdp_central
print("J ESN = ", J)
print("J tau left ESN = ", J_tau_left)
print("J tau right ESN = ", J_tau_right)

print("dJdp backward = ", dJdp_backward)
print("dJdp forward = ", dJdp_forward)
print("dJdp central = ", dJdp_central)
# set tau back to the original value
self.tau = orig_tau

return dJdp
Loading