ADD: tests + FIX: _update_many; _init_update
MarekWadinger committed Feb 23, 2024
1 parent db21046 commit 5a2ca4f
Showing 2 changed files with 117 additions and 22 deletions.
79 changes: 62 additions & 17 deletions functions/odmd.py
@@ -31,14 +31,15 @@

import numpy as np
import pandas as pd
+from river.base import MiniBatchRegressor

__all__ = [
    "OnlineDMD",
    "OnlineDMDwC",
]


-class OnlineDMD:
+class OnlineDMD(MiniBatchRegressor):
"""Online Dynamic Mode Decomposition (DMD).
This regressor is a class that implements online dynamic mode decomposition
@@ -81,6 +82,56 @@ class OnlineDMD:
        _P: inverse of covariance matrix of X

    Examples:
        >>> import numpy as np
        >>> import pandas as pd

        >>> n = 101; freq = 2.; tspan = np.linspace(0, 10, n); dt = 0.1
        >>> a1 = 1; a2 = 1; phase1 = -np.pi; phase2 = np.pi / 2
        >>> w1 = np.cos(np.pi * freq * tspan)
        >>> w2 = -np.sin(np.pi * freq * tspan)
        >>> df = pd.DataFrame({'w1': w1[:-1], 'w2': w2[:-1]})

        >>> model = OnlineDMD(r=2, w=0.1, initialize=0)
        >>> X, Y = df.iloc[:-1], df.shift(-1).iloc[:-1]

        >>> for (_, x), (_, y) in zip(X.iterrows(), Y.iterrows()):
        ...     x, y = x.to_dict(), y.to_dict()
        ...     model.learn_one(x, y)

        >>> eig, _ = np.log(model.eig[0]) / dt
        >>> r, i = eig.real, eig.imag
        >>> np.isclose(eig.real, 0.)
        True
        >>> np.isclose(eig.imag, np.pi * freq)
        True

        >>> model.xi  # TODO: verify the result
        array([0.54244922, 0.54244922])

        >>> from river.utils import Rolling
        >>> model = Rolling(OnlineDMD(r=2, w=1.), 10)
        >>> X, Y = df.iloc[:-1], df.shift(-1).iloc[:-1]

        >>> for (_, x), (_, y) in zip(X.iterrows(), Y.iterrows()):
        ...     x, y = x.to_dict(), y.to_dict()
        ...     model.update(x, y)

        >>> eig, _ = np.log(model.eig[0]) / dt
        >>> r, i = eig.real, eig.imag
        >>> np.isclose(eig.real, 0.)
        True
        >>> np.isclose(eig.imag, np.pi * freq)
        True

        >>> np.isclose(model.truncation_error(X.values.T, Y.values.T), 0)
        True

        >>> w_pred = model.predict_one(np.array([w1[-2], w2[-2]]))
        >>> np.allclose(w_pred, [w1[-1], w2[-1]])
        True

        >>> w_pred = model.predict_many(np.array([1, 0]), 10)
        >>> np.allclose(w_pred, [w1[1:11], w2[1:11]])
        True
    References:
        [^1]: Zhang, H., Clarence Worth Rowley, Deem, E.A. and Cattafesta, L.N.
@@ -119,9 +170,9 @@ def eig(self) -> tuple[np.ndarray, np.ndarray]:
        return Lambda, Phi

    def _init_update(self) -> None:
-        if self.initialize < self.m:
+        if self.initialize > 0 and self.initialize < self.m:
            warnings.warn(
-                f"Initialization is under-constrained. Changed initialize to {self.m}."
+                f"Initialization is under-constrained. Changing initialize to {self.m}."
            )
            self.initialize = self.m
        self.A = np.random.randn(self.m, self.m)
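The tightened guard above changes what initialize=0 means: it is now honored as "no initialization buffer at all" (the new docstring example constructs OnlineDMD(r=2, w=0.1, initialize=0) and relies on this), where previously any value below m, including 0, was bumped up to m. A minimal sketch of the changed condition, with illustrative values:

for initialize, m in [(0, 2), (1, 2), (2, 2)]:
    under_constrained = initialize > 0 and initialize < m
    print(initialize, m, under_constrained)  # True only for (1, 2): warn and raise to m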
@@ -172,7 +223,6 @@ def update(
        if self.n_seen == 0:
            self.m = x.shape[0]
            self._init_update()
-
        if bool(self.initialize) and self.n_seen <= self.initialize - 1:
            self._X_init[:, self.n_seen] = x
            self._Y_init[:, self.n_seen] = y
@@ -265,15 +315,12 @@ def _update_many(
        """
        if self.n_seen == 0:
-            self.m = X.shape[0]
-            self._init_update()
-            epsilon = 1e-15
-            alpha = 1.0 / epsilon
-            self._P = alpha * np.identity(self.m)  # inverse of cov(X)
+            raise RuntimeError("Model is not initialized.")
        p = X.shape[1]
-        #
-        weights = np.sqrt(self.w) ** range(p - 1, -1, -1)
-        weights = np.ones(p)
+        if self.exponential_weighting:
+            weights = np.sqrt(self.w) ** range(p - 1, -1, -1)
+        else:
+            weights = np.ones(p)
        C = np.diag(weights)
        PX = self._P.dot(X)
        AX = self.A.dot(X)
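The new weighting branch mirrors the one in learn_many: with exponential weighting enabled, a window of p snapshots receives geometrically decaying weights, so the oldest column counts least. A minimal standalone sketch of just that computation (the values of w and p are illustrative, not taken from this commit):

import numpy as np

w, p = 0.9, 5  # illustrative forgetting factor and window width
# sqrt(w)**k for k = p-1, ..., 0: the oldest snapshot gets the smallest weight
weights = np.sqrt(w) ** np.arange(p - 1, -1, -1)
C = np.diag(weights)  # diagonal weighting matrix, as used by _update_many
print(np.round(weights, 3))  # [0.81  0.854 0.9   0.949 1.   ]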
@@ -309,10 +356,10 @@ def learn_many(
        assert p >= self.m and np.linalg.matrix_rank(X) == self.m
        # Exponential weighting factor - older snapshots are weighted less
        if self.exponential_weighting:
-            weight = np.sqrt(self.w) ** range(p - 1, -1, -1)
+            weights = np.sqrt(self.w) ** range(p - 1, -1, -1)
        else:
-            weight = np.ones(p)
-        Xqhat, Yqhat = weight * X, weight * Y
+            weights = np.ones(p)
+        Xqhat, Yqhat = weights * X, weights * Y
        self.A = Yqhat.dot(np.linalg.pinv(Xqhat))
        self._P = np.linalg.inv(Xqhat.dot(Xqhat.T)) / self.w
        self.n_seen += p
@@ -323,8 +370,6 @@
        # the rank-1 formula s times"
        else:
            self._update_many(X, Y)
-            for i in range(p):
-                self.update(X[:, i], Y[:, i])

    def predict_one(self, x: Union[dict, np.ndarray]) -> np.ndarray:
        """
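Taken together, the odmd.py changes tighten the initialization contract: learn_many performs the batch pseudo-inverse fit on the first call, subsequent batches route through _update_many, _update_many now fails fast instead of silently seeding _P itself, and the leftover per-column update loop (which double-counted every snapshot) is gone. A hypothetical usage sketch of that calling order, with random illustrative data and assuming the module is importable as functions.odmd:

import numpy as np

from functions.odmd import OnlineDMD

rng = np.random.default_rng(0)
X = rng.standard_normal((2, 20))
Y = rng.standard_normal((2, 20))

model = OnlineDMD()
model.learn_many(X[:, :10], Y[:, :10])  # first batch: A = Y X^+ and _P from the weighted covariance
model.learn_many(X[:, 10:], Y[:, 10:])  # later batches go through _update_many

uninit = OnlineDMD()
try:
    uninit._update_many(X, Y)  # bypassing initialization now fails fast
except RuntimeError as err:
    print(err)  # Model is not initialized.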
60 changes: 55 additions & 5 deletions functions/test_odmd.py
@@ -12,6 +12,9 @@
import sys

import numpy as np
+import pandas as pd
+import pytest
+from river.utils import Rolling
from scipy.integrate import odeint

# from river.compat.river_to_sklearn import convert_river_to_sklearn
@@ -50,13 +53,60 @@ def dyn(x, t):
    eigvals[:, k] = np.linalg.eigvals(A[:, :, k])


def test_input_types():
    n_init = round(samples / 2)

    odmd1 = OnlineDMD()

    odmd1.learn_many(X[:, :n_init], Y[:, :n_init])
    for x, y in zip(X[:, n_init:].T, Y[:, n_init:].T):
        odmd1.learn_one(x, y)

    X_, Y_ = pd.DataFrame(X.T), pd.DataFrame(Y.T)

    odmd2 = OnlineDMD()

    odmd2.learn_many(X_.iloc[:n_init].T, Y_.iloc[:n_init].T)
    for x, y in zip(X_.iloc[n_init:].values, Y_.iloc[n_init:].values):
        odmd2.learn_one(x, y)

    assert np.allclose(odmd1.A, odmd2.A)


def test_one_many_close():
    n_init = round(samples / 2)

    odmd1 = OnlineDMD()

    odmd1.learn_many(X[:, :n_init], Y[:, :n_init])
    for x, y in zip(X[:, n_init:].T, Y[:, n_init:].T):
        odmd1.learn_one(x, y)

    odmd2 = OnlineDMD()

    odmd2.learn_many(X[:, :n_init], Y[:, :n_init])
    odmd2.learn_many(X[:, n_init:], Y[:, n_init:])

    assert np.allclose(odmd1.A, odmd2.A)


def test_errors_raised():
    odmd = OnlineDMD()

    with pytest.raises(Exception):
        odmd._update_many(X, Y)

    rodmd = Rolling(OnlineDMD(), window_size=1)
    with pytest.raises(Exception):
        for x, y in zip(X.T, Y.T):
            rodmd.update(x, y)


def test_allclose_online_batch():
    dmd = DMD()
-    odmd = OnlineDMD()

    dmd.fit(X, Y)

+    odmd = OnlineDMD()
    eigvals_online_ = np.empty((n, m), dtype=complex)
    for i, (x, y) in enumerate(zip(X.T, Y.T)):
        odmd.learn_one(x, y)
@@ -69,17 +119,17 @@ def test_allclose_online_batch():


def test_allclose_weighted_true():
-    init_samples = round(samples / 2)
+    n_init = round(samples / 2)
    odmd = OnlineDMD(w=0.9)
-    # odmd.learn_many(X[:, :init_samples], Y[:, :init_samples])
+    # odmd.learn_many(X[:, :n_init], Y[:, :n_init])

    eigvals_online_ = np.empty((n, m), dtype=complex)
    for i, (x, y) in enumerate(zip(X.T, Y.T)):
        odmd.learn_one(x, y)
        eigvals_online_[:, i] = np.log(np.linalg.eigvals(odmd.A)) / dt

-    slope_eig_true = np.diff(eigvals)[0, init_samples:].mean()
-    slope_eig_online = np.diff(eigvals_online_)[0, init_samples:].mean()
+    slope_eig_true = np.diff(eigvals)[0, n_init:].mean()
+    slope_eig_online = np.diff(eigvals_online_)[0, n_init:].mean()
    print(slope_eig_true, slope_eig_online)
    np.allclose(
        slope_eig_true,
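test_one_many_close leans on a standard identity: rank-1 (Sherman-Morrison) recursive least-squares updates applied once per column agree, up to floating-point error, with a batch pseudo-inverse fit on the same data. A self-contained numpy sketch of that identity, independent of OnlineDMD (all names and shapes here are illustrative):

import numpy as np

rng = np.random.default_rng(1)
m, p = 2, 50
X = rng.standard_normal((m, p))
Y = rng.standard_normal((m, p))

A_batch = Y @ np.linalg.pinv(X)  # batch estimate: A = Y X^+

# Seed from the first m columns, then fold in one column at a time.
A = Y[:, :m] @ np.linalg.pinv(X[:, :m])
P = np.linalg.inv(X[:, :m] @ X[:, :m].T)
for k in range(m, p):
    x, y = X[:, k : k + 1], Y[:, k : k + 1]
    gamma = 1.0 / (1.0 + x.T @ P @ x)        # scalar gain
    A = A + gamma * (y - A @ x) @ (x.T @ P)  # operator update
    P = P - gamma * (P @ x) @ (x.T @ P)      # Sherman-Morrison covariance update

print(np.allclose(A, A_batch))  # True

The class-level counterpart of this identity is what the test asserts: learn_many on one half followed by learn_one per column matches learn_many on both halves.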
