added test for trainable gaussian pool
grantbuster committed Sep 13, 2024
1 parent fcc644b commit 780d2e6
Showing 1 changed file with 52 additions and 6 deletions.
58 changes: 52 additions & 6 deletions tests/test_layers.py
@@ -467,10 +467,10 @@ def test_gaussian_pooling():
for stdev in [1, 2]:
layer = GaussianAveragePooling2D(pool_size=5, strides=1, sigma=stdev)
_ = layer(np.ones((24, 100, 100, 35)))
- kernel = layer._kernel.numpy()
+ kernel = layer.make_kernel().numpy()
kernels.append(kernel)

- assert kernel[:, :, 0, 0].sum() == 1
+ assert np.allclose(kernel[:, :, 0, 0].sum(), 1, rtol=1e-4)
assert kernel[2, 2, 0, 0] == kernel.max()
assert kernel[0, 0, 0, 0] == kernel.min()
assert kernel[-1, -1, 0, 0] == kernel.min()
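
The assertions above pin down the kernel's shape: a pool_size x pool_size Gaussian normalized to sum to ~1, peaking at the center and bottoming out at the corners. A minimal NumPy sketch of that kind of kernel (an illustration only, not the library's make_kernel implementation) for the pool_size=5, sigma=2 case tested here:

import numpy as np

def gaussian_kernel(pool_size=5, sigma=2.0):
    """Build a normalized 2D Gaussian weighting kernel (illustrative)."""
    ax = np.arange(pool_size) - (pool_size - 1) / 2  # e.g. [-2, -1, 0, 1, 2]
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx**2 + yy**2) / (2 * sigma**2))
    return kernel / kernel.sum()  # normalize so the weights sum to ~1

k = gaussian_kernel()
assert np.allclose(k.sum(), 1, rtol=1e-4)          # same relaxed sum check as above
assert k[2, 2] == k.max() and k[0, 0] == k.min()   # center is max, corners are min
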
@@ -485,7 +485,7 @@ def test_gaussian_pooling():
normalize=False)
x_in = np.random.uniform(0, 1, (1, 12, 12, 3))
out1 = model1.predict(x_in)
- kernel1 = model1.layers[0]._kernel[:, :, 0, 0].numpy()
+ kernel1 = model1.layers[0].make_kernel()[:, :, 0, 0].numpy()

for idf in range(out1.shape[-1]):
test = (x_in[0, :, :, idf] * kernel1).sum()
@@ -499,7 +499,7 @@ def test_gaussian_pooling():
model1.save_model(model_path)
model2 = TfModel.load(model_path)

- kernel2 = model2.layers[0]._kernel[:, :, 0, 0].numpy()
+ kernel2 = model2.layers[0].make_kernel()[:, :, 0, 0].numpy()
out2 = model2.predict(x_in)
assert np.allclose(kernel1, kernel2)
assert np.allclose(out1, out2)
@@ -509,6 +509,52 @@ def test_gaussian_pooling():
_ = model2.predict(x_in)


def test_gaussian_pooling_train():
"""Test the trainable sigma functionality of the gaussian average pool"""
pool_size = 5
xtrain = np.random.uniform(0, 1, (10, pool_size, pool_size, 1))
ytrain = np.random.uniform(0, 1, (10, 1, 1, 1))
hidden_layers = [{'class': 'GaussianAveragePooling2D',
'pool_size': pool_size, 'trainable': False,
'strides': 1, 'padding': 'valid', 'sigma': 2}]

model = TfModel.build(['x'], ['y'],
hidden_layers=hidden_layers,
input_layer=False,
output_layer=False,
learning_rate=1e-3,
normalize=(True, True))
model.layers[0].build(xtrain.shape)
assert len(model.layers[0].trainable_weights) == 0

hidden_layers[0]['trainable'] = True
model = TfModel.build(['x'], ['y'],
hidden_layers=hidden_layers,
input_layer=False,
output_layer=False,
learning_rate=1e-3,
normalize=(True, True))
model.layers[0].build(xtrain.shape)
assert len(model.layers[0].trainable_weights) == 1

layer = model.layers[0]
sigma1 = float(layer.sigma)
kernel1 = layer.make_kernel().numpy().copy()
model.train_model(xtrain, ytrain, epochs=10)
sigma2 = float(layer.sigma)
kernel2 = layer.make_kernel().numpy().copy()

assert not np.allclose(sigma1, sigma2)
assert not np.allclose(kernel1, kernel2)

with TemporaryDirectory() as td:
model_path = os.path.join(td, 'test_model')
model.save_model(model_path)
model2 = TfModel.load(model_path)

assert np.allclose(model.predict(xtrain), model2.predict(xtrain))


def test_siglin():
"""Test the sigmoid linear layer"""
n_points = 1000
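
The new test_gaussian_pooling_train above only checks observable behavior: sigma shows up in trainable_weights when trainable=True, and both sigma and the kernel change after fitting. A rough sketch of how such a layer can be wired up in Keras (an assumption based on those checks, not the actual GaussianAveragePooling2D source; the class and attribute names here are hypothetical):

import tensorflow as tf

class TrainableGaussianPool(tf.keras.layers.Layer):
    """Illustrative 2D average pool with an optionally trainable Gaussian width."""

    def __init__(self, pool_size=5, sigma=2.0, trainable=True, **kwargs):
        super().__init__(**kwargs)
        self.pool_size = pool_size
        self._init_sigma = float(sigma)
        # Here `trainable` only controls the sigma weight, mirroring the
        # 'trainable' key used in the hidden_layers config above.
        self._sigma_trainable = trainable
        self.sigma = None

    def build(self, input_shape):
        # sigma is registered as a weight; it lands in trainable_weights
        # only when trainable=True, which the len(...) asserts rely on.
        self.sigma = self.add_weight(
            name='sigma', shape=(), dtype=tf.float32,
            trainable=self._sigma_trainable,
            initializer=tf.keras.initializers.Constant(self._init_sigma))

    def make_kernel(self):
        # Rebuild the kernel from the current sigma every time, so gradient
        # updates to sigma also change the pooling weights.
        ax = tf.range(self.pool_size, dtype=tf.float32) - (self.pool_size - 1) / 2
        xx, yy = tf.meshgrid(ax, ax)
        kernel = tf.exp(-(xx**2 + yy**2) / (2.0 * self.sigma**2))
        kernel = kernel / tf.reduce_sum(kernel)
        return tf.reshape(kernel, (self.pool_size, self.pool_size, 1, 1))

    def call(self, x):
        x = tf.cast(x, tf.float32)
        kernel = tf.tile(self.make_kernel(), (1, 1, x.shape[-1], 1))
        return tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
                                      padding='VALID')

Training then nudges sigma through ordinary gradient updates, which is why the test expects sigma and the kernel to differ before and after train_model, and why the saved and reloaded model must reproduce the same predictions.
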
@@ -532,7 +578,7 @@ def test_logtransform():

lt = LogTransform(adder=1)
ilt = LogTransform(adder=1, inverse=True)
- x = np.random.uniform(0, 10, (n_points + 1, 2))
+ x = np.random.uniform(0.01, 10, (n_points + 1, 2))
y = lt(x).numpy()
xinv = ilt(y).numpy()
assert not np.isnan(y).any()
@@ -541,7 +587,7 @@ def test_logtransform():

lt = LogTransform(adder=1, idf=1)
ilt = LogTransform(adder=1, inverse=True, idf=1)
- x = np.random.uniform(0, 10, (n_points + 1, 2))
+ x = np.random.uniform(0.01, 10, (n_points + 1, 2))
y = lt(x).numpy()
xinv = ilt(y).numpy()
assert np.allclose(x[:, 0], y[:, 0])
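
The round-trip asserts in test_logtransform are consistent with a forward transform of log(x + adder) and an inverse of exp(y) - adder, applied to a single feature when idf is set (hence x[:, 0] passing through unchanged). A small NumPy illustration of that round trip, under that assumption:

import numpy as np

adder = 1
x = np.random.uniform(0.01, 10, (1001, 2))   # same strictly positive range as the updated test
y = np.log(x + adder)        # assumed forward transform
xinv = np.exp(y) - adder     # assumed inverse transform
assert not np.isnan(y).any()
assert np.allclose(x, xinv)
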