Merge pull request #54 from NREL/gb/pad_bug
Merge pull request #54 from NREL/gb/pad_bug
Gb/pad bug
grantbuster authored Nov 15, 2024
2 parents c5b0cdb + ff29b5f commit c7a06b6
Showing 3 changed files with 33 additions and 14 deletions.
39 changes: 29 additions & 10 deletions phygnn/layers/custom_layers.py
@@ -12,7 +12,7 @@ class FlexiblePadding(tf.keras.layers.Layer):
     """Class to perform padding on tensors
     """
 
-    def __init__(self, paddings, mode='REFLECT'):
+    def __init__(self, paddings, mode='REFLECT', option='tf'):
         """
         Parameters
         ----------
@@ -21,13 +21,30 @@ def __init__(self, paddings, mode='REFLECT'):
             rank of the tensor and elements give the number
             of leading and trailing pads
         mode : str
-            tf.pad() padding mode. Can be REFLECT, CONSTANT,
+            tf.pad() / np.pad() padding mode. Can be REFLECT, CONSTANT,
             or SYMMETRIC
+        option : str
+            Option for TensorFlow padding ("tf") or numpy ("np"). Default is
+            "tf" for TensorFlow training. We have observed silent failures of
+            tf.pad() with larger array sizes, so "np" may be preferable at
+            inference time on large chunks, but it is much slower because it
+            has to convert tensors to numpy arrays.
         """
         super().__init__()
         self.paddings = tf.constant(paddings)
         self.rank = len(paddings)
-        self.mode = mode
+        self.mode = mode.lower()
+        self.option = option.lower()
+
+        if self.option == 'tf':
+            self._pad_fun = tf.pad
+        elif self.option == 'np':
+            self._pad_fun = np.pad
+        else:
+            msg = ('FlexiblePadding option must be "tf" or "np" but '
+                   f'received: {self.option}')
+            logger.error(msg)
+            raise KeyError(msg)
 
     def compute_output_shape(self, input_shape):
         """Computes output shape after padding
@@ -62,8 +79,7 @@ def call(self, x):
             by compute_output_shape
         """
-        return tf.pad(x, self.paddings,
-                      mode=self.mode)
+        return self._pad_fun(x, self.paddings, mode=self.mode)
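
For orientation, here is a minimal usage sketch of the new option argument and the _pad_fun dispatch above. This is not code from the commit: it assumes phygnn >= 0.0.30 with TensorFlow 2.x in eager mode, and the input shape and paddings are illustrative only.

    import numpy as np
    import tensorflow as tf
    from phygnn.layers.custom_layers import FlexiblePadding

    x = tf.random.uniform((1, 10, 10, 2))    # (batch, rows, cols, features)
    pads = [[0, 0], [2, 2], [2, 2], [0, 0]]  # pad the two spatial axes by 2

    tf_layer = FlexiblePadding(pads, mode='REFLECT', option='tf')
    np_layer = FlexiblePadding(pads, mode='REFLECT', option='np')

    y_tf = tf_layer(x)  # fast path, used during training
    y_np = np_layer(x)  # slower: converts the tensor to a numpy array first
    assert np.allclose(y_tf, y_np)

    # An unrecognized option raises KeyError per the branch above:
    try:
        FlexiblePadding(pads, option='torch')
    except KeyError as err:
        print(err)

Lowercasing the mode keeps both back-ends happy: tf.pad() treats the mode string as case-insensitive, while np.pad() requires lowercase mode names.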


class ExpandDims(tf.keras.layers.Layer):
@@ -168,6 +184,7 @@ def __init__(self, pool_size, strides=None, padding='valid', sigma=1,
         self.trainable = trainable
         self.sigma = sigma
 
+    # pylint: disable=unused-argument
     def build(self, input_shape):
         """Custom implementation of the tf layer build method.
@@ -475,11 +492,13 @@ def __init__(self, spatial_mult=1, temporal_mult=1,
             where the feature axis is unpacked into the temporal axis.
         t_roll : int
             Option to roll the temporal axis after expanding. When using
-            temporal_method="depth_to_time", the default (t_roll=0) will
-            add temporal steps after the input steps such that if input
-            temporal shape is 3 and the temporal_mult is 24x, the output will
-            have the original timesteps at idt=0,24,48 but if t_roll=12, the
-            output will have the original timesteps at idt=12,36,60
+            temporal_method="depth_to_time", the default (t_roll=0) will add
+            temporal steps after the input steps such that if input temporal
+            shape is 3 and the temporal_mult is 24x, the output will have the
+            index-0 timesteps at idt=0,24,48 but if t_roll=12, the output will
+            have the original timesteps at idt=12,36,60. This is no longer
+            recommended, as a positive roll will move the features of timestep
+            -1 from the end of the series to the beginning.
         """
 
         super().__init__()
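
The wrap-around caveat added to the docstring can be seen with a plain tf.roll on a 1-D stand-in for the expanded time axis (an illustrative sketch, not code from this commit):

    import tensorflow as tf

    n_t, mult = 3, 24         # input temporal shape and a 24x expansion
    t = tf.range(n_t * mult)  # 72 expanded steps, labeled 0..71
    rolled = tf.roll(t, shift=12, axis=0)
    # The original steps sat at idt=0,24,48; after the roll they sit at
    # idt=12,36,60, but the last 12 steps wrap around to the start:
    print(rolled[:3].numpy())  # [60 61 62]
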
2 changes: 1 addition & 1 deletion phygnn/version.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
 """Physics Guided Neural Network version."""
 
-__version__ = '0.0.29'
+__version__ = '0.0.30'
6 changes: 3 additions & 3 deletions tests/test_layers.py
@@ -289,17 +289,17 @@ def test_flexible_padding(hidden_layers):
     layer = HiddenLayers(hidden_layers).layers[0]
     t = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
-    if layer.mode == 'CONSTANT':
+    if layer.mode.upper() == 'CONSTANT':
         t_check = tf.constant([[0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 1, 2, 3, 0, 0],
                                [0, 0, 4, 5, 6, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0]])
-    elif layer.mode == 'REFLECT':
+    elif layer.mode.upper() == 'REFLECT':
         t_check = tf.constant([[6, 5, 4, 5, 6, 5, 4],
                                [3, 2, 1, 2, 3, 2, 1],
                                [6, 5, 4, 5, 6, 5, 4],
                                [3, 2, 1, 2, 3, 2, 1]])
-    elif layer.mode == 'SYMMETRIC':
+    elif layer.mode.upper() == 'SYMMETRIC':
         t_check = tf.constant([[2, 1, 1, 2, 3, 3, 2],
                                [2, 1, 1, 2, 3, 3, 2],
                                [5, 4, 4, 5, 6, 6, 5],
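
Since the fix hinges on the "np" path reproducing the "tf" path, a quick parity check in the spirit of this test might look like the following (a sketch, not part of the test file; the [[1, 1], [2, 2]] paddings are inferred from the expected 4x7 outputs):

    import numpy as np
    import tensorflow as tf

    t = np.array([[1, 2, 3],
                  [4, 5, 6]])
    pads = [[1, 1], [2, 2]]
    for mode in ('constant', 'reflect', 'symmetric'):
        tf_out = tf.pad(t, pads, mode=mode).numpy()  # tf.pad: case-insensitive
        np_out = np.pad(t, pads, mode=mode)          # np.pad: lowercase names
        assert np.array_equal(tf_out, np_out), mode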
