diff --git a/docs/advanced/model_optimization.rst b/docs/advanced/model_optimization.rst index c1396b3d2..41132ab61 100644 --- a/docs/advanced/model_optimization.rst +++ b/docs/advanced/model_optimization.rst @@ -13,11 +13,11 @@ The code block below showcases three use cases of the hls4ml Optimization API - from tensorflow.keras.optimizers import Adam from tensorflow.keras.metrics import CategoricalAccuracy from tensorflow.keras.losses import CategoricalCrossentropy - from hls4ml.optimization.keras import optimize_model - from hls4ml.optimization.keras.utils import get_model_sparsity - from hls4ml.optimization.attributes import get_attributes_from_keras_model - from hls4ml.optimization.objectives import ParameterEstimator - from hls4ml.optimization.scheduler import PolynomialScheduler + from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model + from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_model_sparsity + from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model + from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator + from hls4ml.optimization.dsp_aware_pruning.scheduler import PolynomialScheduler # Define baseline model and load data # X_train, y_train = ... # X_val, y_val = ... @@ -75,7 +75,7 @@ To optimize GPU FLOPs, the code is similar to above: .. code-block:: Python - from hls4ml.optimization.objectives.gpu_objectives import GPUFLOPEstimator + from hls4ml.optimization.dsp_aware_pruning.objectives.gpu_objectives import GPUFLOPEstimator # Optimize model # Note the change from ParameterEstimator to GPUFLOPEstimator @@ -98,7 +98,7 @@ Finally, optimizing Vivado DSPs is possible, given a hls4ml config: .. code-block:: Python from hls4ml.utils.config import config_from_keras_model - from hls4ml.optimization.objectives.vivado_objectives import VivadoDSPEstimator + from hls4ml.optimization.dsp_aware_pruning.objectives.vivado_objectives import VivadoDSPEstimator # Note the change from optimize_model to optimize_keras_model_for_hls4ml # The function optimize_keras_model_for_hls4ml acts as a wrapper for the function, parsing hls4ml config to model attributes diff --git a/hls4ml/optimization/__init__.py b/hls4ml/optimization/__init__.py index ab51ce1eb..c626b70c2 100644 --- a/hls4ml/optimization/__init__.py +++ b/hls4ml/optimization/__init__.py @@ -1,108 +1,3 @@ -import numpy as np - -from hls4ml.optimization.attributes import get_attributes_from_keras_model_and_hls4ml_config -from hls4ml.optimization.keras import optimize_model - -default_regularization_range = np.logspace(-6, -2, num=16).tolist() - - -def optimize_keras_model_for_hls4ml( - keras_model, - hls_config, - objective, - scheduler, - X_train, - y_train, - X_val, - y_val, - batch_size, - epochs, - optimizer, - loss_fn, - validation_metric, - increasing, - rtol, - callbacks=None, - ranking_metric='l1', - local=False, - verbose=False, - rewinding_epochs=1, - cutoff_bad_trials=3, - directory='hls4ml-optimization', - tuner='Bayesian', - knapsack_solver='CBC_MIP', - regularization_range=default_regularization_range, -): - ''' - Top-level function for optimizing a Keras model, given hls4ml config and a hardware objective(s) - - Args: - keras_model (keras.Model): Model to be optimized - hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...) 
- objective (hls4ml.optimization.objectives.ObjectiveEstimator): - Parameter, hardware or user-defined objective of optimization - scheduler (hls4ml.optimization.scheduler.OptimizationScheduler): - Sparsity scheduler, choose between constant, polynomial and binary - X_train (np.array): Training inputs - y_train (np.array): Training labels - X_val (np.array): Validation inputs - y_val (np.array): Validation labels - batch_size (int): Batch size during training - epochs (int): Maximum number of epochs to fine-tune model, in one iteration of pruning - optimizer (keras.optimizers.Optimizer or equivalent-string description): Optimizer used during training - loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training - validation_metric (keras.metrics.Metric or equivalent loss description): Validation metric, used as a baseline - increasing (boolean): If the metric improves with increased values; - e.g. accuracy -> increasing = True, MSE -> increasing = False - rtol (float): Relative tolerance; - pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric - callbacks (list of keras.callbacks.Callback) Currently not supported, developed in future versions - ranking_metric (string): Metric used for ranking weights and structures; - currently supported l1, l2, saliency and Oracle - local (boolean): Layer-wise or global pruning - verbose (boolean): Display debug logs during model optimization - rewinding_epochs (int): Number of epochs to retrain model without weight freezing, - allows regrowth of previously pruned weights - cutoff_bad_trials (int): After how many bad trials (performance below threshold), - should model pruning / weight sharing stop - directory (string): Directory to store temporary results - tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None - knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing; - default usually works well; for very large networks, greedy algorithm might be more suitable - regularization_range (list): List of suitable hyperparameters for weight decay - - Returns: - keras.Model: Optimized model - ''' - - # Extract model attributes - model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config) - - # Optimize model - return optimize_model( - keras_model, - model_attributes, - objective, - scheduler, - X_train, - y_train, - X_val, - y_val, - batch_size, - epochs, - optimizer, - loss_fn, - validation_metric, - increasing, - rtol, - callbacks=callbacks, - ranking_metric=ranking_metric, - local=local, - verbose=verbose, - rewinding_epochs=rewinding_epochs, - cutoff_bad_trials=cutoff_bad_trials, - directory=directory, - tuner=tuner, - knapsack_solver=knapsack_solver, - regularization_range=regularization_range, - ) +from .dsp_aware_pruning import optimize_keras_model_for_hls4ml # noqa: F401 +from .dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config # noqa: F401 +from .dsp_aware_pruning.keras import optimize_model # noqa: F401 diff --git a/hls4ml/optimization/dsp_aware_pruning/__init__.py b/hls4ml/optimization/dsp_aware_pruning/__init__.py new file mode 100644 index 000000000..69e2029e0 --- /dev/null +++ b/hls4ml/optimization/dsp_aware_pruning/__init__.py @@ -0,0 +1,108 @@ +import numpy as np + +from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config +from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model + 
+default_regularization_range = np.logspace(-6, -2, num=16).tolist()
+
+
+def optimize_keras_model_for_hls4ml(
+    keras_model,
+    hls_config,
+    objective,
+    scheduler,
+    X_train,
+    y_train,
+    X_val,
+    y_val,
+    batch_size,
+    epochs,
+    optimizer,
+    loss_fn,
+    validation_metric,
+    increasing,
+    rtol,
+    callbacks=None,
+    ranking_metric='l1',
+    local=False,
+    verbose=False,
+    rewinding_epochs=1,
+    cutoff_bad_trials=3,
+    directory='hls4ml-optimization',
+    tuner='Bayesian',
+    knapsack_solver='CBC_MIP',
+    regularization_range=default_regularization_range,
+):
+    '''
+    Top-level function for optimizing a Keras model, given an hls4ml config and one or more hardware objectives
+
+    Args:
+        keras_model (keras.Model): Model to be optimized
+        hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...)
+        objective (hls4ml.optimization.objectives.ObjectiveEstimator):
+            Parameter, hardware or user-defined objective of optimization
+        scheduler (hls4ml.optimization.scheduler.OptimizationScheduler):
+            Sparsity scheduler; choose between constant, polynomial and binary
+        X_train (np.array): Training inputs
+        y_train (np.array): Training labels
+        X_val (np.array): Validation inputs
+        y_val (np.array): Validation labels
+        batch_size (int): Batch size during training
+        epochs (int): Maximum number of epochs to fine-tune the model in one iteration of pruning
+        optimizer (keras.optimizers.Optimizer or equivalent string description): Optimizer used during training
+        loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training
+        validation_metric (keras.metrics.Metric or equivalent metric description): Validation metric, used as a baseline
+        increasing (boolean): Whether the metric improves with increasing values;
+            e.g. accuracy -> increasing = True, MSE -> increasing = False
+        rtol (float): Relative tolerance;
+            pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric
+        callbacks (list of keras.callbacks.Callback): Currently not supported; planned for a future version
+        ranking_metric (string): Metric used for ranking weights and structures;
+            currently supported: l1, l2, saliency and Oracle
+        local (boolean): Layer-wise (True) or global (False) pruning
+        verbose (boolean): Display debug logs during model optimization
+        rewinding_epochs (int): Number of epochs to retrain the model without weight freezing,
+            allowing regrowth of previously pruned weights
+        cutoff_bad_trials (int): Number of bad trials (performance below threshold) after which
+            pruning / weight sharing stops
+        directory (string): Directory to store temporary results
+        tuner (str): Tuning algorithm; choose between Bayesian, Hyperband and None
+        knapsack_solver (str): Algorithm used to solve the knapsack problem during optimization;
+            the default usually works well; for very large networks, a greedy algorithm might be more suitable
+        regularization_range (list): List of suitable hyperparameters for weight decay
+
+    Returns:
+        keras.Model: Optimized model
+    '''
+
+    # Extract model attributes
+    model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)
+
+    # Optimize model
+    return optimize_model(
+        keras_model,
+        model_attributes,
+        objective,
+        scheduler,
+        X_train,
+        y_train,
+        X_val,
+        y_val,
+        batch_size,
+        epochs,
+        optimizer,
+        loss_fn,
+        validation_metric,
+        increasing,
+        rtol,
+        callbacks=callbacks,
+        ranking_metric=ranking_metric,
+        local=local,
+        verbose=verbose,
+        rewinding_epochs=rewinding_epochs,
+        cutoff_bad_trials=cutoff_bad_trials,
+        directory=directory,
+        tuner=tuner,
+        knapsack_solver=knapsack_solver,
+        regularization_range=regularization_range,
+    )
diff --git a/hls4ml/optimization/attributes.py b/hls4ml/optimization/dsp_aware_pruning/attributes.py
similarity index 98%
rename from hls4ml/optimization/attributes.py
rename to hls4ml/optimization/dsp_aware_pruning/attributes.py
index a7b6d7413..f652f27d5 100644
--- a/hls4ml/optimization/attributes.py
+++ b/hls4ml/optimization/dsp_aware_pruning/attributes.py
@@ -2,8 +2,8 @@
 
 import hls4ml
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS
 
 
 class hls4mlAttributes:
diff --git a/hls4ml/optimization/config.py b/hls4ml/optimization/dsp_aware_pruning/config.py
similarity index 100%
rename from hls4ml/optimization/config.py
rename to hls4ml/optimization/dsp_aware_pruning/config.py
diff --git a/hls4ml/optimization/keras/__init__.py b/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
similarity index 96%
rename from hls4ml/optimization/keras/__init__.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
index d67ddd5d2..29012bd39 100644
--- a/hls4ml/optimization/keras/__init__.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
@@ -7,13 +7,13 @@
 # Enables printing of loss tensors during custom training loop
 from tensorflow.python.ops.numpy_ops import np_config
 
-import hls4ml.optimization.keras.utils as utils
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.builder import build_optimizable_model, remove_custom_regularizers
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.keras.reduction import reduce_model
-from hls4ml.optimization.scheduler import OptimizationScheduler
+import hls4ml.optimization.dsp_aware_pruning.keras.utils as utils
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.builder import build_optimizable_model, remove_custom_regularizers
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.keras.reduction import reduce_model
+from hls4ml.optimization.dsp_aware_pruning.scheduler import OptimizationScheduler
 
 np_config.enable_numpy_behavior()
 default_regularization_range = np.logspace(-6, -2, num=16).tolist()
diff --git a/hls4ml/optimization/keras/builder.py b/hls4ml/optimization/dsp_aware_pruning/keras/builder.py
similarity index 98%
rename from hls4ml/optimization/keras/builder.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/builder.py
index f265ccdf4..4ba39e4f7 100644
--- a/hls4ml/optimization/keras/builder.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/builder.py
@@ -8,8 +8,8 @@
 from tensorflow.keras.callbacks import EarlyStopping
 from tensorflow.keras.layers import Conv2D, Dense
 
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
-from hls4ml.optimization.keras.regularizers import Conv2DRegularizer, DenseRegularizer
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.regularizers import Conv2DRegularizer, DenseRegularizer
 
 co = {}
 _add_supported_quantized_objects(co)
diff --git a/hls4ml/optimization/keras/config.py b/hls4ml/optimization/dsp_aware_pruning/keras/config.py
similarity index 100%
rename from hls4ml/optimization/keras/config.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/config.py
diff --git a/hls4ml/optimization/keras/masking.py b/hls4ml/optimization/dsp_aware_pruning/keras/masking.py
similarity index 99%
rename from hls4ml/optimization/keras/masking.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/masking.py
index 0e74997be..dddeddf6f 100644
--- a/hls4ml/optimization/keras/masking.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/masking.py
@@ -6,9 +6,9 @@
 from qkeras import QConv2D, QDense
 from tensorflow.keras.layers import Conv2D, Dense
 
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
-from hls4ml.optimization.knapsack import solve_knapsack
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
+from hls4ml.optimization.dsp_aware_pruning.knapsack import solve_knapsack
 
 
 def get_model_masks(
diff --git a/hls4ml/optimization/keras/reduction.py b/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
similarity index 96%
rename from hls4ml/optimization/keras/reduction.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
index 4ea8855aa..12fb53479 100644
--- a/hls4ml/optimization/keras/reduction.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
@@ -2,7 +2,7 @@
 from tensorflow.keras.layers import Conv2D, Dense
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.keras.utils import get_last_layer_with_weights
+from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_last_layer_with_weights
 
 
 def reduce_model(model):
diff --git a/hls4ml/optimization/keras/regularizers.py b/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
similarity index 99%
rename from hls4ml/optimization/keras/regularizers.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
index 1e885963c..b42eb3f05 100644
--- a/hls4ml/optimization/keras/regularizers.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
@@ -1,7 +1,7 @@
 import numpy as np
 import tensorflow as tf
 
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
 
 
 @tf.keras.utils.register_keras_serializable(name='DenseRegularizer')
diff --git a/hls4ml/optimization/keras/utils.py b/hls4ml/optimization/dsp_aware_pruning/keras/utils.py
similarity index 100%
rename from hls4ml/optimization/keras/utils.py
rename to hls4ml/optimization/dsp_aware_pruning/keras/utils.py
diff --git a/hls4ml/optimization/knapsack.py b/hls4ml/optimization/dsp_aware_pruning/knapsack.py
similarity index 100%
rename from hls4ml/optimization/knapsack.py
rename to hls4ml/optimization/dsp_aware_pruning/knapsack.py
diff --git a/hls4ml/optimization/objectives/__init__.py b/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py
similarity index 97%
rename from hls4ml/optimization/objectives/__init__.py
rename to hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py
index fcbef305b..45204aaf7 100644
--- a/hls4ml/optimization/objectives/__init__.py
+++ b/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py
@@ -3,8 +3,8 @@
 
 import numpy as np
 
-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
 
 '''
 Pruning & weight sharing are formulated as an optimization problem, with the aim of minimizing some metric
diff --git a/hls4ml/optimization/objectives/gpu_objectives.py b/hls4ml/optimization/dsp_aware_pruning/objectives/gpu_objectives.py
similarity index 92%
rename from hls4ml/optimization/objectives/gpu_objectives.py
rename to hls4ml/optimization/dsp_aware_pruning/objectives/gpu_objectives.py
index 8528a3183..bb3afc639 100644
--- a/hls4ml/optimization/objectives/gpu_objectives.py
+++ b/hls4ml/optimization/dsp_aware_pruning/objectives/gpu_objectives.py
@@ -2,9 +2,9 @@
 
 import numpy as np
 
-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.objectives import ObjectiveEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.objectives import ObjectiveEstimator
 
 
 class GPUFLOPEstimator(ObjectiveEstimator):
diff --git a/hls4ml/optimization/objectives/vivado_objectives.py b/hls4ml/optimization/dsp_aware_pruning/objectives/vivado_objectives.py
similarity index 98%
rename from hls4ml/optimization/objectives/vivado_objectives.py
rename to hls4ml/optimization/dsp_aware_pruning/objectives/vivado_objectives.py
index c0c0c33e0..798542cfc 100644
--- a/hls4ml/optimization/objectives/vivado_objectives.py
+++ b/hls4ml/optimization/dsp_aware_pruning/objectives/vivado_objectives.py
@@ -3,9 +3,9 @@
 
 import numpy as np
 
-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.objectives import ObjectiveEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.objectives import ObjectiveEstimator
 
 
 # Optimizes DSP utilisation for Vivado backend
diff --git a/hls4ml/optimization/scheduler.py b/hls4ml/optimization/dsp_aware_pruning/scheduler.py
similarity index 100%
rename from hls4ml/optimization/scheduler.py
rename to hls4ml/optimization/dsp_aware_pruning/scheduler.py
diff --git a/test/pytest/test_optimization/test_attributes.py b/test/pytest/test_optimization/test_attributes.py
index 3ba8d08d1..a42d3a675 100644
--- a/test/pytest/test_optimization/test_attributes.py
+++ b/test/pytest/test_optimization/test_attributes.py
@@ -1,7 +1,7 @@
 from tensorflow.keras.layers import Conv2D, Dense, Flatten, ReLU
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.attributes import get_attributes_from_keras_model_and_hls4ml_config
+from hls4ml.optimization import get_attributes_from_keras_model_and_hls4ml_config
 from hls4ml.utils.config import config_from_keras_model
 
diff --git a/test/pytest/test_optimization/test_keras/test_masking.py b/test/pytest/test_optimization/test_keras/test_masking.py
index 5c5e60aca..8b465d8d7 100644
--- a/test/pytest/test_optimization/test_keras/test_masking.py
+++ b/test/pytest/test_optimization/test_keras/test_masking.py
@@ -4,10 +4,10 @@
 from tensorflow.keras.layers import Conv2D, Dense, Flatten
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.attributes import get_attributes_from_keras_model
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.objectives import ParameterEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
 
 '''
 In all the tests, an artifical network with one Dense/Conv2D layer and pre-determined weights is created
diff --git a/test/pytest/test_optimization/test_keras/test_reduction.py b/test/pytest/test_optimization/test_keras/test_reduction.py
index 7243a9123..4bf93f730 100644
--- a/test/pytest/test_optimization/test_keras/test_reduction.py
+++ b/test/pytest/test_optimization/test_keras/test_reduction.py
@@ -6,8 +6,8 @@
 from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, Dense, Flatten, MaxPooling2D, ReLU, Softmax
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.keras.reduction import reduce_model
-from hls4ml.optimization.keras.utils import get_model_sparsity
+from hls4ml.optimization.dsp_aware_pruning.keras.reduction import reduce_model
+from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_model_sparsity
 
 pytest.skip(allow_module_level=True)
diff --git a/test/pytest/test_optimization/test_keras/test_regularizers.py b/test/pytest/test_optimization/test_keras/test_regularizers.py
index 9fe518caa..f643f3a79 100644
--- a/test/pytest/test_optimization/test_keras/test_regularizers.py
+++ b/test/pytest/test_optimization/test_keras/test_regularizers.py
@@ -6,9 +6,9 @@
 from tensorflow.keras.models import Sequential
 from tensorflow.keras.optimizers import Adam
 
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.builder import remove_custom_regularizers
-from hls4ml.optimization.keras.regularizers import Conv2DRegularizer, DenseRegularizer
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.builder import remove_custom_regularizers
+from hls4ml.optimization.dsp_aware_pruning.keras.regularizers import Conv2DRegularizer, DenseRegularizer
 
 # Constants
 pattern_offset = 4
diff --git a/test/pytest/test_optimization/test_keras/test_weight_sharing.py b/test/pytest/test_optimization/test_keras/test_weight_sharing.py
index c274a84da..be1d3a957 100644
--- a/test/pytest/test_optimization/test_keras/test_weight_sharing.py
+++ b/test/pytest/test_optimization/test_keras/test_weight_sharing.py
@@ -4,10 +4,10 @@
 from tensorflow.keras.layers import Dense
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.attributes import get_attributes_from_keras_model
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.objectives import ObjectiveEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.objectives import ObjectiveEstimator
 
 # Similar tests in test_masking.py, weight sharing instead of pruning
 sparsity = 0.33
diff --git a/test/pytest/test_optimization/test_knapsack.py b/test/pytest/test_optimization/test_knapsack.py
index a4145c00d..804081c8e 100644
--- a/test/pytest/test_optimization/test_knapsack.py
+++ b/test/pytest/test_optimization/test_knapsack.py
@@ -1,7 +1,7 @@
 import numpy as np
 import pytest
 
-from hls4ml.optimization.knapsack import solve_knapsack
+from hls4ml.optimization.dsp_aware_pruning.knapsack import solve_knapsack
 
 # In the simple case below, both implementations give the optimal answer
diff --git a/test/pytest/test_optimization/test_objectives.py b/test/pytest/test_optimization/test_objectives.py
index a7d81befe..2f8a6414d 100644
--- a/test/pytest/test_optimization/test_objectives.py
+++ b/test/pytest/test_optimization/test_objectives.py
@@ -2,8 +2,8 @@
 from tensorflow.keras.layers import Conv2D, Dense, Flatten
 from tensorflow.keras.models import Sequential
 
-from hls4ml.optimization.attributes import get_attributes_from_keras_model
-from hls4ml.optimization.objectives import ParameterEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
 
 # Test attempts to verify one of the estimators (parameter) is correctly declared, the functions are static etc.
diff --git a/test/pytest/test_optimization/test_scheduler.py b/test/pytest/test_optimization/test_scheduler.py
index 2dc7642bf..2182d1cb4 100644
--- a/test/pytest/test_optimization/test_scheduler.py
+++ b/test/pytest/test_optimization/test_scheduler.py
@@ -1,6 +1,6 @@
 import numpy as np  # Use np.testing.assert_allclose due to floating point rounding errors
 
-from hls4ml.optimization.scheduler import BinaryScheduler, ConstantScheduler, PolynomialScheduler
+from hls4ml.optimization.dsp_aware_pruning.scheduler import BinaryScheduler, ConstantScheduler, PolynomialScheduler
 
 
 def test_constant_scheduler():
diff --git a/test/pytest/test_pipeline_style.py b/test/pytest/test_pipeline_style.py
old mode 100644
new mode 100755
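
Editor's note: the sketch below illustrates how the relocated API would be invoked after this rename. It mirrors the Vivado DSP use case from docs/advanced/model_optimization.rst; the model, datasets and hyperparameter values are placeholders, and the PolynomialScheduler arguments are assumptions rather than something introduced by this diff.

.. code-block:: Python

    # Hedged usage sketch for the renamed dsp_aware_pruning package (import paths per this diff)
    from tensorflow.keras.losses import CategoricalCrossentropy
    from tensorflow.keras.metrics import CategoricalAccuracy
    from tensorflow.keras.optimizers import Adam

    from hls4ml.optimization import optimize_keras_model_for_hls4ml  # re-exported by the new top-level shim
    from hls4ml.optimization.dsp_aware_pruning.objectives.vivado_objectives import VivadoDSPEstimator
    from hls4ml.optimization.dsp_aware_pruning.scheduler import PolynomialScheduler
    from hls4ml.utils.config import config_from_keras_model

    # keras_model, X_train, y_train, X_val, y_val = ...  # user-supplied model and data

    # Per-layer granularity is needed so the optimizer can read layer-level precision and reuse factor
    hls_config = config_from_keras_model(keras_model, granularity='name')

    optimized_model = optimize_keras_model_for_hls4ml(
        keras_model,
        hls_config,
        VivadoDSPEstimator,                           # hardware objective: Vivado DSP utilisation
        PolynomialScheduler(5, final_sparsity=0.75),  # assumed scheduler arguments, for illustration
        X_train, y_train, X_val, y_val,
        batch_size=256,                               # placeholder training hyperparameters
        epochs=10,
        optimizer=Adam(),
        loss_fn=CategoricalCrossentropy(from_logits=True),
        validation_metric=CategoricalAccuracy(),
        increasing=True,                              # accuracy improves with increasing values
        rtol=0.98,                                    # stop if accuracy drops below 98% of baseline
    )

Note that the pre-rename top-level entry points keep working: the new hls4ml/optimization/__init__.py re-exports optimize_keras_model_for_hls4ml, get_attributes_from_keras_model_and_hls4ml_config and optimize_model from the dsp_aware_pruning subpackage, which is what test_attributes.py exercises above.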