
Commit

Move optimization API to "dsp_aware_pruning" module (new optimization tools coming)
vloncar committed Oct 7, 2024
1 parent c4af46a commit 97c5347
Showing 26 changed files with 160 additions and 157 deletions.
14 changes: 7 additions & 7 deletions docs/advanced/model_optimization.rst
@@ -13,11 +13,11 @@ The code block below showcases three use cases of the hls4ml Optimization API -
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.losses import CategoricalCrossentropy
-from hls4ml.optimization.keras import optimize_model
-from hls4ml.optimization.keras.utils import get_model_sparsity
-from hls4ml.optimization.attributes import get_attributes_from_keras_model
-from hls4ml.optimization.objectives import ParameterEstimator
-from hls4ml.optimization.scheduler import PolynomialScheduler
+from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model
+from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_model_sparsity
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
+from hls4ml.optimization.dsp_aware_pruning.scheduler import PolynomialScheduler
# Define baseline model and load data
# X_train, y_train = ...
# X_val, y_val = ...
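For orientation, a minimal end-to-end sketch of how these imports compose after the move; the model, the data and all hyperparameter values below are illustrative assumptions, not part of this commit:

# Illustrative sketch only: baseline_model, the data arrays and all
# hyperparameter values are assumptions chosen for demonstration.
model_attributes = get_attributes_from_keras_model(baseline_model)
scheduler = PolynomialScheduler(5, final_sparsity=0.75)  # assumed constructor arguments

pruned_model = optimize_model(
    baseline_model, model_attributes, ParameterEstimator, scheduler,
    X_train, y_train, X_val, y_val,
    batch_size=128, epochs=10,
    optimizer=Adam(),
    loss_fn=CategoricalCrossentropy(from_logits=True),
    validation_metric=CategoricalAccuracy(),
    increasing=True,  # accuracy improves as the metric increases
    rtol=0.95,        # stop once pruned accuracy drops below 95% of baseline
)
print(get_model_sparsity(pruned_model))  # inspect the achieved sparsity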
@@ -75,7 +75,7 @@ To optimize GPU FLOPs, the code is similar to above:

.. code-block:: Python
-from hls4ml.optimization.objectives.gpu_objectives import GPUFLOPEstimator
+from hls4ml.optimization.dsp_aware_pruning.objectives.gpu_objectives import GPUFLOPEstimator
# Optimize model
# Note the change from ParameterEstimator to GPUFLOPEstimator
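A hedged sketch of the corresponding call, reusing the variables assumed in the previous example; only the objective changes:

# Same invocation as the parameter-count sketch, with the objective swapped
optimized_model = optimize_model(
    baseline_model, model_attributes, GPUFLOPEstimator, scheduler,
    X_train, y_train, X_val, y_val,
    batch_size=128, epochs=10,
    optimizer=Adam(), loss_fn=CategoricalCrossentropy(from_logits=True),
    validation_metric=CategoricalAccuracy(), increasing=True, rtol=0.95,
)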
@@ -98,7 +98,7 @@ Finally, optimizing Vivado DSPs is possible, given a hls4ml config:
.. code-block:: Python
from hls4ml.utils.config import config_from_keras_model
-from hls4ml.optimization.objectives.vivado_objectives import VivadoDSPEstimator
+from hls4ml.optimization.dsp_aware_pruning.objectives.vivado_objectives import VivadoDSPEstimator
# Note the change from optimize_model to optimize_keras_model_for_hls4ml
# The function optimize_keras_model_for_hls4ml acts as a wrapper for the function, parsing hls4ml config to model attributes
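Putting the pieces together, a sketch of the wrapper call under the same assumptions as above; the granularity argument and all hyperparameter values are assumptions, while the positional signature matches optimize_keras_model_for_hls4ml as defined in the moved module below:

# Sketch: DSP-aware pruning driven by an hls4ml config; variables as above
from hls4ml.optimization import optimize_keras_model_for_hls4ml

hls_config = config_from_keras_model(baseline_model, granularity='name')  # assumed granularity
optimized_model = optimize_keras_model_for_hls4ml(
    baseline_model, hls_config, VivadoDSPEstimator, scheduler,
    X_train, y_train, X_val, y_val,
    batch_size=128, epochs=10,
    optimizer=Adam(), loss_fn=CategoricalCrossentropy(from_logits=True),
    validation_metric=CategoricalAccuracy(), increasing=True, rtol=0.95,
)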
111 changes: 3 additions & 108 deletions hls4ml/optimization/__init__.py
@@ -1,108 +1,3 @@
import numpy as np

from hls4ml.optimization.attributes import get_attributes_from_keras_model_and_hls4ml_config
from hls4ml.optimization.keras import optimize_model

default_regularization_range = np.logspace(-6, -2, num=16).tolist()


def optimize_keras_model_for_hls4ml(
keras_model,
hls_config,
objective,
scheduler,
X_train,
y_train,
X_val,
y_val,
batch_size,
epochs,
optimizer,
loss_fn,
validation_metric,
increasing,
rtol,
callbacks=None,
ranking_metric='l1',
local=False,
verbose=False,
rewinding_epochs=1,
cutoff_bad_trials=3,
directory='hls4ml-optimization',
tuner='Bayesian',
knapsack_solver='CBC_MIP',
regularization_range=default_regularization_range,
):
'''
Top-level function for optimizing a Keras model, given an hls4ml config and one or more hardware objectives
Args:
keras_model (keras.Model): Model to be optimized
hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...)
objective (hls4ml.optimization.objectives.ObjectiveEstimator):
Parameter, hardware or user-defined objective of optimization
scheduler (hls4ml.optimization.scheduler.OptimizationScheduler):
Sparsity scheduler, choose between constant, polynomial and binary
X_train (np.array): Training inputs
y_train (np.array): Training labels
X_val (np.array): Validation inputs
y_val (np.array): Validation labels
batch_size (int): Batch size during training
epochs (int): Maximum number of epochs to fine-tune model, in one iteration of pruning
optimizer (keras.optimizers.Optimizer or equivalent-string description): Optimizer used during training
loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training
validation_metric (keras.metrics.Metric or equivalent metric description): Validation metric, used as a baseline
increasing (boolean): If the metric improves with increased values;
e.g. accuracy -> increasing = True, MSE -> increasing = False
rtol (float): Relative tolerance;
pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric
callbacks (list of keras.callbacks.Callback): Currently not supported; planned for future versions
ranking_metric (string): Metric used for ranking weights and structures;
currently supported: l1, l2, saliency and Oracle
local (boolean): Layer-wise or global pruning
verbose (boolean): Display debug logs during model optimization
rewinding_epochs (int): Number of epochs to retrain model without weight freezing,
allows regrowth of previously pruned weights
cutoff_bad_trials (int): Number of bad trials (performance below threshold)
after which model pruning / weight sharing stops
directory (string): Directory to store temporary results
tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
default usually works well; for very large networks, greedy algorithm might be more suitable
regularization_range (list): List of suitable hyperparameters for weight decay
Returns:
keras.Model: Optimized model
'''

# Extract model attributes
model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)

# Optimize model
return optimize_model(
keras_model,
model_attributes,
objective,
scheduler,
X_train,
y_train,
X_val,
y_val,
batch_size,
epochs,
optimizer,
loss_fn,
validation_metric,
increasing,
rtol,
callbacks=callbacks,
ranking_metric=ranking_metric,
local=local,
verbose=verbose,
rewinding_epochs=rewinding_epochs,
cutoff_bad_trials=cutoff_bad_trials,
directory=directory,
tuner=tuner,
knapsack_solver=knapsack_solver,
regularization_range=regularization_range,
)
+from .dsp_aware_pruning import optimize_keras_model_for_hls4ml # noqa: F401
+from .dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config # noqa: F401
+from .dsp_aware_pruning.keras import optimize_model # noqa: F401
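These three re-exports keep the pre-move top-level import paths working; a quick compatibility check (assuming hls4ml is installed from this revision):

# The old top-level path and the new module path resolve to the same object
from hls4ml.optimization import optimize_model as via_top_level
from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model as via_new_path

assert via_top_level is via_new_path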
108 changes: 108 additions & 0 deletions hls4ml/optimization/dsp_aware_pruning/__init__.py
@@ -0,0 +1,108 @@
import numpy as np

from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config
from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model

default_regularization_range = np.logspace(-6, -2, num=16).tolist()


def optimize_keras_model_for_hls4ml(
keras_model,
hls_config,
objective,
scheduler,
X_train,
y_train,
X_val,
y_val,
batch_size,
epochs,
optimizer,
loss_fn,
validation_metric,
increasing,
rtol,
callbacks=None,
ranking_metric='l1',
local=False,
verbose=False,
rewinding_epochs=1,
cutoff_bad_trials=3,
directory='hls4ml-optimization',
tuner='Bayesian',
knapsack_solver='CBC_MIP',
regularization_range=default_regularization_range,
):
'''
Top-level function for optimizing a Keras model, given an hls4ml config and one or more hardware objectives
Args:
keras_model (keras.Model): Model to be optimized
hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...)
objective (hls4ml.optimization.objectives.ObjectiveEstimator):
Parameter, hardware or user-defined objective of optimization
scheduler (hls4ml.optimization.scheduler.OptimizationScheduler):
Sparsity scheduler, choose between constant, polynomial and binary
X_train (np.array): Training inputs
y_train (np.array): Training labels
X_val (np.array): Validation inputs
y_val (np.array): Validation labels
batch_size (int): Batch size during training
epochs (int): Maximum number of epochs to fine-tune model, in one iteration of pruning
optimizer (keras.optimizers.Optimizer or equivalent-string description): Optimizer used during training
loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training
validation_metric (keras.metrics.Metric or equivalent metric description): Validation metric, used as a baseline
increasing (boolean): If the metric improves with increased values;
e.g. accuracy -> increasing = True, MSE -> increasing = False
rtol (float): Relative tolerance;
pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric
callbacks (list of keras.callbacks.Callback): Currently not supported; planned for future versions
ranking_metric (string): Metric used for ranking weights and structures;
currently supported: l1, l2, saliency and Oracle
local (boolean): Layer-wise or global pruning
verbose (boolean): Display debug logs during model optimization
rewinding_epochs (int): Number of epochs to retrain model without weight freezing,
allows regrowth of previously pruned weights
cutoff_bad_trials (int): Number of bad trials (performance below threshold)
after which model pruning / weight sharing stops
directory (string): Directory to store temporary results
tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
default usually works well; for very large networks, greedy algorithm might be more suitable
regularization_range (list): List of suitable hyperparameters for weight decay
Returns:
keras.Model: Optimized model
'''

# Extract model attributes
model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)

# Optimize model
return optimize_model(
keras_model,
model_attributes,
objective,
scheduler,
X_train,
y_train,
X_val,
y_val,
batch_size,
epochs,
optimizer,
loss_fn,
validation_metric,
increasing,
rtol,
callbacks=callbacks,
ranking_metric=ranking_metric,
local=local,
verbose=verbose,
rewinding_epochs=rewinding_epochs,
cutoff_bad_trials=cutoff_bad_trials,
directory=directory,
tuner=tuner,
knapsack_solver=knapsack_solver,
regularization_range=regularization_range,
)
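As the body above shows, the wrapper only extracts model attributes from the hls4ml config before delegating to optimize_model; a sketch of that equivalence, with every argument variable assumed to be defined:

# Equivalent calls, assuming identical argument values throughout
from hls4ml.optimization.dsp_aware_pruning import optimize_keras_model_for_hls4ml
from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config
from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model

common = (objective, scheduler, X_train, y_train, X_val, y_val,
          batch_size, epochs, optimizer, loss_fn, validation_metric, increasing, rtol)

pruned_a = optimize_keras_model_for_hls4ml(keras_model, hls_config, *common)

attrs = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)
pruned_b = optimize_model(keras_model, attrs, *common)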
@@ -2,8 +2,8 @@

import hls4ml
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS


class hls4mlAttributes:
File renamed without changes.
@@ -7,13 +7,13 @@
# Enables printing of loss tensors during custom training loop
from tensorflow.python.ops.numpy_ops import np_config

-import hls4ml.optimization.keras.utils as utils
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.builder import build_optimizable_model, remove_custom_regularizers
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.keras.reduction import reduce_model
-from hls4ml.optimization.scheduler import OptimizationScheduler
+import hls4ml.optimization.dsp_aware_pruning.keras.utils as utils
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.builder import build_optimizable_model, remove_custom_regularizers
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.keras.reduction import reduce_model
+from hls4ml.optimization.dsp_aware_pruning.scheduler import OptimizationScheduler

np_config.enable_numpy_behavior()
default_regularization_range = np.logspace(-6, -2, num=16).tolist()
@@ -8,8 +8,8 @@
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Conv2D, Dense

-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
-from hls4ml.optimization.keras.regularizers import Conv2DRegularizer, DenseRegularizer
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.regularizers import Conv2DRegularizer, DenseRegularizer

co = {}
_add_supported_quantized_objects(co)
File renamed without changes.
@@ -6,9 +6,9 @@
from qkeras import QConv2D, QDense
from tensorflow.keras.layers import Conv2D, Dense

-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
-from hls4ml.optimization.knapsack import solve_knapsack
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
+from hls4ml.optimization.dsp_aware_pruning.knapsack import solve_knapsack


def get_model_masks(
@@ -2,7 +2,7 @@
from tensorflow.keras.layers import Conv2D, Dense
from tensorflow.keras.models import Sequential

-from hls4ml.optimization.keras.utils import get_last_layer_with_weights
+from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_last_layer_with_weights


def reduce_model(model):
@@ -1,7 +1,7 @@
import numpy as np
import tensorflow as tf

-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES


@tf.keras.utils.register_keras_serializable(name='DenseRegularizer')
File renamed without changes.
File renamed without changes.
@@ -3,8 +3,8 @@

import numpy as np

-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES

'''
Pruning & weight sharing are formulated as an optimization problem, with the aim of minimizing some metric
@@ -2,9 +2,9 @@

import numpy as np

-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.objectives import ObjectiveEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.objectives import ObjectiveEstimator


class GPUFLOPEstimator(ObjectiveEstimator):
@@ -3,9 +3,9 @@

import numpy as np

-from hls4ml.optimization.attributes import OptimizationAttributes
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.objectives import ObjectiveEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.objectives import ObjectiveEstimator


# Optimizes DSP utilisation for Vivado backend
File renamed without changes.
2 changes: 1 addition & 1 deletion test/pytest/test_optimization/test_attributes.py
@@ -1,7 +1,7 @@
from tensorflow.keras.layers import Conv2D, Dense, Flatten, ReLU
from tensorflow.keras.models import Sequential

-from hls4ml.optimization.attributes import get_attributes_from_keras_model_and_hls4ml_config
+from hls4ml.optimization import get_attributes_from_keras_model_and_hls4ml_config
from hls4ml.utils.config import config_from_keras_model


8 changes: 4 additions & 4 deletions test/pytest/test_optimization/test_keras/test_masking.py
@@ -4,10 +4,10 @@
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from tensorflow.keras.models import Sequential

-from hls4ml.optimization.attributes import get_attributes_from_keras_model
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.objectives import ParameterEstimator
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator

'''
In all the tests, an artificial network with one Dense/Conv2D layer and pre-determined weights is created