From 84f3adfad72456ee05895da3fa0a1b2d19920bef Mon Sep 17 00:00:00 2001 From: Akshaya Purohit Date: Mon, 6 Jan 2025 11:01:47 -0800 Subject: [PATCH] No public description PiperOrigin-RevId: 712590121 Change-Id: I5998e535ee492df506ca81a777c52d39a4453cab --- .../forgiving_metrics/forgiving_bits.py | 2 +- .../forgiving_metrics/forgiving_energy.py | 2 +- qkeras/callbacks.py | 2 +- qkeras/qconv2d_batchnorm.py | 5 +- qkeras/qconvolutional.py | 30 +++-- qkeras/qdepthwiseconv2d_batchnorm.py | 5 +- qkeras/qlayers.py | 9 +- qkeras/qnormalization.py | 5 +- qkeras/qpooling.py | 13 +- qkeras/qrecurrent.py | 124 +++++++++--------- qkeras/quantizers.py | 24 ++-- 11 files changed, 119 insertions(+), 102 deletions(-) diff --git a/qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py b/qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py index 4968d1f4..d7f66f62 100644 --- a/qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py +++ b/qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py @@ -35,7 +35,7 @@ def __init__( self.ref_size = {} self.config = config if config else {} - super(ForgivingFactorBits, self).__init__(delta_p, delta_n, rate) + super().__init__(delta_p, delta_n, rate) def _param_size(self, layer): """Computes size of parameters of a layer in bits.""" diff --git a/qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py b/qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py index ff8a2956..6c0db878 100644 --- a/qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py +++ b/qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py @@ -50,7 +50,7 @@ def __init__(self, delta_p, delta_n, rate, stress=1.0, **kwargs): # energy calculation # keras_layer_quantizer: quantizer for keras layers in hybrid models - super(ForgivingFactorPower, self).__init__(delta_p, delta_n, rate) + super().__init__(delta_p, delta_n, rate) self.stress = stress # process: horowitz... - must be present in config_json diff --git a/qkeras/callbacks.py b/qkeras/callbacks.py index 71110bcf..a88659e3 100644 --- a/qkeras/callbacks.py +++ b/qkeras/callbacks.py @@ -58,7 +58,7 @@ def __init__(self, not. log_dir: Str. log directory to save qnoise_factor every epoch end. 
""" - super(QNoiseScheduler, self).__init__() + super().__init__() self.start = start self.finish = finish diff --git a/qkeras/qconv2d_batchnorm.py b/qkeras/qconv2d_batchnorm.py index 7c6fc20f..64d09673 100644 --- a/qkeras/qconv2d_batchnorm.py +++ b/qkeras/qconv2d_batchnorm.py @@ -109,7 +109,7 @@ def __init__( """ # intialization the qconv2d part of the composite layer - super(QConv2DBatchnorm, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -126,7 +126,8 @@ def __init__( bias_constraint=bias_constraint, kernel_quantizer=kernel_quantizer, bias_quantizer=bias_quantizer, - **kwargs) + **kwargs + ) # initialization of batchnorm part of the composite layer self.batchnorm = layers.BatchNormalization( diff --git a/qkeras/qconvolutional.py b/qkeras/qconvolutional.py index 477b8527..924fd670 100644 --- a/qkeras/qconvolutional.py +++ b/qkeras/qconvolutional.py @@ -170,7 +170,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QConv1D, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -185,7 +185,8 @@ def __init__(self, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, - **kwargs) + **kwargs + ) def call(self, inputs): if self.kernel_quantizer: @@ -323,7 +324,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QConv2D, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -339,7 +340,8 @@ def __init__(self, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, - **kwargs) + **kwargs + ) def call(self, inputs): if self.kernel_quantizer: @@ -463,7 +465,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QConv2DTranspose, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -480,7 +482,8 @@ def __init__(self, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, - **kwargs) + **kwargs + ) def call(self, inputs): inputs_shape = array_ops.shape(inputs) @@ -650,7 +653,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QSeparableConv1D, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -670,7 +673,8 @@ def __init__(self, depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), - **kwargs) + **kwargs + ) def call(self, inputs): if self.padding == 'causal': @@ -828,7 +832,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QSeparableConv2D, self).__init__( + super().__init__( filters=filters, kernel_size=kernel_size, strides=strides, @@ -848,7 +852,8 @@ def __init__(self, depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), - **kwargs) + **kwargs + ) def call(self, inputs): # Apply the actual ops. 
@@ -986,7 +991,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QDepthwiseConv2D, self).__init__( + super().__init__( kernel_size=kernel_size, strides=strides, padding=padding, @@ -1002,7 +1007,8 @@ def __init__(self, depthwise_constraint=depthwise_constraint, bias_constraint=bias_constraint, dilation_rate=dilation_rate, - **kwargs) + **kwargs + ) def build(self, input_shape): if len(input_shape) < 4: diff --git a/qkeras/qdepthwiseconv2d_batchnorm.py b/qkeras/qdepthwiseconv2d_batchnorm.py index 20a0271f..5f32ea70 100644 --- a/qkeras/qdepthwiseconv2d_batchnorm.py +++ b/qkeras/qdepthwiseconv2d_batchnorm.py @@ -107,7 +107,7 @@ def __init__( """ # intialization the QDepthwiseConv2d part of the composite layer - super(QDepthwiseConv2DBatchnorm, self).__init__( + super().__init__( kernel_size=kernel_size, strides=strides, padding=padding, @@ -127,7 +127,8 @@ def __init__( bias_quantizer=bias_quantizer, depthwise_range=depthwise_range, bias_range=bias_range, - **kwargs) + **kwargs + ) # initialization of batchnorm part of the composite layer self.batchnorm = layers.BatchNormalization( diff --git a/qkeras/qlayers.py b/qkeras/qlayers.py index 4d6ad223..ceefc0d4 100644 --- a/qkeras/qlayers.py +++ b/qkeras/qlayers.py @@ -155,7 +155,7 @@ class QActivation(Layer, PrunableLayer): # object if string is given as activation. def __init__(self, activation, **kwargs): - super(QActivation, self).__init__(**kwargs) + super().__init__(**kwargs) self.activation = activation @@ -267,7 +267,7 @@ def __init__(self, that this param is ignored if the activation is not quantized_relu **kwargs: Args passed to the Layer class. """ - super(QAdaptiveActivation, self).__init__(**kwargs) + super().__init__(**kwargs) self.total_bits = total_bits self.symmetric = symmetric @@ -630,7 +630,7 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QDense, self).__init__( + super().__init__( units=units, activation=activation, use_bias=use_bias, @@ -641,7 +641,8 @@ def __init__(self, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, - **kwargs) + **kwargs, + ) def call(self, inputs): if self.kernel_quantizer: diff --git a/qkeras/qnormalization.py b/qkeras/qnormalization.py index ae3bbc4b..90245e15 100644 --- a/qkeras/qnormalization.py +++ b/qkeras/qnormalization.py @@ -154,7 +154,7 @@ def __init__( 'in qkeras qnormalization.py.') del kwargs['adjustment'] - super(QBatchNormalization, self).__init__( + super().__init__( axis=axis, momentum=momentum, epsilon=epsilon, @@ -172,7 +172,8 @@ def __init__( renorm=False, virtual_batch_size=None, adjustment=None, - **kwargs) + **kwargs + ) def call(self, inputs, training=None): if self.scale and self.gamma_quantizer: diff --git a/qkeras/qpooling.py b/qkeras/qpooling.py index 444bbac0..cb8b3c49 100644 --- a/qkeras/qpooling.py +++ b/qkeras/qpooling.py @@ -45,9 +45,13 @@ def __init__(self, pool_size=(2, 2), else: self.activation = activation - super(QAveragePooling2D, self).__init__( - pool_size=pool_size, strides=strides, padding=padding, - data_format=data_format, **kwargs) + super().__init__( + pool_size=pool_size, + strides=strides, + padding=padding, + data_format=data_format, + **kwargs + ) def call(self, inputs): """Performs quantized AveragePooling followed by QActivation. 
@@ -142,8 +146,7 @@ def __init__(self, data_format=None, else: self.activation = activation - super(QGlobalAveragePooling2D, self).__init__( - data_format=data_format, **kwargs) + super().__init__(data_format=data_format, **kwargs) def compute_pooling_area(self, input_shape): if not isinstance(input_shape, tuple): diff --git a/qkeras/qrecurrent.py b/qkeras/qrecurrent.py index c8609abe..fcddfc0f 100644 --- a/qkeras/qrecurrent.py +++ b/qkeras/qrecurrent.py @@ -121,22 +121,22 @@ def __init__(self, if activation is not None: activation = get_quantizer(activation) - super(QSimpleRNNCell, self).__init__( - units=units, - activation=activation, - use_bias=use_bias, - kernel_initializer=kernel_initializer, - recurrent_initializer=recurrent_initializer, - bias_initializer=bias_initializer, - kernel_regularizer=kernel_regularizer, - recurrent_regularizer=recurrent_regularizer, - bias_regularizer=bias_regularizer, - kernel_constraint=kernel_constraint, - recurrent_constraint=recurrent_constraint, - bias_constraint=bias_constraint, - dropout=dropout, - recurrent_dropout=recurrent_dropout, - **kwargs + super().__init__( + units=units, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + **kwargs ) def call(self, inputs, states, training=None): @@ -278,14 +278,15 @@ def __init__(self, trainable=kwargs.get('trainable', True), **cell_kwargs) - super(QSimpleRNN, self).__init__( + super().__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, - **kwargs) + **kwargs + ) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [tf.keras.layers.InputSpec(ndim=3)] @@ -552,25 +553,25 @@ def __init__(self, if recurrent_activation is not None: recurrent_activation = get_quantizer(recurrent_activation) - super(QLSTMCell, self).__init__( - units=units, - activation=activation, - use_bias=use_bias, - recurrent_activation=recurrent_activation, - kernel_initializer=kernel_initializer, - recurrent_initializer=recurrent_initializer, - bias_initializer=bias_initializer, - unit_forget_bias=True, - kernel_regularizer=kernel_regularizer, - recurrent_regularizer=recurrent_regularizer, - bias_regularizer=bias_regularizer, - kernel_constraint=kernel_constraint, - recurrent_constraint=recurrent_constraint, - bias_constraint=bias_constraint, - dropout=dropout, - recurrent_dropout=recurrent_dropout, - implementation=implementation, - **kwargs + super().__init__( + units=units, + activation=activation, + use_bias=use_bias, + recurrent_activation=recurrent_activation, + kernel_initializer=kernel_initializer, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + unit_forget_bias=True, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + implementation=implementation, + **kwargs ) def _compute_carry_and_output(self, x, h_tm1, c_tm1, 
quantized_recurrent): @@ -780,14 +781,15 @@ def __init__(self, trainable=kwargs.get('trainable', True), **cell_kwargs) - super(QLSTM, self).__init__( + super().__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, - **kwargs) + **kwargs + ) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [tf.keras.layers.InputSpec(ndim=3)] @@ -1074,25 +1076,26 @@ def __init__(self, if recurrent_activation is not None: recurrent_activation = get_quantizer(recurrent_activation) - super(QGRUCell, self).__init__( - units=units, - activation=activation, - recurrent_activation=recurrent_activation, - use_bias=use_bias, - kernel_initializer=kernel_initializer, - recurrent_initializer=recurrent_initializer, - bias_initializer=bias_initializer, - kernel_regularizer=kernel_regularizer, - recurrent_regularizer=recurrent_regularizer, - bias_regularizer=bias_regularizer, - kernel_constraint=kernel_constraint, - recurrent_constraint=recurrent_constraint, - bias_constraint=bias_constraint, - dropout=dropout, - recurrent_dropout=recurrent_dropout, - implementation=implementation, - reset_after=reset_after, - **kwargs) + super().__init__( + units=units, + activation=activation, + recurrent_activation=recurrent_activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + implementation=implementation, + reset_after=reset_after, + **kwargs + ) def call(self, inputs, states, training=None): # previous memory @@ -1320,14 +1323,15 @@ def __init__(self, trainable=kwargs.get('trainable', True), **cell_kwargs) - super(QGRU, self).__init__( + super().__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, unroll=unroll, - **kwargs) + **kwargs + ) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [tf.keras.layers.InputSpec(ndim=3)] diff --git a/qkeras/quantizers.py b/qkeras/quantizers.py index 34bedd93..54dd2672 100644 --- a/qkeras/quantizers.py +++ b/qkeras/quantizers.py @@ -841,7 +841,7 @@ def __init__( var_name=None, use_variables=False, ): - super(quantized_linear, self).__init__() + super().__init__() self.var_name = var_name @@ -1254,7 +1254,7 @@ def __init__(self, elements_per_scale=None, min_po2_exponent=None, max_po2_exponent=None): - super(quantized_bits, self).__init__() + super().__init__() self.bits = bits self.integer = integer @@ -1517,7 +1517,7 @@ class bernoulli(base_quantizer.BaseQuantizer): # pylint: disable=invalid-name """ def __init__(self, alpha=None, temperature=6.0, use_real_sigmoid=True): - super(bernoulli, self).__init__() + super().__init__() self.alpha = alpha self.bits = 1 self.temperature = temperature @@ -1623,7 +1623,7 @@ class ternary(base_quantizer.BaseQuantizer): # pylint: disable=invalid-name def __init__(self, alpha=None, threshold=None, use_stochastic_rounding=False, number_of_unrolls=5): - super(ternary, self).__init__() + super().__init__() self.bits = 2 self.alpha = alpha self.threshold = threshold @@ -1965,7 +1965,7 @@ class binary(base_quantizer.BaseQuantizer): # pylint: 
disable=invalid-name def __init__(self, use_01=False, alpha=None, use_stochastic_rounding=False, scale_axis=None, elements_per_scale=None, min_po2_exponent=None, max_po2_exponent=None): - super(binary, self).__init__() + super().__init__() self.use_01 = use_01 self.bits = 1 self.alpha = alpha @@ -2127,7 +2127,7 @@ class stochastic_binary(binary): # pylint: disable=invalid-name """ def __init__(self, alpha=None, temperature=6.0, use_real_sigmoid=True): - super(stochastic_binary, self).__init__(alpha=alpha) + super().__init__(alpha=alpha) self.alpha = alpha self.bits = 1 self.temperature = temperature @@ -2278,7 +2278,7 @@ def __init__(self, var_name=None, use_ste=True, use_variables=False): - super(quantized_relu, self).__init__() + super().__init__() self.bits = bits self.integer = integer self.use_sigmoid = use_sigmoid @@ -2452,7 +2452,7 @@ class quantized_ulaw(base_quantizer.BaseQuantizer): # pylint: disable=invalid-n """ def __init__(self, bits=8, integer=0, symmetric=0, u=255.0): - super(quantized_ulaw, self).__init__() + super().__init__() self.bits = bits self.integer = integer self.symmetric = symmetric @@ -2532,7 +2532,7 @@ class quantized_tanh(base_quantizer.BaseQuantizer): # pylint: disable=invalid-n def __init__(self, bits=8, use_stochastic_rounding=False, symmetric=False, use_real_tanh=False): - super(quantized_tanh, self).__init__() + super().__init__() self.bits = bits self.symmetric = symmetric self.use_stochastic_rounding = use_stochastic_rounding @@ -2598,7 +2598,7 @@ class quantized_sigmoid(base_quantizer.BaseQuantizer): # pylint: disable=invali def __init__(self, bits=8, symmetric=False, use_real_sigmoid=False, use_stochastic_rounding=False): - super(quantized_sigmoid, self).__init__() + super().__init__() self.bits = bits self.symmetric = symmetric self.use_real_sigmoid = use_real_sigmoid @@ -2806,7 +2806,7 @@ def __init__(self, var_name=None, use_ste=True, use_variables=False): - super(quantized_po2, self).__init__() + super().__init__() self.bits = bits self.max_value = max_value self.use_stochastic_rounding = use_stochastic_rounding @@ -2944,7 +2944,7 @@ def __init__(self, var_name=None, use_ste=True, use_variables=False): - super(quantized_relu_po2, self).__init__() + super().__init__() self.bits = bits self.max_value = max_value self.negative_slope = negative_slope
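
Note: every hunk in this patch applies the same mechanical rewrite, replacing the Python 2 compatible `super(ClassName, self).__init__(...)` with the Python 3 zero-argument form, which resolves the current class and instance implicitly through the compiler-provided `__class__` closure cell. A minimal standalone sketch of the equivalence (`Base` and `Child` are illustrative names, not qkeras classes):

    class Base:
        def __init__(self, rate, **kwargs):
            self.rate = rate


    class Child(Base):
        def __init__(self, rate, scale=1.0, **kwargs):
            # Python 2 style form, as replaced throughout this patch:
            #   super(Child, self).__init__(rate, **kwargs)
            # Python 3 zero-argument form; the interpreter supplies the
            # enclosing class and the instance automatically:
            super().__init__(rate, **kwargs)
            self.scale = scale


    c = Child(rate=0.5)
    assert c.rate == 0.5 and c.scale == 1.0

Behavior is identical in Python 3; the zero-argument form simply avoids hard-coding the class name, so the call remains correct if a class is later renamed or the method is copied between classes.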