Skip to content

Commit

Permalink
No public description
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 712590121
Change-Id: I5998e535ee492df506ca81a777c52d39a4453cab
  • Loading branch information
Akshaya Purohit authored and copybara-github committed Jan 6, 2025
1 parent 4eedf1e commit 84f3adf
Show file tree
Hide file tree
Showing 11 changed files with 119 additions and 102 deletions.
2 changes: 1 addition & 1 deletion qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def __init__(
self.ref_size = {}
self.config = config if config else {}

super(ForgivingFactorBits, self).__init__(delta_p, delta_n, rate)
super().__init__(delta_p, delta_n, rate)

def _param_size(self, layer):
"""Computes size of parameters of a layer in bits."""
Expand Down
2 changes: 1 addition & 1 deletion qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def __init__(self, delta_p, delta_n, rate, stress=1.0, **kwargs):
# energy calculation
# keras_layer_quantizer: quantizer for keras layers in hybrid models

super(ForgivingFactorPower, self).__init__(delta_p, delta_n, rate)
super().__init__(delta_p, delta_n, rate)

self.stress = stress
# process: horowitz... - must be present in config_json
Expand Down
2 changes: 1 addition & 1 deletion qkeras/callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def __init__(self,
not.
log_dir: Str. log directory to save qnoise_factor every epoch end.
"""
super(QNoiseScheduler, self).__init__()
super().__init__()

self.start = start
self.finish = finish
Expand Down
5 changes: 3 additions & 2 deletions qkeras/qconv2d_batchnorm.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def __init__(
"""

  # initialization of the qconv2d part of the composite layer
super(QConv2DBatchnorm, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -126,7 +126,8 @@ def __init__(
bias_constraint=bias_constraint,
kernel_quantizer=kernel_quantizer,
bias_quantizer=bias_quantizer,
**kwargs)
**kwargs
)

# initialization of batchnorm part of the composite layer
self.batchnorm = layers.BatchNormalization(
Expand Down
30 changes: 18 additions & 12 deletions qkeras/qconvolutional.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QConv1D, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -185,7 +185,8 @@ def __init__(self,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
**kwargs
)

def call(self, inputs):
if self.kernel_quantizer:
Expand Down Expand Up @@ -323,7 +324,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QConv2D, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -339,7 +340,8 @@ def __init__(self,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
**kwargs
)

def call(self, inputs):
if self.kernel_quantizer:
Expand Down Expand Up @@ -463,7 +465,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QConv2DTranspose, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -480,7 +482,8 @@ def __init__(self,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
**kwargs
)

def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
Expand Down Expand Up @@ -650,7 +653,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QSeparableConv1D, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -670,7 +673,8 @@ def __init__(self,
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
**kwargs
)

def call(self, inputs):
if self.padding == 'causal':
Expand Down Expand Up @@ -828,7 +832,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QSeparableConv2D, self).__init__(
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
Expand All @@ -848,7 +852,8 @@ def __init__(self,
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
**kwargs
)

def call(self, inputs):
# Apply the actual ops.
Expand Down Expand Up @@ -986,7 +991,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QDepthwiseConv2D, self).__init__(
super().__init__(
kernel_size=kernel_size,
strides=strides,
padding=padding,
Expand All @@ -1002,7 +1007,8 @@ def __init__(self,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
dilation_rate=dilation_rate,
**kwargs)
**kwargs
)

def build(self, input_shape):
if len(input_shape) < 4:
Expand Down
5 changes: 3 additions & 2 deletions qkeras/qdepthwiseconv2d_batchnorm.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def __init__(
"""

  # initialization of the QDepthwiseConv2d part of the composite layer
super(QDepthwiseConv2DBatchnorm, self).__init__(
super().__init__(
kernel_size=kernel_size,
strides=strides,
padding=padding,
Expand All @@ -127,7 +127,8 @@ def __init__(
bias_quantizer=bias_quantizer,
depthwise_range=depthwise_range,
bias_range=bias_range,
**kwargs)
**kwargs
)

# initialization of batchnorm part of the composite layer
self.batchnorm = layers.BatchNormalization(
Expand Down
9 changes: 5 additions & 4 deletions qkeras/qlayers.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ class QActivation(Layer, PrunableLayer):
# object if string is given as activation.
def __init__(self, activation, **kwargs):

super(QActivation, self).__init__(**kwargs)
super().__init__(**kwargs)

self.activation = activation

Expand Down Expand Up @@ -267,7 +267,7 @@ def __init__(self,
that this param is ignored if the activation is not quantized_relu
**kwargs: Args passed to the Layer class.
"""
super(QAdaptiveActivation, self).__init__(**kwargs)
super().__init__(**kwargs)

self.total_bits = total_bits
self.symmetric = symmetric
Expand Down Expand Up @@ -630,7 +630,7 @@ def __init__(self,
if activation is not None:
activation = get_quantizer(activation)

super(QDense, self).__init__(
super().__init__(
units=units,
activation=activation,
use_bias=use_bias,
Expand All @@ -641,7 +641,8 @@ def __init__(self,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
**kwargs,
)

def call(self, inputs):
if self.kernel_quantizer:
Expand Down
5 changes: 3 additions & 2 deletions qkeras/qnormalization.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def __init__(
'in qkeras qnormalization.py.')
del kwargs['adjustment']

super(QBatchNormalization, self).__init__(
super().__init__(
axis=axis,
momentum=momentum,
epsilon=epsilon,
Expand All @@ -172,7 +172,8 @@ def __init__(
renorm=False,
virtual_batch_size=None,
adjustment=None,
**kwargs)
**kwargs
)

def call(self, inputs, training=None):
if self.scale and self.gamma_quantizer:
Expand Down
13 changes: 8 additions & 5 deletions qkeras/qpooling.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,13 @@ def __init__(self, pool_size=(2, 2),
else:
self.activation = activation

super(QAveragePooling2D, self).__init__(
pool_size=pool_size, strides=strides, padding=padding,
data_format=data_format, **kwargs)
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs
)

def call(self, inputs):
"""Performs quantized AveragePooling followed by QActivation.
Expand Down Expand Up @@ -142,8 +146,7 @@ def __init__(self, data_format=None,
else:
self.activation = activation

super(QGlobalAveragePooling2D, self).__init__(
data_format=data_format, **kwargs)
super().__init__(data_format=data_format, **kwargs)

def compute_pooling_area(self, input_shape):
if not isinstance(input_shape, tuple):
Expand Down
Loading

0 comments on commit 84f3adf

Please sign in to comment.