
Commit 3a711cd

format with ruff

1 parent cd11ae7 commit 3a711cd
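For context, ruff's formatter rewrites string literals to the project's configured quote style (evidently single quotes here) and normalizes docstrings to triple double quotes, which accounts for nearly every hunk below. A minimal sketch of reproducing the run; the helper name and the assumption that ruff is installed and configured are ours, not part of this commit:

# Hypothetical driver script -- assumes `ruff` is on PATH (pip install ruff);
# the repository's actual ruff configuration is not shown in this commit.
import subprocess

def format_tree(path: str = '.') -> None:
    # `ruff format` applies the configured quote style and docstring
    # normalization, producing diffs like the ones below.
    subprocess.run(['ruff', 'format', path], check=True)

if __name__ == '__main__':
    format_tree()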

144 files changed (+857, -867 lines)


docs/conf.py

Lines changed: 2 additions & 2 deletions
@@ -99,7 +99,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 
 # -- Extension configuration -------------------------------------------------
 html_show_sourcelink = False
-html_logo = "img/hls4ml_logo_navbar.png"
+html_logo = 'img/hls4ml_logo_navbar.png'
 
 html_theme_options = {
     'canonical_url': '',
@@ -120,7 +120,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 html_context = {
     'display_github': True,  # Integrate GitHub
     'github_user': 'fastmachinelearning',  # Username
-    'github_repo': "hls4ml",  # Repo name
+    'github_repo': 'hls4ml',  # Repo name
     'github_version': 'main',  # Version
     'conf_py_path': '/docs/',  # Path in the checkout to the docs root
 }

hls4ml/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,11 @@
-from hls4ml import converters, report, utils  # noqa: F401, E402
+from hls4ml import converters, report, utils
 
 try:
     from ._version import version as __version__
     from ._version import version_tuple
 except ImportError:
-    __version__ = "unknown version"
-    version_tuple = (0, 0, "unknown version")
+    __version__ = 'unknown version'
+    version_tuple = (0, 0, 'unknown version')
 
 
 def reseed(newseed):

hls4ml/backends/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,11 @@
-from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend  # noqa: F401
-from hls4ml.backends.fpga.fpga_backend import FPGABackend  # noqa: F401
+from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend
+from hls4ml.backends.fpga.fpga_backend import FPGABackend
 from hls4ml.backends.oneapi.oneapi_backend import OneAPIBackend
 from hls4ml.backends.quartus.quartus_backend import QuartusBackend
 from hls4ml.backends.symbolic.symbolic_backend import SymbolicExpressionBackend
 from hls4ml.backends.vivado.vivado_backend import VivadoBackend
 from hls4ml.backends.vivado_accelerator.vivado_accelerator_backend import VivadoAcceleratorBackend
-from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig  # noqa: F401
+from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig
 
 from hls4ml.backends.catapult.catapult_backend import CatapultBackend  # isort: skip

hls4ml/backends/catapult/catapult_backend.py

Lines changed: 4 additions & 4 deletions
@@ -251,7 +251,7 @@ def build(
         ccs_args = f'"reset={reset} csim={csim} synth={synth} cosim={cosim} validation={validation}'
         ccs_args += f' export={export} vsynth={vsynth} fifo_opt={fifo_opt} bitfile={bitfile} ran_frame={ran_frame}'
         ccs_args += f' sw_opt={sw_opt} power={power} da={da} vhdl={vhdl} verilog={verilog} bup={bup}"'
-        ccs_invoke = catapult_exe + ' -product ultra -shell -f build_prj.tcl -eval \'set ::argv ' + ccs_args + '\''
+        ccs_invoke = catapult_exe + " -product ultra -shell -f build_prj.tcl -eval 'set ::argv " + ccs_args + "'"
         print(ccs_invoke)
         os.system(ccs_invoke)
         os.chdir(curr_dir)
@@ -455,9 +455,9 @@ def init_global_pooling2d(self, layer):
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
-            assert (
-                len(layer.get_input_variable().shape) == 1
-            ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            assert len(layer.get_input_variable().shape) == 1, (
+                'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            )
 
     @layer_optimizer(Embedding)
     def init_embed(self, layer):
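The first hunk only flips the outer quote style so the embedded Tcl single quotes no longer need escaping. A one-line sanity check, purely illustrative and not from the commit:

# Escaped single quotes inside '...' and bare single quotes inside "..."
# denote the identical string, so the rewrite is behavior-preserving.
assert '-eval \'set ::argv \'' == "-eval 'set ::argv '"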

hls4ml/backends/catapult/passes/broadcast_stream.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 
 
 class Broadcast(Layer):
-    '''Inserted between layers for broadcasting.'''
+    """Inserted between layers for broadcasting."""
 
     def initialize(self):
         shape = self.attributes['target_shape']
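Most of the remaining hunks in this commit are this same rewrite. A minimal illustration (not from the commit) that the two docstring spellings are interchangeable; formatters such as ruff and black simply standardize on triple double quotes:

def f():
    '''Inserted between layers for broadcasting.'''

def g():
    """Inserted between layers for broadcasting."""

assert f.__doc__ == g.__doc__  # identical docstrings either way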

hls4ml/backends/catapult/passes/conv_stream.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 
 
 class GenerateConvStreamingInstructions(OptimizerPass):
-    '''Generates the instructions for streaming implementation of CNNs'''
+    """Generates the instructions for streaming implementation of CNNs"""
 
     def match(self, node):
         is_match = (

hls4ml/backends/catapult/passes/convolution_winograd.py

Lines changed: 2 additions & 2 deletions
@@ -7,10 +7,10 @@
 
 
 class ApplyWinogradKernelTransformation(OptimizerPass):
-    '''
+    """
     Transforms the weights of a Conv2D kernel to a format suitable for Wingorad convolution
     For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks
-    '''
+    """
 
     def match(self, node):
         node_matches = isinstance(node, (Conv1D, Conv2D))

hls4ml/backends/catapult/passes/fifo_depth_optimization.py

Lines changed: 2 additions & 2 deletions
@@ -82,9 +82,9 @@ def transform(self, model):
 
         if len(data['children']) == 0:
             print(
-                "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible."
+                'FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible.'
             )
-            print("Consider increasing profiling_fifo_depth.")
+            print('Consider increasing profiling_fifo_depth.')
             return False
 
         n_elem = len(data['children'][0]['children'][0]['children'])

hls4ml/backends/catapult/passes/resource_strategy.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 
 
 class ApplyResourceStrategy(OptimizerPass):
-    '''Transposes the weights to use the dense_resource matrix multiply routine'''
+    """Transposes the weights to use the dense_resource matrix multiply routine"""
 
     def match(self, node):
         node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU))

hls4ml/backends/fpga/fpga_backend.py

Lines changed: 29 additions & 29 deletions
@@ -346,9 +346,9 @@ def convert_precision_string(cls, precision):
 
     @classmethod
     def _convert_ap_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ap_fixed<16,6>" to the internal FixedPrecisionTypes etc)
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         sat_mode = None
         round_mode = None
@@ -357,12 +357,12 @@ def _convert_ap_type(cls, precision):
             width = int(bits[0])
             integer = int(bits[1])
             fields = 2
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         elif 'int' in precision:
             width = int(bits[0])
             integer = width
             fields = 1
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         if len(bits) > fields:
             round_mode = bits[fields]
         if len(bits) > fields + 1:
@@ -376,9 +376,9 @@ def _convert_ap_type(cls, precision):
 
     @classmethod
     def _convert_ac_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ac_fixed<16,6>" to the internal FixedPrecisionTypes etc)
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         signed = True  # default is signed
         sat_mode = None
@@ -414,18 +414,18 @@ def _convert_ac_type(cls, precision):
 
     @classmethod
     def _convert_auto_type(cls, precision):
-        '''
+        """
         Convert a "auto" precision string into the UnspecifiedPrecisionType
-        '''
+        """
         return UnspecifiedPrecisionType()
 
     def product_type(self, data_T, weight_T):
-        '''
+        """
         Helper function to determine which product implementation to use during inference
-        '''
-        assert not isinstance(
-            data_T, ExponentPrecisionType
-        ), "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        """
+        assert not isinstance(data_T, ExponentPrecisionType), (
+            "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        )
         product = 'mult'
         if isinstance(weight_T, ExponentPrecisionType):
             product = 'weight_exponential'
@@ -754,14 +754,14 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke
         im2col_matrix = self._compute_conv1d_im2col((in_W, in_C), kernel, stride, (pad_left, pad_right), dilation)
 
         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
         ).format(index=layer_idx)
         indent = '    '
 
@@ -884,14 +884,14 @@ def generate_conv2d_line_buffer_fn(
         )
 
         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
        ).format(index=layer_idx)
        indent = '    '
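A standalone sketch mirroring the two details this file's hunks touch: the `.+<(.+?)>` parsing idiom from `_convert_ap_type` and ruff's `not ('u' in s)` → `'u' not in s` rewrite. Illustrative only; the function name and return shape here are ours, not the hls4ml implementation:

# Hypothetical helper, not from the commit.
import re

def parse_ap_fixed(precision: str) -> dict:
    # 'ap_fixed<16,6>' -> ['16', '6']; 'ap_ufixed<18,8>' also matches.
    bits = re.search('.+<(.+?)>', precision).group(1).split(',')
    return {
        'width': int(bits[0]),
        'integer': int(bits[1]),
        # equivalent to `not ('u' in precision)`, just more idiomatic
        'signed': 'u' not in precision,
    }

print(parse_ap_fixed('ap_fixed<16,6>'))   # {'width': 16, 'integer': 6, 'signed': True}
print(parse_ap_fixed('ap_ufixed<18,8>'))  # {'width': 18, 'integer': 8, 'signed': False}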

hls4ml/backends/fpga/fpga_layers.py

Lines changed: 4 additions & 4 deletions
@@ -6,10 +6,10 @@
 
 
 class BatchNormalizationQuantizedTanh(Layer):
-    '''Merged Batch Normalization and quantized (binary or ternary) Tanh layer.
+    """Merged Batch Normalization and quantized (binary or ternary) Tanh layer.
     The mean, variance, beta, gamma parameters are folded into the threshold(s) at which the
     sign of the input flips after the quantized (binary or ternary) Tanh activation.
-    '''
+    """
 
     _expected_attributes = [
         Attribute('n_in'),
@@ -71,15 +71,15 @@ def set_thresholds(self, scale, bias, ternary_threshold=0.5):
 
 
 class PointwiseConv1D(Conv1D):
-    '''Optimized Conv1D implementation for 1x1 kernels.'''
+    """Optimized Conv1D implementation for 1x1 kernels."""
 
     def initialize(self):
         # Do noting, values copied
         pass
 
 
 class PointwiseConv2D(Conv2D):
-    '''Optimized Conv2D implementation for 1x1 kernels.'''
+    """Optimized Conv2D implementation for 1x1 kernels."""
 
     def initialize(self):
         # Do noting, values copied

hls4ml/backends/fpga/fpga_types.py

Lines changed: 2 additions & 2 deletions
@@ -165,7 +165,7 @@ def definition_cpp(self):
 
 class CompressedTypeConverter(TypeDefinition, TypePrecisionConverter):
     def definition_cpp(self):
-        cpp_fmt = 'typedef struct {name} {{' '{index} row_index;' '{index} col_index;' '{precision} weight; }} {name};\n'
+        cpp_fmt = 'typedef struct {name} {{{index} row_index;{index} col_index;{precision} weight; }} {name};\n'
         return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision.definition_cpp())
 
     def convert_precision(self, precision_converter):
@@ -175,7 +175,7 @@ def convert_precision(self, precision_converter):
 
 class ExponentTypeConverter(TypeDefinition, TypePrecisionConverter):
     def definition_cpp(self):
-        cpp_fmt = 'typedef struct {name} {{' '{sign} sign;' '{precision} weight; }} {name};\n'
+        cpp_fmt = 'typedef struct {name} {{{sign} sign;{precision} weight; }} {name};\n'
         return cpp_fmt.format(name=self.name, precision=self.precision.definition_cpp(), sign=self.sign.definition_cpp())
 
     def convert_precision(self, precision_converter):
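These two hunks only merge adjacent string literals. An illustration (not from the commit) that Python joins adjacent literals at compile time, so the merge is purely cosmetic:

old = 'typedef struct {name} {{' '{index} row_index;' '{index} col_index;' '{precision} weight; }} {name};\n'
new = 'typedef struct {name} {{{index} row_index;{index} col_index;{precision} weight; }} {name};\n'
assert old == new  # same string object either way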

hls4ml/backends/fpga/passes/clone.py

Lines changed: 2 additions & 3 deletions
@@ -6,7 +6,7 @@
 
 
 class Clone(Layer):
-    '''Inserted after the layer whose output is used more than once.'''
+    """Inserted after the layer whose output is used more than once."""
 
     def initialize(self):
         inp = self.get_input_variable()
@@ -47,7 +47,7 @@ def register_clone(backend):
 
 
 class CloneOutput(OptimizerPass):
-    '''Clones streams that are used multiple times'''
+    """Clones streams that are used multiple times"""
 
     def match(self, node):
         # We may have already inserted the Clone layer
@@ -70,7 +70,6 @@ def match(self, node):
         return False
 
     def transform(self, model, node):
-
         output_map = node.get_output_use_map()
 
         transformed = False

hls4ml/backends/fpga/passes/final_reshape.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 
 
 class RemoveFinalReshape(OptimizerPass):
-    '''Remove reshape if final layer'''
+    """Remove reshape if final layer"""
 
     def match(self, node):
         # match if reshape is final node

hls4ml/backends/fpga/passes/fix_softmax_table_size.py

Lines changed: 9 additions & 9 deletions
@@ -33,28 +33,28 @@ def transform(self, model, node: Layer):
         # 125 characters long line.
         warnings.warn(
             (
-                f"Softmax layer {node.name} table size is too large for input"
-                f"bitwidth {input_bw}. Setting table size to {2**input_bw}."
-                "To avoid this warning, please increase input bitwidth or"
-                "decrease table size."
+                f'Softmax layer {node.name} table size is too large for input'
+                f'bitwidth {input_bw}. Setting table size to {2**input_bw}.'
+                'To avoid this warning, please increase input bitwidth or'
+                'decrease table size.'
             ),
             stacklevel=1,
         )
         if 2**table_bw < table_size:
             warnings.warn(
                 (
-                    f"Softmax layer {node.name} table size is too large for input"
-                    f"bitwidth {input_bw}. Setting table size to {2**input_bw}."
-                    "To avoid this warning, please increase input bitwidth or"
-                    "decrease table size."
+                    f'Softmax layer {node.name} table size is too large for input'
+                    f'bitwidth {input_bw}. Setting table size to {2**input_bw}.'
+                    'To avoid this warning, please increase input bitwidth or'
+                    'decrease table size.'
                 ),
                 stacklevel=1,
            )
        if backend == 'Quartus':
            warnings.warn(
                (
                    "Quartus backend's table size is half of 2^min(input_bw-1,table_bw-1)"
-                    " instead of 2^min(input_bw,table_bw)."
+                    ' instead of 2^min(input_bw,table_bw).'
                ),
                stacklevel=1,
            )

hls4ml/backends/fpga/passes/hgq_proxy_model.py

Lines changed: 3 additions & 3 deletions
@@ -35,15 +35,15 @@ def generate_mask_fn(
         else:
             fn = f'out[{idx}] = {to_fixed(k, b, i, RND, SAT)}(inp[{idx}]);'
         masks.append(f'    {fn}')
-    body = "\n".join(masks)
-    mask_fn = f'''
+    body = '\n'.join(masks)
+    mask_fn = f"""
 template<typename input_t, typename output_t>
 void {name}(input_t *inp, output_t *out) {{
     #pragma HLS INLINE
 
 {body}
 }}
-'''
+"""
     return mask_fn
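Illustrative aside (not from the commit) on the `{{` and `}}` visible in the generated C++ above: inside an f-string, doubled braces emit literal braces. The names below are made up for the demo:

name, body = 'mask_fn_0', '    out[0] = inp[0];'
print(f'void {name}(input_t *inp, output_t *out) {{\n{body}\n}}')
# void mask_fn_0(input_t *inp, output_t *out) {
#     out[0] = inp[0];
# }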

hls4ml/backends/fpga/passes/im2col_codegen.py

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 
 class GenerateConvIm2col(OptimizerPass):
-    '''Generates tcode for im2col step of 1D/2d convolution'''
+    """Generates tcode for im2col step of 1D/2d convolution"""
 
     # Note, DepthwizeConv1D/2D also matches because it inherits from Conv1D/2D
     def match(self, node):

hls4ml/backends/fpga/passes/inplace_parallel_reshape.py

Lines changed: 3 additions & 3 deletions
@@ -22,10 +22,10 @@ def transform(self, model, node):
         node.set_attr(node.outputs[0], newoutvar)
         if node.name in model.outputs:
             prev_node = node.get_input_node()
-            assert (
-                prev_node.name not in model.outputs
-            ), f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel.\
+            assert prev_node.name not in model.outputs, (
+                f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel.\
                 As a result, the previous node {prev_node.name}'s output will be used as the\
                 output. However, this node is already an output."
+            )
             model.outputs = [name if name != node.name else prev_node.name for name in model.outputs]
             return False
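A side note on the wrapped message above, as a minimal illustration (not from the commit): a trailing backslash continues the string literal itself, so the next source line's leading indentation becomes part of the text.

# The run of spaces where the literal wraps survives into the message.
msg = "reshape is a no-op in io_parallel.\
                As a result..."
assert '    As a result' in msg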
