
Use ruff for formatting #1275


Draft · wants to merge 3 commits into main
22 changes: 9 additions & 13 deletions .pre-commit-config.yaml
@@ -1,13 +1,12 @@
 exclude: (^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/|^test/pytest/test_report/)

 repos:
-  - repo: https://github.com/psf/black
-    rev: 25.1.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.5
     hooks:
-      - id: black
-        language_version: python3
-        args: ['--line-length=125',
-               '--skip-string-normalization']
+      - id: ruff
+        args: [--fix]
+      - id: ruff-format
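
For contributors running these checks outside pre-commit, the rough CLI equivalents of the two new hooks (assuming ruff reads its configuration from pyproject.toml) are:

    ruff check --fix .   # lint and autofix; can also cover isort's job via ruff's "I" rules, if enabled
    ruff format .        # format, replacing black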
Contributor:
We still need to enforce the line length of 125.

Contributor (Author):
It's in pyproject.toml now.
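
A minimal sketch of the kind of ruff table being referred to. The actual pyproject.toml is not part of the visible diff, so the exact keys below are assumptions (line-length inferred from the old black args, quote-style from the double-to-single-quote rewrites elsewhere in this PR):

    # Hypothetical pyproject.toml excerpt (not shown in this diff)
    [tool.ruff]
    line-length = 125

    [tool.ruff.format]
    quote-style = "single"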


   - repo: https://github.com/tox-dev/pyproject-fmt
     rev: v2.5.1
@@ -29,16 +28,11 @@ repos:
       - id: requirements-txt-fixer
       - id: trailing-whitespace

-  - repo: https://github.com/PyCQA/isort
-    rev: 6.0.1
-    hooks:
-      - id: isort
-
   - repo: https://github.com/asottile/pyupgrade
     rev: v3.19.1
     hooks:
       - id: pyupgrade
-        args: ["--py36-plus"]
+        args: ["--py310-plus"]

   - repo: https://github.com/pycqa/flake8
     rev: 7.2.0
@@ -47,7 +41,9 @@
         exclude: docs/conf.py
         additional_dependencies: [flake8-bugbear, flake8-print]
         args: ['--max-line-length=125', # github viewer width
-               '--extend-ignore=E203,T201'] # E203 is not PEP8 compliant
+               '--extend-ignore=E203,T201,F401']
+        # E203 is not PEP8 compliant
+        # F401 included in ruff (behaves slightly differently for noqa flags)
Contributor:
Is F401 ignored everywhere? If so, I think it's a bad idea.

Contributor (Author):
F401 is included in ruff; see pyproject.toml. It is only ignored for __init__.py.
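
For context, a typical way to scope that ignore in ruff's configuration; again an assumption, since pyproject.toml is outside the visible diff:

    # Hypothetical pyproject.toml excerpt (not shown in this diff)
    [tool.ruff.lint.per-file-ignores]
    "__init__.py" = ["F401"]  # re-exports in package __init__ files are intentional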


   - repo: https://github.com/mgedmin/check-manifest
     rev: "0.50"
4 changes: 2 additions & 2 deletions docs/conf.py
@@ -99,7 +99,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):

 # -- Extension configuration -------------------------------------------------
 html_show_sourcelink = False
-html_logo = "img/hls4ml_logo_navbar.png"
+html_logo = 'img/hls4ml_logo_navbar.png'

 html_theme_options = {
     'canonical_url': '',
@@ -120,7 +120,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 html_context = {
     'display_github': True,  # Integrate GitHub
     'github_user': 'fastmachinelearning',  # Username
-    'github_repo': "hls4ml",  # Repo name
+    'github_repo': 'hls4ml',  # Repo name
     'github_version': 'main',  # Version
     'conf_py_path': '/docs/',  # Path in the checkout to the docs root
 }
6 changes: 3 additions & 3 deletions hls4ml/__init__.py
@@ -1,11 +1,11 @@
-from hls4ml import converters, report, utils  # noqa: F401, E402
+from hls4ml import converters, report, utils

 try:
     from ._version import version as __version__
     from ._version import version_tuple
 except ImportError:
-    __version__ = "unknown version"
-    version_tuple = (0, 0, "unknown version")
+    __version__ = 'unknown version'
+    version_tuple = (0, 0, 'unknown version')


def reseed(newseed):
6 changes: 3 additions & 3 deletions hls4ml/backends/__init__.py
@@ -1,11 +1,11 @@
-from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend  # noqa: F401
-from hls4ml.backends.fpga.fpga_backend import FPGABackend  # noqa: F401
+from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend
+from hls4ml.backends.fpga.fpga_backend import FPGABackend
 from hls4ml.backends.oneapi.oneapi_backend import OneAPIBackend
 from hls4ml.backends.quartus.quartus_backend import QuartusBackend
 from hls4ml.backends.symbolic.symbolic_backend import SymbolicExpressionBackend
 from hls4ml.backends.vivado.vivado_backend import VivadoBackend
 from hls4ml.backends.vivado_accelerator.vivado_accelerator_backend import VivadoAcceleratorBackend
-from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig  # noqa: F401
+from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig

 from hls4ml.backends.catapult.catapult_backend import CatapultBackend  # isort: skip

8 changes: 4 additions & 4 deletions hls4ml/backends/catapult/catapult_backend.py
@@ -251,7 +251,7 @@ def build(
         ccs_args = f'"reset={reset} csim={csim} synth={synth} cosim={cosim} validation={validation}'
         ccs_args += f' export={export} vsynth={vsynth} fifo_opt={fifo_opt} bitfile={bitfile} ran_frame={ran_frame}'
         ccs_args += f' sw_opt={sw_opt} power={power} da={da} vhdl={vhdl} verilog={verilog} bup={bup}"'
-        ccs_invoke = catapult_exe + ' -product ultra -shell -f build_prj.tcl -eval \'set ::argv ' + ccs_args + '\''
+        ccs_invoke = catapult_exe + " -product ultra -shell -f build_prj.tcl -eval 'set ::argv " + ccs_args + "'"
         print(ccs_invoke)
         os.system(ccs_invoke)
         os.chdir(curr_dir)
@@ -455,9 +455,9 @@ def init_global_pooling2d(self, layer):
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
-            assert (
-                len(layer.get_input_variable().shape) == 1
-            ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            assert len(layer.get_input_variable().shape) == 1, (
+                'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            )

     @layer_optimizer(Embedding)
     def init_embed(self, layer):
2 changes: 1 addition & 1 deletion hls4ml/backends/catapult/passes/broadcast_stream.py
@@ -6,7 +6,7 @@


 class Broadcast(Layer):
-    '''Inserted between layers for broadcasting.'''
+    """Inserted between layers for broadcasting."""

     def initialize(self):
         shape = self.attributes['target_shape']
2 changes: 1 addition & 1 deletion hls4ml/backends/catapult/passes/conv_stream.py
@@ -3,7 +3,7 @@


 class GenerateConvStreamingInstructions(OptimizerPass):
-    '''Generates the instructions for streaming implementation of CNNs'''
+    """Generates the instructions for streaming implementation of CNNs"""

     def match(self, node):
         is_match = (
4 changes: 2 additions & 2 deletions hls4ml/backends/catapult/passes/convolution_winograd.py
@@ -7,10 +7,10 @@


 class ApplyWinogradKernelTransformation(OptimizerPass):
-    '''
+    """
     Transforms the weights of a Conv2D kernel to a format suitable for Wingorad convolution
     For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks
-    '''
+    """

     def match(self, node):
         node_matches = isinstance(node, (Conv1D, Conv2D))
4 changes: 2 additions & 2 deletions hls4ml/backends/catapult/passes/fifo_depth_optimization.py
@@ -82,9 +82,9 @@ def transform(self, model):

         if len(data['children']) == 0:
             print(
-                "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible."
+                'FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible.'
             )
-            print("Consider increasing profiling_fifo_depth.")
+            print('Consider increasing profiling_fifo_depth.')
             return False

         n_elem = len(data['children'][0]['children'][0]['children'])
2 changes: 1 addition & 1 deletion hls4ml/backends/catapult/passes/resource_strategy.py
@@ -5,7 +5,7 @@


 class ApplyResourceStrategy(OptimizerPass):
-    '''Transposes the weights to use the dense_resource matrix multiply routine'''
+    """Transposes the weights to use the dense_resource matrix multiply routine"""

     def match(self, node):
         node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU))
58 changes: 29 additions & 29 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -346,9 +346,9 @@ def convert_precision_string(cls, precision):

     @classmethod
     def _convert_ap_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ap_fixed<16,6>" to the internal FixedPrecisionTypes etc)
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         sat_mode = None
         round_mode = None
@@ -357,12 +357,12 @@ def _convert_ap_type(cls, precision):
             width = int(bits[0])
             integer = int(bits[1])
             fields = 2
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         elif 'int' in precision:
             width = int(bits[0])
             integer = width
             fields = 1
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         if len(bits) > fields:
             round_mode = bits[fields]
         if len(bits) > fields + 1:
@@ -376,9 +376,9 @@

     @classmethod
     def _convert_ac_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ac_fixed<16,6>" to the internal FixedPrecisionTypes etc)
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         signed = True  # default is signed
         sat_mode = None
@@ -414,18 +414,18 @@ def _convert_ac_type(cls, precision):

     @classmethod
     def _convert_auto_type(cls, precision):
-        '''
+        """
         Convert a "auto" precision string into the UnspecifiedPrecisionType
-        '''
+        """
         return UnspecifiedPrecisionType()

     def product_type(self, data_T, weight_T):
-        '''
+        """
         Helper function to determine which product implementation to use during inference
-        '''
-        assert not isinstance(
-            data_T, ExponentPrecisionType
-        ), "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        """
+        assert not isinstance(data_T, ExponentPrecisionType), (
+            "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        )
         product = 'mult'
         if isinstance(weight_T, ExponentPrecisionType):
             product = 'weight_exponential'
@@ -754,14 +754,14 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke
         im2col_matrix = self._compute_conv1d_im2col((in_W, in_C), kernel, stride, (pad_left, pad_right), dilation)

         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
         ).format(index=layer_idx)
         indent = ' '

@@ -884,14 +884,14 @@ def generate_conv2d_line_buffer_fn(
         )

         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
         ).format(index=layer_idx)
         indent = ' '

8 changes: 4 additions & 4 deletions hls4ml/backends/fpga/fpga_layers.py
@@ -6,10 +6,10 @@


 class BatchNormalizationQuantizedTanh(Layer):
-    '''Merged Batch Normalization and quantized (binary or ternary) Tanh layer.
+    """Merged Batch Normalization and quantized (binary or ternary) Tanh layer.
     The mean, variance, beta, gamma parameters are folded into the threshold(s) at which the
     sign of the input flips after the quantized (binary or ternary) Tanh activation.
-    '''
+    """

     _expected_attributes = [
         Attribute('n_in'),
@@ -71,15 +71,15 @@ def set_thresholds(self, scale, bias, ternary_threshold=0.5):


 class PointwiseConv1D(Conv1D):
-    '''Optimized Conv1D implementation for 1x1 kernels.'''
+    """Optimized Conv1D implementation for 1x1 kernels."""

     def initialize(self):
         # Do noting, values copied
         pass


 class PointwiseConv2D(Conv2D):
-    '''Optimized Conv2D implementation for 1x1 kernels.'''
+    """Optimized Conv2D implementation for 1x1 kernels."""

     def initialize(self):
         # Do noting, values copied
4 changes: 2 additions & 2 deletions hls4ml/backends/fpga/fpga_types.py
@@ -165,7 +165,7 @@ def definition_cpp(self):

 class CompressedTypeConverter(TypeDefinition, TypePrecisionConverter):
     def definition_cpp(self):
-        cpp_fmt = 'typedef struct {name} {{' '{index} row_index;' '{index} col_index;' '{precision} weight; }} {name};\n'
+        cpp_fmt = 'typedef struct {name} {{{index} row_index;{index} col_index;{precision} weight; }} {name};\n'
         return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision.definition_cpp())

     def convert_precision(self, precision_converter):
@@ -175,7 +175,7 @@ def convert_precision(self, precision_converter):

 class ExponentTypeConverter(TypeDefinition, TypePrecisionConverter):
     def definition_cpp(self):
-        cpp_fmt = 'typedef struct {name} {{' '{sign} sign;' '{precision} weight; }} {name};\n'
+        cpp_fmt = 'typedef struct {name} {{{sign} sign;{precision} weight; }} {name};\n'
         return cpp_fmt.format(name=self.name, precision=self.precision.definition_cpp(), sign=self.sign.definition_cpp())

     def convert_precision(self, precision_converter):
5 changes: 2 additions & 3 deletions hls4ml/backends/fpga/passes/clone.py
@@ -6,7 +6,7 @@


 class Clone(Layer):
-    '''Inserted after the layer whose output is used more than once.'''
+    """Inserted after the layer whose output is used more than once."""

     def initialize(self):
         inp = self.get_input_variable()
@@ -47,7 +47,7 @@ def register_clone(backend):


 class CloneOutput(OptimizerPass):
-    '''Clones streams that are used multiple times'''
+    """Clones streams that are used multiple times"""

     def match(self, node):
         # We may have already inserted the Clone layer
@@ -70,7 +70,6 @@ def match(self, node):
         return False

     def transform(self, model, node):
-
         output_map = node.get_output_use_map()

         transformed = False
2 changes: 1 addition & 1 deletion hls4ml/backends/fpga/passes/final_reshape.py
@@ -3,7 +3,7 @@


 class RemoveFinalReshape(OptimizerPass):
-    '''Remove reshape if final layer'''
+    """Remove reshape if final layer"""

     def match(self, node):
         # match if reshape is final node