From f28bd1d8b034a079d33f7fbe22b26a972a6e9c4e Mon Sep 17 00:00:00 2001
From: Chang Sun
Date: Tue, 15 Apr 2025 18:26:32 -0700
Subject: [PATCH 1/3] pre-commit config update

---
 .pre-commit-config.yaml                          | 2 +-
 hls4ml/model/optimizer/passes/infer_precision.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a70b75062d..62db599cb6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -38,7 +38,7 @@ repos:
   rev: v3.19.1
   hooks:
   - id: pyupgrade
-    args: ["--py36-plus"]
+    args: ["--py310-plus"]
 
 - repo: https://github.com/pycqa/flake8
   rev: 7.2.0
diff --git a/hls4ml/model/optimizer/passes/infer_precision.py b/hls4ml/model/optimizer/passes/infer_precision.py
index bd439e4a0f..919bc0c3c2 100644
--- a/hls4ml/model/optimizer/passes/infer_precision.py
+++ b/hls4ml/model/optimizer/passes/infer_precision.py
@@ -1,5 +1,5 @@
 import math
-from typing import Iterable
+from collections.abc import Iterable
 
 import numpy as np
 

From cd11ae7733c8c58730b89c83b0dd132b47ec409a Mon Sep 17 00:00:00 2001
From: Chang Sun
Date: Tue, 15 Apr 2025 18:40:21 -0700
Subject: [PATCH 2/3] use ruff

---
 .pre-commit-config.yaml | 20 ++++++++------------
 pyproject.toml          | 21 ++++++++++++++++-----
 2 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 62db599cb6..db742fd5ac 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,13 +1,12 @@
 exclude: (^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/|^test/pytest/test_report/)
 
 repos:
-- repo: https://github.com/psf/black
-  rev: 25.1.0
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.11.5
   hooks:
-  - id: black
-    language_version: python3
-    args: ['--line-length=125',
-           '--skip-string-normalization']
+  - id: ruff
+    args: [--fix]
+  - id: ruff-format
 
 - repo: https://github.com/tox-dev/pyproject-fmt
   rev: v2.5.1
@@ -29,11 +28,6 @@ repos:
   - id: requirements-txt-fixer
   - id: trailing-whitespace
 
-- repo: https://github.com/PyCQA/isort
-  rev: 6.0.1
-  hooks:
-  - id: isort
-
 - repo: https://github.com/asottile/pyupgrade
   rev: v3.19.1
   hooks:
@@ -47,7 +41,9 @@ repos:
     exclude: docs/conf.py
     additional_dependencies: [flake8-bugbear, flake8-print]
     args: ['--max-line-length=125',  # github viewer width
-           '--extend-ignore=E203,T201']  # E203 is not PEP8 compliant
+           '--extend-ignore=E203,T201,F401']
+           # E203 is not PEP8 compliant
+           # F401 included in ruff (behaves slightly differently for noqa flags)
 
 - repo: https://github.com/mgedmin/check-manifest
   rev: "0.50"
diff --git a/pyproject.toml b/pyproject.toml
index 041428ea9f..7c2dedb437 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -82,13 +82,24 @@ git_describe_command = [
 ]
 write_to = "hls4ml/_version.py"
 
-[tool.black]
+[tool.ruff]
+target-version = "py310"
+
 line-length = 125
-skip-string-normalization = true
+indent-width = 4
+include = [ "hls4ml/**/*.py", "tests/**/*.py" ]
+exclude = [ "hls4ml/_version.py", "hls4ml/templates/**" ]
+
+format.quote-style = "single"
+format.skip-magic-trailing-comma = false
+format.docstring-code-line-length = 125
+format.docstring-code-format = true
+lint.select = [ "E", "F", "F401", "I", "W" ]
+lint.ignore = [ "E741" ]
+lint.per-file-ignores = { "__init__.py" = [ "F401" ] }
 
-[tool.isort]
-profile = "black"
-line_length = 125
+lint.fixable = [ "ALL" ]
+lint.unfixable = [ ]
 
 [tool.check-manifest]
 ignore = [
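The final commit below is the mechanical reformat produced by the new tooling. As a minimal illustrative sketch (not part of the patch series, and assuming only the configuration adopted above): running `pre-commit run --all-files` now applies pyupgrade with `--py310-plus`, which rewrites deprecated `typing` aliases such as `typing.Iterable` to `collections.abc.Iterable`, and `ruff format` with `quote-style = "single"`, which normalizes double-quoted strings to single quotes. Those two rewrites account for the bulk of the diff that follows.

# Illustrative sketch only; not part of the patch series.
# Before the hooks run, a file might read:
#     from typing import Iterable
#     GREETING = "hello"
# pyupgrade (--py310-plus) rewrites the import, and ruff format
# (quote-style = 'single') rewrites the quotes, giving:
from collections.abc import Iterable


def join_names(names: Iterable[str]) -> str:
    # Single-quoted strings match the configured quote-style.
    return ', '.join(names)


print(join_names(['hls4ml', 'ruff']))  # prints: hls4ml, ruff
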
From 3a711cd2539e7a2e614d333eb221d87cd2da6454 Mon Sep 17 00:00:00 2001
From: Chang Sun
Date: Tue, 15 Apr 2025 18:51:38 -0700
Subject: [PATCH 3/3] format with ruff

---
 docs/conf.py | 4 +-
 hls4ml/__init__.py | 6 +-
 hls4ml/backends/__init__.py | 6 +-
 hls4ml/backends/catapult/catapult_backend.py | 8 +-
 .../catapult/passes/broadcast_stream.py | 2 +-
 .../backends/catapult/passes/conv_stream.py | 2 +-
 .../catapult/passes/convolution_winograd.py | 4 +-
 .../passes/fifo_depth_optimization.py | 4 +-
 .../catapult/passes/resource_strategy.py | 2 +-
 hls4ml/backends/fpga/fpga_backend.py | 58 +++----
 hls4ml/backends/fpga/fpga_layers.py | 8 +-
 hls4ml/backends/fpga/fpga_types.py | 4 +-
 hls4ml/backends/fpga/passes/clone.py | 5 +-
 hls4ml/backends/fpga/passes/final_reshape.py | 2 +-
 .../fpga/passes/fix_softmax_table_size.py | 18 +--
 .../backends/fpga/passes/hgq_proxy_model.py | 6 +-
 hls4ml/backends/fpga/passes/im2col_codegen.py | 2 +-
 .../fpga/passes/inplace_parallel_reshape.py | 6 +-
 hls4ml/backends/fpga/passes/repack_stream.py | 4 +-
 hls4ml/backends/fpga/passes/xnor_pooling.py | 4 +-
 hls4ml/backends/oneapi/oneapi_backend.py | 6 +-
 hls4ml/backends/oneapi/oneapi_template.py | 4 +-
 hls4ml/backends/oneapi/oneapi_types.py | 4 +-
 .../oneapi/passes/convolution_templates.py | 6 +-
 .../oneapi/passes/convolution_winograd.py | 4 +-
 .../backends/oneapi/passes/core_templates.py | 10 +-
 hls4ml/backends/oneapi/passes/pointwise.py | 4 +-
 .../oneapi/passes/recurrent_templates.py | 16 +-
 .../oneapi/passes/resource_strategy.py | 2 +-
 .../quartus/passes/convolution_templates.py | 6 +-
 .../quartus/passes/convolution_winograd.py | 4 +-
 hls4ml/backends/quartus/passes/pointwise.py | 4 +-
 .../quartus/passes/recurrent_templates.py | 12 +-
 .../quartus/passes/resource_strategy.py | 2 +-
 hls4ml/backends/quartus/quartus_backend.py | 6 +-
 .../symbolic/passes/expr_templates.py | 2 +-
 .../backends/symbolic/passes/validate_lut.py | 2 +-
 .../vitis/passes/fifo_depth_optimization.py | 1 -
 .../vivado/passes/broadcast_stream.py | 2 +-
 hls4ml/backends/vivado/passes/conv_stream.py | 2 +-
 .../vivado/passes/fifo_depth_optimization.py | 4 +-
 .../backends/vivado/passes/pipeline_style.py | 1 -
 .../vivado/passes/pointwise_codegen.py | 2 +-
 .../vivado/passes/resource_strategy.py | 2 +-
 .../vivado/passes/unrolled_codegen.py | 26 +--
 hls4ml/backends/vivado/vivado_backend.py | 6 +-
 .../vivado_accelerator_backend.py | 14 +-
 .../vivado_accelerator_config.py | 16 +-
 hls4ml/cli/__init__.py | 2 +-
 hls4ml/contrib/garnet.py | 24 +--
 hls4ml/contrib/kl_layer/kl_layer.py | 4 +-
 hls4ml/converters/__init__.py | 26 +--
 hls4ml/converters/keras/qkeras.py | 1 -
 hls4ml/converters/keras/recurrent.py | 2 +-
 hls4ml/converters/keras/reshape.py | 4 +-
 hls4ml/converters/onnx/convolution.py | 4 +-
 hls4ml/converters/onnx/core.py | 4 +-
 hls4ml/converters/onnx/pooling.py | 2 +-
 hls4ml/converters/onnx_to_hls.py | 4 +-
 hls4ml/converters/pytorch/pooling.py | 4 +-
 hls4ml/converters/pytorch/recurrent.py | 2 +-
 hls4ml/converters/pytorch/reshape.py | 1 -
 hls4ml/converters/pytorch_to_hls.py | 9 +-
 hls4ml/model/__init__.py | 2 +-
 hls4ml/model/flow/__init__.py | 2 +-
 hls4ml/model/graph.py | 11 +-
 hls4ml/model/layers.py | 24 +--
 hls4ml/model/optimizer/__init__.py | 2 +-
 .../model/optimizer/passes/batchnorm_opt.py | 2 +-
 .../passes/convert_to_channels_last.py | 14 +-
 .../optimizer/passes/expand_layer_group.py | 2 +-
 .../passes/expand_time_distributed.py | 4 +-
 hls4ml/model/optimizer/passes/fuse_biasadd.py | 2 +-
 .../model/optimizer/passes/infer_precision.py | 1 -
 hls4ml/model/optimizer/passes/linear.py | 12 +-
 hls4ml/model/optimizer/passes/move_scales.py | 32 ++--
 hls4ml/model/optimizer/passes/qkeras.py | 16 +-
 hls4ml/model/optimizer/passes/quant_opt.py | 2 +-
 hls4ml/model/profiling.py | 56 +++----
 hls4ml/model/types.py | 2 +-
 .../dsp_aware_pruning/__init__.py | 4 +-
 .../dsp_aware_pruning/attributes.py | 24 +--
 .../optimization/dsp_aware_pruning/config.py | 4 +-
 .../dsp_aware_pruning/keras/__init__.py | 18 +--
 .../dsp_aware_pruning/keras/builder.py | 12 +-
 .../dsp_aware_pruning/keras/config.py | 12 +-
 .../dsp_aware_pruning/keras/masking.py | 16 +-
 .../dsp_aware_pruning/keras/reduction.py | 4 +-
 .../dsp_aware_pruning/keras/regularizers.py | 8 +-
 .../dsp_aware_pruning/keras/utils.py | 16 +-
 .../dsp_aware_pruning/knapsack.py | 24 +--
 .../dsp_aware_pruning/objectives/__init__.py | 24 +--
 .../dsp_aware_pruning/scheduler.py | 32 ++--
 hls4ml/report/__init__.py | 28 ++--
 hls4ml/report/catapult_report.py | 2 +-
 hls4ml/report/oneapi_report.py | 19 +--
 hls4ml/report/quartus_report.py | 48 +++---
 hls4ml/report/vivado_report.py | 2 +-
 hls4ml/utils/__init__.py | 6 +-
 hls4ml/utils/attribute_descriptions.py | 3 +-
 hls4ml/utils/config.py | 10 +-
 hls4ml/utils/example_models.py | 14 +-
 hls4ml/utils/fixed_point_utils.py | 28 ++--
 hls4ml/utils/plot.py | 4 +-
 hls4ml/utils/profiling_utils.py | 7 +-
 hls4ml/utils/torch.py | 1 -
 hls4ml/writer/__init__.py | 2 +-
 hls4ml/writer/catapult_writer.py | 152 +++++++++---------
 hls4ml/writer/oneapi_writer.py | 83 +++++-----
 hls4ml/writer/quartus_writer.py | 84 +++++-----
 hls4ml/writer/vitis_writer.py | 4 +-
 hls4ml/writer/vivado_accelerator_writer.py | 20 +--
 hls4ml/writer/vivado_writer.py | 25 ++-
 test/pytest/generate_ci_yaml.py | 4 +-
 test/pytest/test_backend_config.py | 2 +-
 test/pytest/test_bram_factor.py | 4 +-
 test/pytest/test_causalpadding.py | 2 +-
 test/pytest/test_cnn_mnist_qkeras.py | 4 +-
 test/pytest/test_conv1d_narrow.py | 8 +-
 test/pytest/test_conv2d_narrow.py | 8 +-
 test/pytest/test_extensions.py | 6 +-
 test/pytest/test_extensions_pytorch.py | 6 +-
 test/pytest/test_flows.py | 4 +-
 test/pytest/test_graph.py | 6 +-
 test/pytest/test_hgq_layers.py | 36 ++---
 test/pytest/test_hgq_players.py | 26 +--
 test/pytest/test_keras_api.py | 20 +--
 test/pytest/test_merge_pytorch.py | 2 +-
 test/pytest/test_multiout_onnx.py | 1 -
 .../test_keras/test_masking.py | 4 +-
 .../test_keras/test_reduction.py | 4 +-
 .../test_keras/test_weight_sharing.py | 4 +-
 test/pytest/test_pytorch_api.py | 53 +++---
 test/pytest/test_pytorch_profiler.py | 8 +-
 test/pytest/test_qkeras.py | 74 ++++-----
 test/pytest/test_qonnx.py | 61 ++++---
 test/pytest/test_recurrent_pytorch.py | 14 +-
 test/pytest/test_report.py | 4 +-
 test/pytest/test_sepconv2d.py | 2 +-
 test/pytest/test_sr.py | 1 -
 test/pytest/test_trace.py | 2 +-
 test/pytest/test_types.py | 2 +-
 test/pytest/test_upsampling_pytorch.py | 4 +-
 test/pytest/test_writer_config.py | 4 -
 144 files changed, 857 insertions(+), 867 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index e4d7f399c1..5c1bc081e3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -99,7 +99,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 # -- Extension configuration -------------------------------------------------
 
 html_show_sourcelink = False
-html_logo = "img/hls4ml_logo_navbar.png"
+html_logo = 'img/hls4ml_logo_navbar.png'
 
 html_theme_options = {
     'canonical_url': '',
@@ -120,7 +120,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 html_context = {
     'display_github': True,  # Integrate GitHub
     'github_user': 'fastmachinelearning',  # Username
-    'github_repo': "hls4ml",  # Repo name
+    'github_repo': 'hls4ml',  # Repo name
     'github_version': 'main',  # Version
     'conf_py_path': '/docs/',  # Path in the checkout to the docs root
 }
diff --git a/hls4ml/__init__.py b/hls4ml/__init__.py
index 0ff5e52ac9..eee06faa1e 100644
--- a/hls4ml/__init__.py
+++ b/hls4ml/__init__.py
@@ -1,11 +1,11 @@
-from hls4ml import converters, report, utils  # noqa: F401, E402
+from hls4ml import converters, report, utils
 
 try:
     from ._version import version as __version__
     from ._version import version_tuple
 except ImportError:
-    __version__ = "unknown version"
-    version_tuple = (0, 0, "unknown version")
+    __version__ = 'unknown version'
+    version_tuple = (0, 0, 'unknown version')
 
 
 def reseed(newseed):
diff --git a/hls4ml/backends/__init__.py b/hls4ml/backends/__init__.py
index 4a48f072cd..eea67b1ff9 100644
--- a/hls4ml/backends/__init__.py
+++ b/hls4ml/backends/__init__.py
@@ -1,11 +1,11 @@
-from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend  # noqa: F401
-from hls4ml.backends.fpga.fpga_backend import FPGABackend  # noqa: F401
+from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend
+from hls4ml.backends.fpga.fpga_backend import FPGABackend
 from hls4ml.backends.oneapi.oneapi_backend import OneAPIBackend
 from hls4ml.backends.quartus.quartus_backend import QuartusBackend
 from hls4ml.backends.symbolic.symbolic_backend import SymbolicExpressionBackend
 from hls4ml.backends.vivado.vivado_backend import VivadoBackend
 from hls4ml.backends.vivado_accelerator.vivado_accelerator_backend import VivadoAcceleratorBackend
-from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig  # noqa: F401
+from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig
 
 from hls4ml.backends.catapult.catapult_backend import CatapultBackend  # isort: skip
 
diff --git a/hls4ml/backends/catapult/catapult_backend.py b/hls4ml/backends/catapult/catapult_backend.py
index 030016d6cd..5b493cd944 100644
--- a/hls4ml/backends/catapult/catapult_backend.py
+++ b/hls4ml/backends/catapult/catapult_backend.py
@@ -251,7 +251,7 @@ def build(
         ccs_args = f'"reset={reset} csim={csim} synth={synth} cosim={cosim} validation={validation}'
         ccs_args += f' export={export} vsynth={vsynth} fifo_opt={fifo_opt} bitfile={bitfile} ran_frame={ran_frame}'
         ccs_args += f' sw_opt={sw_opt} power={power} da={da} vhdl={vhdl} verilog={verilog} bup={bup}"'
-        ccs_invoke = catapult_exe + ' -product ultra -shell -f build_prj.tcl -eval \'set ::argv ' + ccs_args + '\''
+        ccs_invoke = catapult_exe + " -product ultra -shell -f build_prj.tcl -eval 'set ::argv " + ccs_args + "'"
         print(ccs_invoke)
         os.system(ccs_invoke)
         os.chdir(curr_dir)
@@ -455,9 +455,9 @@ def init_global_pooling2d(self, layer):
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
-            assert (
-                len(layer.get_input_variable().shape) == 1
-            ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            assert len(layer.get_input_variable().shape) == 1, (
+                'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+ ) @layer_optimizer(Embedding) def init_embed(self, layer): diff --git a/hls4ml/backends/catapult/passes/broadcast_stream.py b/hls4ml/backends/catapult/passes/broadcast_stream.py index 97019e074b..3cc218ec5b 100644 --- a/hls4ml/backends/catapult/passes/broadcast_stream.py +++ b/hls4ml/backends/catapult/passes/broadcast_stream.py @@ -6,7 +6,7 @@ class Broadcast(Layer): - '''Inserted between layers for broadcasting.''' + """Inserted between layers for broadcasting.""" def initialize(self): shape = self.attributes['target_shape'] diff --git a/hls4ml/backends/catapult/passes/conv_stream.py b/hls4ml/backends/catapult/passes/conv_stream.py index 9ba0c04d32..a8032e7430 100755 --- a/hls4ml/backends/catapult/passes/conv_stream.py +++ b/hls4ml/backends/catapult/passes/conv_stream.py @@ -3,7 +3,7 @@ class GenerateConvStreamingInstructions(OptimizerPass): - '''Generates the instructions for streaming implementation of CNNs''' + """Generates the instructions for streaming implementation of CNNs""" def match(self, node): is_match = ( diff --git a/hls4ml/backends/catapult/passes/convolution_winograd.py b/hls4ml/backends/catapult/passes/convolution_winograd.py index 8b25ab41b8..36ae88d44a 100644 --- a/hls4ml/backends/catapult/passes/convolution_winograd.py +++ b/hls4ml/backends/catapult/passes/convolution_winograd.py @@ -7,10 +7,10 @@ class ApplyWinogradKernelTransformation(OptimizerPass): - ''' + """ Transforms the weights of a Conv2D kernel to a format suitable for Wingorad convolution For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks - ''' + """ def match(self, node): node_matches = isinstance(node, (Conv1D, Conv2D)) diff --git a/hls4ml/backends/catapult/passes/fifo_depth_optimization.py b/hls4ml/backends/catapult/passes/fifo_depth_optimization.py index 4d92e98de1..496d4965f3 100755 --- a/hls4ml/backends/catapult/passes/fifo_depth_optimization.py +++ b/hls4ml/backends/catapult/passes/fifo_depth_optimization.py @@ -82,9 +82,9 @@ def transform(self, model): if len(data['children']) == 0: print( - "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible." + 'FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible.' ) - print("Consider increasing profiling_fifo_depth.") + print('Consider increasing profiling_fifo_depth.') return False n_elem = len(data['children'][0]['children'][0]['children']) diff --git a/hls4ml/backends/catapult/passes/resource_strategy.py b/hls4ml/backends/catapult/passes/resource_strategy.py index 63e6e0b4db..0098050d51 100755 --- a/hls4ml/backends/catapult/passes/resource_strategy.py +++ b/hls4ml/backends/catapult/passes/resource_strategy.py @@ -5,7 +5,7 @@ class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' + """Transposes the weights to use the dense_resource matrix multiply routine""" def match(self, node): node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU)) diff --git a/hls4ml/backends/fpga/fpga_backend.py b/hls4ml/backends/fpga/fpga_backend.py index bd85937d89..b845497e9a 100644 --- a/hls4ml/backends/fpga/fpga_backend.py +++ b/hls4ml/backends/fpga/fpga_backend.py @@ -346,9 +346,9 @@ def convert_precision_string(cls, precision): @classmethod def _convert_ap_type(cls, precision): - ''' + """ Convert a precision string (e.g. 
"ap_fixed<16,6>" to the internal FixedPrecisionTypes etc) - ''' + """ bits = re.search('.+<(.+?)>', precision).group(1).split(',') sat_mode = None round_mode = None @@ -357,12 +357,12 @@ def _convert_ap_type(cls, precision): width = int(bits[0]) integer = int(bits[1]) fields = 2 - signed = not ('u' in precision) + signed = 'u' not in precision elif 'int' in precision: width = int(bits[0]) integer = width fields = 1 - signed = not ('u' in precision) + signed = 'u' not in precision if len(bits) > fields: round_mode = bits[fields] if len(bits) > fields + 1: @@ -376,9 +376,9 @@ def _convert_ap_type(cls, precision): @classmethod def _convert_ac_type(cls, precision): - ''' + """ Convert a precision string (e.g. "ac_fixed<16,6>" to the internal FixedPrecisionTypes etc) - ''' + """ bits = re.search('.+<(.+?)>', precision).group(1).split(',') signed = True # default is signed sat_mode = None @@ -414,18 +414,18 @@ def _convert_ac_type(cls, precision): @classmethod def _convert_auto_type(cls, precision): - ''' + """ Convert a "auto" precision string into the UnspecifiedPrecisionType - ''' + """ return UnspecifiedPrecisionType() def product_type(self, data_T, weight_T): - ''' + """ Helper function to determine which product implementation to use during inference - ''' - assert not isinstance( - data_T, ExponentPrecisionType - ), "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data." + """ + assert not isinstance(data_T, ExponentPrecisionType), ( + "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data." + ) product = 'mult' if isinstance(weight_T, ExponentPrecisionType): product = 'weight_exponential' @@ -754,14 +754,14 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke im2col_matrix = self._compute_conv1d_im2col((in_W, in_C), kernel, stride, (pad_left, pad_right), dilation) generated_code = ( - "template\n" - "class fill_buffer_{index} : public nnet::FillConv1DBuffer {{\n" - " public:\n" - " static void fill_buffer(\n" - " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n" - " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n" - " const unsigned partition\n" - " ) {{\n" + 'template\n' + 'class fill_buffer_{index} : public nnet::FillConv1DBuffer {{\n' + ' public:\n' + ' static void fill_buffer(\n' + ' data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n' + ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n' + ' const unsigned partition\n' + ' ) {{\n' ).format(index=layer_idx) indent = ' ' @@ -884,14 +884,14 @@ def generate_conv2d_line_buffer_fn( ) generated_code = ( - "template\n" - "class fill_buffer_{index} : public nnet::FillConv2DBuffer {{\n" - " public:\n" - " static void fill_buffer(\n" - " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n" - " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n" - " const unsigned partition\n" - " ) {{\n" + 'template\n' + 'class fill_buffer_{index} : public nnet::FillConv2DBuffer {{\n' + ' public:\n' + ' static void fill_buffer(\n' + ' data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n' + ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n' + ' const unsigned partition\n' + ' ) {{\n' ).format(index=layer_idx) indent = ' ' diff --git a/hls4ml/backends/fpga/fpga_layers.py b/hls4ml/backends/fpga/fpga_layers.py index 0026ebe213..6786a6961b 100644 --- 
a/hls4ml/backends/fpga/fpga_layers.py +++ b/hls4ml/backends/fpga/fpga_layers.py @@ -6,10 +6,10 @@ class BatchNormalizationQuantizedTanh(Layer): - '''Merged Batch Normalization and quantized (binary or ternary) Tanh layer. + """Merged Batch Normalization and quantized (binary or ternary) Tanh layer. The mean, variance, beta, gamma parameters are folded into the threshold(s) at which the sign of the input flips after the quantized (binary or ternary) Tanh activation. - ''' + """ _expected_attributes = [ Attribute('n_in'), @@ -71,7 +71,7 @@ def set_thresholds(self, scale, bias, ternary_threshold=0.5): class PointwiseConv1D(Conv1D): - '''Optimized Conv1D implementation for 1x1 kernels.''' + """Optimized Conv1D implementation for 1x1 kernels.""" def initialize(self): # Do noting, values copied @@ -79,7 +79,7 @@ def initialize(self): class PointwiseConv2D(Conv2D): - '''Optimized Conv2D implementation for 1x1 kernels.''' + """Optimized Conv2D implementation for 1x1 kernels.""" def initialize(self): # Do noting, values copied diff --git a/hls4ml/backends/fpga/fpga_types.py b/hls4ml/backends/fpga/fpga_types.py index 15ad386c5a..0862af40a4 100644 --- a/hls4ml/backends/fpga/fpga_types.py +++ b/hls4ml/backends/fpga/fpga_types.py @@ -165,7 +165,7 @@ def definition_cpp(self): class CompressedTypeConverter(TypeDefinition, TypePrecisionConverter): def definition_cpp(self): - cpp_fmt = 'typedef struct {name} {{' '{index} row_index;' '{index} col_index;' '{precision} weight; }} {name};\n' + cpp_fmt = 'typedef struct {name} {{{index} row_index;{index} col_index;{precision} weight; }} {name};\n' return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision.definition_cpp()) def convert_precision(self, precision_converter): @@ -175,7 +175,7 @@ def convert_precision(self, precision_converter): class ExponentTypeConverter(TypeDefinition, TypePrecisionConverter): def definition_cpp(self): - cpp_fmt = 'typedef struct {name} {{' '{sign} sign;' '{precision} weight; }} {name};\n' + cpp_fmt = 'typedef struct {name} {{{sign} sign;{precision} weight; }} {name};\n' return cpp_fmt.format(name=self.name, precision=self.precision.definition_cpp(), sign=self.sign.definition_cpp()) def convert_precision(self, precision_converter): diff --git a/hls4ml/backends/fpga/passes/clone.py b/hls4ml/backends/fpga/passes/clone.py index 232f769f20..8de791fd71 100644 --- a/hls4ml/backends/fpga/passes/clone.py +++ b/hls4ml/backends/fpga/passes/clone.py @@ -6,7 +6,7 @@ class Clone(Layer): - '''Inserted after the layer whose output is used more than once.''' + """Inserted after the layer whose output is used more than once.""" def initialize(self): inp = self.get_input_variable() @@ -47,7 +47,7 @@ def register_clone(backend): class CloneOutput(OptimizerPass): - '''Clones streams that are used multiple times''' + """Clones streams that are used multiple times""" def match(self, node): # We may have already inserted the Clone layer @@ -70,7 +70,6 @@ def match(self, node): return False def transform(self, model, node): - output_map = node.get_output_use_map() transformed = False diff --git a/hls4ml/backends/fpga/passes/final_reshape.py b/hls4ml/backends/fpga/passes/final_reshape.py index 984575441b..d357ef9ba6 100644 --- a/hls4ml/backends/fpga/passes/final_reshape.py +++ b/hls4ml/backends/fpga/passes/final_reshape.py @@ -3,7 +3,7 @@ class RemoveFinalReshape(OptimizerPass): - '''Remove reshape if final layer''' + """Remove reshape if final layer""" def match(self, node): # match if reshape is final node diff --git 
a/hls4ml/backends/fpga/passes/fix_softmax_table_size.py b/hls4ml/backends/fpga/passes/fix_softmax_table_size.py index 4e04626d2e..158e0aadad 100644 --- a/hls4ml/backends/fpga/passes/fix_softmax_table_size.py +++ b/hls4ml/backends/fpga/passes/fix_softmax_table_size.py @@ -33,20 +33,20 @@ def transform(self, model, node: Layer): # 125 characters long line. warnings.warn( ( - f"Softmax layer {node.name} table size is too large for input" - f"bitwidth {input_bw}. Setting table size to {2**input_bw}." - "To avoid this warning, please increase input bitwidth or" - "decrease table size." + f'Softmax layer {node.name} table size is too large for input' + f'bitwidth {input_bw}. Setting table size to {2**input_bw}.' + 'To avoid this warning, please increase input bitwidth or' + 'decrease table size.' ), stacklevel=1, ) if 2**table_bw < table_size: warnings.warn( ( - f"Softmax layer {node.name} table size is too large for input" - f"bitwidth {input_bw}. Setting table size to {2**input_bw}." - "To avoid this warning, please increase input bitwidth or" - "decrease table size." + f'Softmax layer {node.name} table size is too large for input' + f'bitwidth {input_bw}. Setting table size to {2**input_bw}.' + 'To avoid this warning, please increase input bitwidth or' + 'decrease table size.' ), stacklevel=1, ) @@ -54,7 +54,7 @@ def transform(self, model, node: Layer): warnings.warn( ( "Quartus backend's table size is half of 2^min(input_bw-1,table_bw-1)" - " instead of 2^min(input_bw,table_bw)." + ' instead of 2^min(input_bw,table_bw).' ), stacklevel=1, ) diff --git a/hls4ml/backends/fpga/passes/hgq_proxy_model.py b/hls4ml/backends/fpga/passes/hgq_proxy_model.py index c12930c92c..48a6f87214 100644 --- a/hls4ml/backends/fpga/passes/hgq_proxy_model.py +++ b/hls4ml/backends/fpga/passes/hgq_proxy_model.py @@ -35,15 +35,15 @@ def generate_mask_fn( else: fn = f'out[{idx}] = {to_fixed(k, b, i, RND, SAT)}(inp[{idx}]);' masks.append(f' {fn}') - body = "\n".join(masks) - mask_fn = f''' + body = '\n'.join(masks) + mask_fn = f""" template void {name}(input_t *inp, output_t *out) {{ #pragma HLS INLINE {body} }} -''' +""" return mask_fn diff --git a/hls4ml/backends/fpga/passes/im2col_codegen.py b/hls4ml/backends/fpga/passes/im2col_codegen.py index ccbac885c4..11e48d2552 100644 --- a/hls4ml/backends/fpga/passes/im2col_codegen.py +++ b/hls4ml/backends/fpga/passes/im2col_codegen.py @@ -4,7 +4,7 @@ class GenerateConvIm2col(OptimizerPass): - '''Generates tcode for im2col step of 1D/2d convolution''' + """Generates tcode for im2col step of 1D/2d convolution""" # Note, DepthwizeConv1D/2D also matches because it inherits from Conv1D/2D def match(self, node): diff --git a/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py b/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py index 82efe67100..9daff31307 100644 --- a/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py +++ b/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py @@ -22,10 +22,10 @@ def transform(self, model, node): node.set_attr(node.outputs[0], newoutvar) if node.name in model.outputs: prev_node = node.get_input_node() - assert ( - prev_node.name not in model.outputs - ), f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel.\ + assert prev_node.name not in model.outputs, ( + f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel.\ As a result, the previous node {prev_node.name}'s output will be used as the\ output. However, this node is already an output." 
+ ) model.outputs = [name if name != node.name else prev_node.name for name in model.outputs] return False diff --git a/hls4ml/backends/fpga/passes/repack_stream.py b/hls4ml/backends/fpga/passes/repack_stream.py index 9a77dddb29..8cd7058e9a 100644 --- a/hls4ml/backends/fpga/passes/repack_stream.py +++ b/hls4ml/backends/fpga/passes/repack_stream.py @@ -6,7 +6,7 @@ class Repack(Layer): - '''Inserted between layers with different packing factors.''' + """Inserted between layers with different packing factors.""" def initialize(self): shape = self.attributes['target_shape'] @@ -45,7 +45,7 @@ def register_repack_stream(backend): class ReshapeStream(OptimizerPass): - '''Repacks stream for Reshape layer''' + """Repacks stream for Reshape layer""" def match(self, node): # do not run optimizer pass for a flatten layer (1 output dimension) diff --git a/hls4ml/backends/fpga/passes/xnor_pooling.py b/hls4ml/backends/fpga/passes/xnor_pooling.py index 73fee982e8..4e0df0d94f 100644 --- a/hls4ml/backends/fpga/passes/xnor_pooling.py +++ b/hls4ml/backends/fpga/passes/xnor_pooling.py @@ -4,10 +4,10 @@ class XnorPooling(OptimizerPass): - ''' + """ For correct behavior, for MaxPooling and similar, for XnorPrecisionType, have to propagate the type to the output. - ''' + """ def match(self, node): if isinstance(node, (Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D)) and node.get_attr('pool_op') == 'Max': diff --git a/hls4ml/backends/oneapi/oneapi_backend.py b/hls4ml/backends/oneapi/oneapi_backend.py index a4000529c3..2804f6c1fa 100644 --- a/hls4ml/backends/oneapi/oneapi_backend.py +++ b/hls4ml/backends/oneapi/oneapi_backend.py @@ -248,9 +248,9 @@ def init_activation(self, layer): @layer_optimizer(Softmax) def init_softmax(self, layer): if layer.model.config.get_config_value('IOType') == 'io_parallel': - assert ( - len(layer.get_input_variable().shape) == 1 - ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' + assert len(layer.get_input_variable().shape) == 1, ( + 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' + ) @layer_optimizer(Embedding) def init_embed(self, layer): diff --git a/hls4ml/backends/oneapi/oneapi_template.py b/hls4ml/backends/oneapi/oneapi_template.py index c86b8f7ea3..b9ff678658 100644 --- a/hls4ml/backends/oneapi/oneapi_template.py +++ b/hls4ml/backends/oneapi/oneapi_template.py @@ -1,6 +1,6 @@ -''' +""" This package includes oneAPI-specific templates -''' +""" from hls4ml.backends.template import Template diff --git a/hls4ml/backends/oneapi/oneapi_types.py b/hls4ml/backends/oneapi/oneapi_types.py index 3106e1e10d..e1fed26e12 100644 --- a/hls4ml/backends/oneapi/oneapi_types.py +++ b/hls4ml/backends/oneapi/oneapi_types.py @@ -1,6 +1,6 @@ -''' +""" This package includes oneAPI-specific customizations to the variable types -''' +""" import numpy as np diff --git a/hls4ml/backends/oneapi/passes/convolution_templates.py b/hls4ml/backends/oneapi/passes/convolution_templates.py index 64d9e42228..f1e1abded5 100644 --- a/hls4ml/backends/oneapi/passes/convolution_templates.py +++ b/hls4ml/backends/oneapi/passes/convolution_templates.py @@ -5,7 +5,7 @@ # TODO - Dilation rate ? 
-''' Shared mutliplication config ''' +""" Shared mutliplication config """ conv_mult_config_template = """struct config{index}_mult : nnet::dense_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; @@ -29,7 +29,7 @@ using product = nnet::product::{product_type}; }};\n""" -''' 1D Conv ''' +""" 1D Conv """ conv1d_config_template = """struct config{index} : nnet::conv1d_config {{ static const unsigned in_width = {in_width}; static const unsigned n_chan = {n_chan}; @@ -154,7 +154,7 @@ def __init__(self): self.template = depthconv1d_function_template -''' 2D Conv ''' +""" 2D Conv """ conv2d_config_template = """struct config{index} : nnet::conv2d_config {{ static const unsigned in_height = {in_height}; static const unsigned in_width = {in_width}; diff --git a/hls4ml/backends/oneapi/passes/convolution_winograd.py b/hls4ml/backends/oneapi/passes/convolution_winograd.py index fdab408b38..bf3782ad2d 100644 --- a/hls4ml/backends/oneapi/passes/convolution_winograd.py +++ b/hls4ml/backends/oneapi/passes/convolution_winograd.py @@ -7,10 +7,10 @@ class ApplyWinogradKernelTransformation(OptimizerPass): - ''' + """ Transforms the weights of a Conv2D kernel to a format suitable for Wingorad convolution For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks - ''' + """ def match(self, node): node_matches = isinstance(node, (Conv1D, Conv2D)) diff --git a/hls4ml/backends/oneapi/passes/core_templates.py b/hls4ml/backends/oneapi/passes/core_templates.py index 5ccf1a5213..9602b2d0fc 100644 --- a/hls4ml/backends/oneapi/passes/core_templates.py +++ b/hls4ml/backends/oneapi/passes/core_templates.py @@ -262,7 +262,7 @@ def __init__(self): def format(self, node): params = self._default_function_params(node) params['activation'] = node.get_attr('activation').lower() - params['config'] = f"{node.get_attr('activation')}_config{node.index}" + params['config'] = f'{node.get_attr("activation")}_config{node.index}' return self.template.format(**params) @@ -276,7 +276,7 @@ def format(self, node): params = self._default_function_params(node) params['activation'] = node._get_act_function_name() params['param'] = node.get_attr('activ_param', 1.0) - params['config'] = f"{node.get_attr('activation')}_config{node.index}" + params['config'] = f'{node.get_attr("activation")}_config{node.index}' return self.template.format(**params) @@ -290,7 +290,7 @@ def format(self, node): params = self._default_function_params(node) params['activation'] = node.get_attr('activation').lower() params['param'] = node.get_weights('param').name - params['config'] = f"{node.get_attr('activation')}_config{node.index}" + params['config'] = f'{node.get_attr("activation")}_config{node.index}' return self.template.format(**params) @@ -303,7 +303,7 @@ def __init__(self): def format(self, node): params = self._default_function_params(node) params['activation'] = node.get_attr('activation').lower() - params['config'] = f"{node.get_attr('activation')}_config{node.index}" + params['config'] = f'{node.get_attr("activation")}_config{node.index}' return self.template.format(**params) @@ -315,7 +315,7 @@ def __init__(self): def format(self, node): params = self._default_function_params(node) params['activation'] = node._get_act_function_name() - params['config'] = f"{node.get_attr('activation')}_config{node.index}" + params['config'] = f'{node.get_attr("activation")}_config{node.index}' return self.template.format(**params) diff --git a/hls4ml/backends/oneapi/passes/pointwise.py 
b/hls4ml/backends/oneapi/passes/pointwise.py index ccf410d1f6..8a0fa52026 100644 --- a/hls4ml/backends/oneapi/passes/pointwise.py +++ b/hls4ml/backends/oneapi/passes/pointwise.py @@ -11,10 +11,10 @@ from hls4ml.model.layers import register_layer from hls4ml.model.optimizer import OptimizerPass -''' +""" Custom hls4ml layer implementation for 1x1 Conv filters using im2col Allows lower latency andresource usage, due to less loop invocations -''' +""" pointwise_conv1d_function_template = ( 'nnet::pointwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' diff --git a/hls4ml/backends/oneapi/passes/recurrent_templates.py b/hls4ml/backends/oneapi/passes/recurrent_templates.py index a49a2ac02f..0799de5b61 100644 --- a/hls4ml/backends/oneapi/passes/recurrent_templates.py +++ b/hls4ml/backends/oneapi/passes/recurrent_templates.py @@ -10,7 +10,7 @@ ################################################ # Shared Matrix Multiplication Template (Dense) ################################################ -recr_mult_x_config_template = '''struct config{index}_mult : nnet::dense_config {{ +recr_mult_x_config_template = """struct config{index}_mult : nnet::dense_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; @@ -29,9 +29,9 @@ template using product = nnet::product::{product_type}; -}};\n''' +}};\n""" -recr_mult_h_config_template = '''struct config{index}_mult : nnet::dense_config {{ +recr_mult_h_config_template = """struct config{index}_mult : nnet::dense_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; @@ -50,23 +50,23 @@ template using product = nnet::product::{product_type}; -}};\n''' +}};\n""" ################################################ # Shared Activation Template ################################################ -activ_config_template = '''struct {type}_config{index} : nnet::activ_config {{ +activ_config_template = """struct {type}_config{index} : nnet::activ_config {{ static const unsigned n_in = {n_in}; static const unsigned table_size = {table_size}; static const unsigned io_type = nnet::{iotype}; static const unsigned reuse_factor = {reuse}; typedef {table_t.name} table_t; -}};\n''' +}};\n""" ################################################ # GRU Template ################################################ -gru_config_template = '''struct config{index} : nnet::gru_config {{ +gru_config_template = """struct config{index} : nnet::gru_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; static const unsigned n_units = {n_units}; @@ -94,7 +94,7 @@ static const unsigned reuse_factor = {reuse}; static const unsigned pytorch_order = {pytorch}; static const bool store_weights_in_bram = false; -}};\n''' +}};\n""" gru_function_template = 'nnet::gru<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {wr}, {b}, {br});' gru_function_initial_state_template = ( diff --git a/hls4ml/backends/oneapi/passes/resource_strategy.py b/hls4ml/backends/oneapi/passes/resource_strategy.py index 15af1d197b..2d45410706 100644 --- a/hls4ml/backends/oneapi/passes/resource_strategy.py +++ b/hls4ml/backends/oneapi/passes/resource_strategy.py @@ -5,7 +5,7 @@ class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' + """Transposes the weights to use the dense_resource matrix multiply routine""" def match(self, node): node_matches = isinstance(node, (Dense, Conv1D, Conv2D, GRU, LSTM, SimpleRNN)) diff --git 
a/hls4ml/backends/quartus/passes/convolution_templates.py b/hls4ml/backends/quartus/passes/convolution_templates.py index d1c36fe1b1..eb570a0fd2 100644 --- a/hls4ml/backends/quartus/passes/convolution_templates.py +++ b/hls4ml/backends/quartus/passes/convolution_templates.py @@ -4,7 +4,7 @@ # TODO - Dilation rate ? -''' Shared mutliplication config ''' +""" Shared mutliplication config """ conv_mult_config_template = """struct config{index}_mult : nnet::dense_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; @@ -28,7 +28,7 @@ using product = nnet::product::{product_type}; }};\n""" -''' 1D Conv ''' +""" 1D Conv """ conv1d_config_template = """struct config{index} : nnet::conv1d_config {{ static const unsigned in_width = {in_width}; static const unsigned n_chan = {n_chan}; @@ -103,7 +103,7 @@ def format(self, node): return self.template.format(**params) -''' 2D Conv ''' +""" 2D Conv """ conv2d_config_template = """struct config{index} : nnet::conv2d_config {{ static const unsigned in_height = {in_height}; static const unsigned in_width = {in_width}; diff --git a/hls4ml/backends/quartus/passes/convolution_winograd.py b/hls4ml/backends/quartus/passes/convolution_winograd.py index 8b25ab41b8..36ae88d44a 100644 --- a/hls4ml/backends/quartus/passes/convolution_winograd.py +++ b/hls4ml/backends/quartus/passes/convolution_winograd.py @@ -7,10 +7,10 @@ class ApplyWinogradKernelTransformation(OptimizerPass): - ''' + """ Transforms the weights of a Conv2D kernel to a format suitable for Wingorad convolution For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks - ''' + """ def match(self, node): node_matches = isinstance(node, (Conv1D, Conv2D)) diff --git a/hls4ml/backends/quartus/passes/pointwise.py b/hls4ml/backends/quartus/passes/pointwise.py index d65ab22569..92fb9651c1 100644 --- a/hls4ml/backends/quartus/passes/pointwise.py +++ b/hls4ml/backends/quartus/passes/pointwise.py @@ -11,10 +11,10 @@ from hls4ml.model.layers import register_layer from hls4ml.model.optimizer import OptimizerPass -''' +""" Custom hls4ml layer implementation for 1x1 Conv filters using im2col Allows lower latency andresource usage, due to less loop invocations -''' +""" pointwise_conv1d_function_template = ( 'nnet::pointwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' diff --git a/hls4ml/backends/quartus/passes/recurrent_templates.py b/hls4ml/backends/quartus/passes/recurrent_templates.py index b2b542b04c..fb2976cb13 100644 --- a/hls4ml/backends/quartus/passes/recurrent_templates.py +++ b/hls4ml/backends/quartus/passes/recurrent_templates.py @@ -7,7 +7,7 @@ ################################################ # Shared Matrix Multiplication Template (Dense) ################################################ -recr_mult_config_template = '''struct config{index}_mult : nnet::dense_config {{ +recr_mult_config_template = """struct config{index}_mult : nnet::dense_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; @@ -26,23 +26,23 @@ template using product = nnet::product::{product_type}; -}};\n''' +}};\n""" ################################################ # Shared Activation Template ################################################ -activ_config_template = '''struct {type}_config{index} : nnet::activ_config {{ +activ_config_template = """struct {type}_config{index} : nnet::activ_config {{ static const unsigned n_in = {n_in}; static const unsigned table_size = {table_size}; 
static const unsigned io_type = nnet::{iotype}; static const unsigned reuse_factor = {reuse}; typedef {table_t.name} table_t; -}};\n''' +}};\n""" ################################################ # GRU Template ################################################ -gru_config_template = '''struct config{index} : nnet::gru_config {{ +gru_config_template = """struct config{index} : nnet::gru_config {{ static const unsigned n_in = {n_in}; static const unsigned n_out = {n_out}; static const unsigned n_units = {n_units}; @@ -68,7 +68,7 @@ static const unsigned reuse_factor = {reuse}; static const unsigned pytorch_order = {pytorch}; static const bool store_weights_in_bram = false; -}};\n''' +}};\n""" gru_function_template = 'nnet::gru<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {wr}, {b}, {br});' gru_function_initial_state_template = ( diff --git a/hls4ml/backends/quartus/passes/resource_strategy.py b/hls4ml/backends/quartus/passes/resource_strategy.py index 00fe890385..d44a8b36a3 100644 --- a/hls4ml/backends/quartus/passes/resource_strategy.py +++ b/hls4ml/backends/quartus/passes/resource_strategy.py @@ -5,7 +5,7 @@ class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' + """Transposes the weights to use the dense_resource matrix multiply routine""" def match(self, node): node_matches = isinstance(node, (Dense, Conv1D, Conv2D, GRU, LSTM, SimpleRNN)) diff --git a/hls4ml/backends/quartus/quartus_backend.py b/hls4ml/backends/quartus/quartus_backend.py index e9b80b7a2f..728c164021 100644 --- a/hls4ml/backends/quartus/quartus_backend.py +++ b/hls4ml/backends/quartus/quartus_backend.py @@ -224,9 +224,9 @@ def init_activation(self, layer): @layer_optimizer(Softmax) def init_softmax(self, layer): if layer.model.config.get_config_value('IOType') == 'io_parallel': - assert ( - len(layer.get_input_variable().shape) == 1 - ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' + assert len(layer.get_input_variable().shape) == 1, ( + 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' 
+ ) @layer_optimizer(Embedding) def init_embed(self, layer): diff --git a/hls4ml/backends/symbolic/passes/expr_templates.py b/hls4ml/backends/symbolic/passes/expr_templates.py index b96f011fbd..0cbf1d120d 100644 --- a/hls4ml/backends/symbolic/passes/expr_templates.py +++ b/hls4ml/backends/symbolic/passes/expr_templates.py @@ -100,7 +100,7 @@ def _print_math(self, expr): name = fname break else: - raise ValueError("No matching printer") + raise ValueError('No matching printer') # Setting precision of math functions required some rethinking # Doing e.g., hls::pow(x, y) passes C sim, but fails synthesis, need to use hls::pow<16,6>(x,y) diff --git a/hls4ml/backends/symbolic/passes/validate_lut.py b/hls4ml/backends/symbolic/passes/validate_lut.py index 0288b724b9..7f9b7233bc 100644 --- a/hls4ml/backends/symbolic/passes/validate_lut.py +++ b/hls4ml/backends/symbolic/passes/validate_lut.py @@ -3,7 +3,7 @@ class ValidateUserLookupTable(ConfigurableOptimizerPass): - '''Validates the precision of user-defined LUTs is adequate''' + """Validates the precision of user-defined LUTs is adequate""" def __init__(self): self.raise_exception = False diff --git a/hls4ml/backends/vitis/passes/fifo_depth_optimization.py b/hls4ml/backends/vitis/passes/fifo_depth_optimization.py index a008c0dc19..df61f5fd11 100644 --- a/hls4ml/backends/vitis/passes/fifo_depth_optimization.py +++ b/hls4ml/backends/vitis/passes/fifo_depth_optimization.py @@ -144,7 +144,6 @@ def set_optimized_fifo_depths(model, optimized_fifo_depths): for output_variable in model.output_vars.values(): if 'StreamVariable' in str(type(output_variable)): if output_variable.pragma: - if output_variable.name not in optimized_fifo_depths.keys(): continue diff --git a/hls4ml/backends/vivado/passes/broadcast_stream.py b/hls4ml/backends/vivado/passes/broadcast_stream.py index ec6322cf78..9160314aaa 100644 --- a/hls4ml/backends/vivado/passes/broadcast_stream.py +++ b/hls4ml/backends/vivado/passes/broadcast_stream.py @@ -6,7 +6,7 @@ class Broadcast(Layer): - '''Inserted between layers for broadcasting.''' + """Inserted between layers for broadcasting.""" def initialize(self): shape = self.attributes['target_shape'] diff --git a/hls4ml/backends/vivado/passes/conv_stream.py b/hls4ml/backends/vivado/passes/conv_stream.py index 9ba0c04d32..a8032e7430 100644 --- a/hls4ml/backends/vivado/passes/conv_stream.py +++ b/hls4ml/backends/vivado/passes/conv_stream.py @@ -3,7 +3,7 @@ class GenerateConvStreamingInstructions(OptimizerPass): - '''Generates the instructions for streaming implementation of CNNs''' + """Generates the instructions for streaming implementation of CNNs""" def match(self, node): is_match = ( diff --git a/hls4ml/backends/vivado/passes/fifo_depth_optimization.py b/hls4ml/backends/vivado/passes/fifo_depth_optimization.py index 4d92e98de1..496d4965f3 100644 --- a/hls4ml/backends/vivado/passes/fifo_depth_optimization.py +++ b/hls4ml/backends/vivado/passes/fifo_depth_optimization.py @@ -82,9 +82,9 @@ def transform(self, model): if len(data['children']) == 0: print( - "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible." + 'FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible.' 
) - print("Consider increasing profiling_fifo_depth.") + print('Consider increasing profiling_fifo_depth.') return False n_elem = len(data['children'][0]['children'][0]['children']) diff --git a/hls4ml/backends/vivado/passes/pipeline_style.py b/hls4ml/backends/vivado/passes/pipeline_style.py index 66c2bbe71e..1ad6dc3c1a 100644 --- a/hls4ml/backends/vivado/passes/pipeline_style.py +++ b/hls4ml/backends/vivado/passes/pipeline_style.py @@ -15,7 +15,6 @@ def transform(self, model): self._set_pipeline_style(model, 'auto') if model.config.pipeline_style is None or model.config.pipeline_style == 'auto': - if self._maybe_set_dataflow_io_stream(model): return True diff --git a/hls4ml/backends/vivado/passes/pointwise_codegen.py b/hls4ml/backends/vivado/passes/pointwise_codegen.py index d41d51f82f..522de740a9 100644 --- a/hls4ml/backends/vivado/passes/pointwise_codegen.py +++ b/hls4ml/backends/vivado/passes/pointwise_codegen.py @@ -63,7 +63,7 @@ def generate_pointwise_conv1d_fn(layer_idx, reuse_factor=1): class GeneratePointwiseConv1D(OptimizerPass): - '''Generates code for pointwise 1D convolution''' + """Generates code for pointwise 1D convolution""" def match(self, node): return ( diff --git a/hls4ml/backends/vivado/passes/resource_strategy.py b/hls4ml/backends/vivado/passes/resource_strategy.py index 0c06190f30..f21e4fd54b 100644 --- a/hls4ml/backends/vivado/passes/resource_strategy.py +++ b/hls4ml/backends/vivado/passes/resource_strategy.py @@ -5,7 +5,7 @@ class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' + """Transposes the weights to use the dense_resource matrix multiply routine""" def match(self, node): node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU)) diff --git a/hls4ml/backends/vivado/passes/unrolled_codegen.py b/hls4ml/backends/vivado/passes/unrolled_codegen.py index d901c77008..6dd38a68c9 100644 --- a/hls4ml/backends/vivado/passes/unrolled_codegen.py +++ b/hls4ml/backends/vivado/passes/unrolled_codegen.py @@ -8,7 +8,7 @@ class GenerateUnrolledDenseResource(OptimizerPass): - '''Generates C++ code for unrolled Dense resource''' + """Generates C++ code for unrolled Dense resource""" def match(self, node): # Only apply to layers use that use Dense Matrix Multiplication @@ -137,8 +137,8 @@ def _generate_unrolled_mult_code_rf_leq_nin(self, n_in, n_out, reuse_factor, wei indent = ' ' # Generate unrolled multiplications - mult_code = f'{indent*2}#pragma HLS ALLOCATION operation instances=mul limit={mult_limit - zeros}\n' - mult_code += f'{indent*2}MULT: {{{{\n' + mult_code = f'{indent * 2}#pragma HLS ALLOCATION operation instances=mul limit={mult_limit - zeros}\n' + mult_code += f'{indent * 2}MULT: {{{{\n' for ir in range(reuse_factor): acc_step = 0 @@ -146,11 +146,11 @@ def _generate_unrolled_mult_code_rf_leq_nin(self, n_in, n_out, reuse_factor, wei w_index = ir in_index = ir - mult_code += f'{indent*3}M{ir}: {{{{\n' + mult_code += f'{indent * 3}M{ir}: {{{{\n' for _ in range(block_factor): if weights.data.flatten()[w_index] != 0: mult_code += ( - f'{indent*4}acc[{out_index}] += ' + f'{indent * 4}acc[{out_index}] += ' 'static_cast' '(CONFIG_T::template product::' f'product(data[{in_index}], weights[{w_index}]));\n' @@ -166,9 +166,9 @@ def _generate_unrolled_mult_code_rf_leq_nin(self, n_in, n_out, reuse_factor, wei else: acc_step += 1 - mult_code += f'{indent*3}}}}}\n' + mult_code += f'{indent * 3}}}}}\n' - mult_code += f'{indent*2}}}}}\n' + mult_code += f'{indent * 
2}}}}}\n' return mult_code @@ -199,18 +199,18 @@ def _generate_unrolled_mult_code_rf_gt_nin_rem0(self, n_in, n_out, reuse_factor, in_index = 0 # Generate unrolled multiplications - mult_code = f'{indent*2}#pragma HLS ALLOCATION operation instances=mul limit={mult_limit - zeros}\n' - mult_code += f'{indent*2}MULT: {{{{\n' + mult_code = f'{indent * 2}#pragma HLS ALLOCATION operation instances=mul limit={mult_limit - zeros}\n' + mult_code += f'{indent * 2}MULT: {{{{\n' for ir in range(reuse_factor): w_index = ir out_index = outidx[ir] - mult_code += f'{indent*3}M{ir}: {{{{\n' + mult_code += f'{indent * 3}M{ir}: {{{{\n' for _ in range(block_factor): if weights.data.flatten()[w_index] != 0: mult_code += ( - f'{indent*4}acc[{int(out_index)}] += ' + f'{indent * 4}acc[{int(out_index)}] += ' 'static_cast' '(CONFIG_T::template product::' f'product(data[{in_index}], weights[{w_index}]));\n' @@ -220,13 +220,13 @@ def _generate_unrolled_mult_code_rf_gt_nin_rem0(self, n_in, n_out, reuse_factor, if w_index > n_in * n_out: break out_index += outscale - mult_code += f'{indent*3}}}}}\n' + mult_code += f'{indent * 3}}}}}\n' in_index += 1 if in_index >= n_in: in_index = 0 - mult_code += f'{indent*2}}}}}\n' + mult_code += f'{indent * 2}}}}}\n' return mult_code diff --git a/hls4ml/backends/vivado/vivado_backend.py b/hls4ml/backends/vivado/vivado_backend.py index 0a18d4503d..bad0a4737b 100644 --- a/hls4ml/backends/vivado/vivado_backend.py +++ b/hls4ml/backends/vivado/vivado_backend.py @@ -571,9 +571,9 @@ def init_pooling2d(self, layer): @layer_optimizer(Softmax) def init_softmax(self, layer): if layer.model.config.get_config_value('IOType') == 'io_parallel': - assert ( - len(layer.get_input_variable().shape) == 1 - ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' + assert len(layer.get_input_variable().shape) == 1, ( + 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' 
+ ) @layer_optimizer(Embedding) def init_embed(self, layer): diff --git a/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py b/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py index ccd9521269..128a8a8345 100644 --- a/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py +++ b/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py @@ -52,7 +52,7 @@ def build( try: os.system('vivado -mode batch -source design.tcl') except Exception: - print("Something went wrong, check the Vivado logs") + print('Something went wrong, check the Vivado logs') os.chdir(curr_dir) return parse_vivado_report(model.config.get_output_dir()) @@ -73,16 +73,16 @@ def make_xclbin(self, model, platform='xilinx_u250_xdma_201830_2'): try: os.system('vivado -mode batch -source design.tcl') except Exception: - print("Something went wrong, check the Vivado logs") + print('Something went wrong, check the Vivado logs') project_name = model.config.get_project_name() ip_repo_path = abs_path_dir + '/' + project_name + '_prj' + '/solution1/impl/ip' os.makedirs('xclbin_files', exist_ok=True) os.chdir(abs_path_dir + '/xclbin_files') # TODO Add other platforms vitis_cmd = ( - "v++ -t hw --platform " + 'v++ -t hw --platform ' + platform - + " --link ../xo_files/" + + ' --link ../xo_files/' + project_name + "_kernel.xo -o'" + project_name @@ -92,7 +92,7 @@ def make_xclbin(self, model, platform='xilinx_u250_xdma_201830_2'): try: os.system(vitis_cmd) except Exception: - print("Something went wrong, check the Vitis/Vivado logs") + print('Something went wrong, check the Vitis/Vivado logs') os.chdir(curr_dir) def create_initial_config( @@ -108,7 +108,7 @@ def create_initial_config( output_type='float', platform='xilinx_u250_xdma_201830_2', ): - ''' + """ Create initial accelerator config with default parameters Args: @@ -129,7 +129,7 @@ def create_initial_config( Returns: populated config - ''' + """ board = board if board is not None else 'pynq-z2' config = super().create_initial_config(part, clock_period, clock_uncertainty, io_type) config['AcceleratorConfig'] = {} diff --git a/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py b/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py index 7bd931ede3..c755a21434 100644 --- a/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py +++ b/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py @@ -52,12 +52,12 @@ def __init__(self, config, model_inputs, model_outputs): 'Platform', 'xilinx_u250_xdma_201830_2' ) # Get platform folder name - assert ( - len(model_inputs) == 1 - ), "Only models with one input tensor are currently supported by VivadoAcceleratorBackend" - assert ( - len(model_outputs) == 1 - ), "Only models with one output tensor are currently supported by VivadoAcceleratorBackend" + assert len(model_inputs) == 1, ( + 'Only models with one input tensor are currently supported by VivadoAcceleratorBackend' + ) + assert len(model_outputs) == 1, ( + 'Only models with one output tensor are currently supported by VivadoAcceleratorBackend' + ) self.inp = model_inputs[0] self.out = model_outputs[0] inp_axi_t = self.input_type @@ -83,13 +83,13 @@ def __init__(self, config, model_inputs, model_outputs): self.output_bitwidth = config.backend.convert_precision_string(out_axi_t).width def _next_factor8_type(self, p): - '''Return a new type with the width rounded to the next factor of 8 up to p's width + """Return a new type with the width rounded to the next factor of 8 up to p's width Args: p : 
IntegerPrecisionType or FixedPrecisionType Returns: An IntegerPrecisionType or FixedPrecisionType with the width rounder up to the next factor of 8 of p's width. Other parameters (fractional bits, extra modes) stay the same. - ''' + """ W = p.width newW = int(np.ceil(W / 8) * 8) if isinstance(p, FixedPrecisionType): diff --git a/hls4ml/cli/__init__.py b/hls4ml/cli/__init__.py index fd6fcb2427..4b1fef45d5 100755 --- a/hls4ml/cli/__init__.py +++ b/hls4ml/cli/__init__.py @@ -325,5 +325,5 @@ def _report_quartus(args, extra_args): hls4ml.report.read_quartus_report(args.project, quartus_args.open_browser) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/hls4ml/contrib/garnet.py b/hls4ml/contrib/garnet.py index 075819e9df..78b9f0a760 100644 --- a/hls4ml/contrib/garnet.py +++ b/hls4ml/contrib/garnet.py @@ -64,22 +64,22 @@ def _setup_transforms(self, n_aggregators, n_filters, n_propagate): if self._quantize_transforms: self._input_feature_transform = NamedQDense( n_propagate, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - bias_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), + kernel_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), + bias_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), name='FLR', ) self._output_feature_transform = NamedQDense( n_filters, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), + kernel_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), name='Fout', ) - if self._output_activation is None or self._output_activation == "linear": + if self._output_activation is None or self._output_activation == 'linear': self._output_activation_transform = QActivation( - "quantized_bits(%i, %i)" % (self._total_bits, self._int_bits) + 'quantized_bits(%i, %i)' % (self._total_bits, self._int_bits) ) else: self._output_activation_transform = QActivation( - "quantized_%s(%i, %i)" % (self._output_activation, self._total_bits, self._int_bits) + 'quantized_%s(%i, %i)' % (self._output_activation, self._total_bits, self._int_bits) ) else: self._input_feature_transform = NamedDense(n_propagate, name='FLR') @@ -303,21 +303,21 @@ def _setup_transforms(self, n_aggregators, n_filters, n_propagate): if self._quantize_transforms is not None: input_feature_transform = NamedQDense( p, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - bias_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), + kernel_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), + bias_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), name=('FLR%d' % it), ) output_feature_transform = NamedQDense( f, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), + kernel_quantizer='quantized_bits(%i,%i,0,alpha=1)' % (self._total_bits, self._int_bits), name=('Fout%d' % it), ) - if self._output_activation is None or self._output_activation == "linear": - output_activation_transform = QActivation("quantized_bits(%i, %i)" % (self._total_bits, self._int_bits)) + if self._output_activation is None or self._output_activation == 'linear': + output_activation_transform = QActivation('quantized_bits(%i, %i)' % (self._total_bits, self._int_bits)) else: output_activation_transform = QActivation( - "quantized_%s(%i, %i)" % (self._output_activation, 
-                    "quantized_%s(%i, %i)" % (self._output_activation, self._total_bits, self._int_bits)
+                    'quantized_%s(%i, %i)' % (self._output_activation, self._total_bits, self._int_bits)
                 )
         else:
             input_feature_transform = NamedDense(p, name=('FLR%d' % it))
diff --git a/hls4ml/contrib/kl_layer/kl_layer.py b/hls4ml/contrib/kl_layer/kl_layer.py
index 44b610d327..1bfa88ecde 100644
--- a/hls4ml/contrib/kl_layer/kl_layer.py
+++ b/hls4ml/contrib/kl_layer/kl_layer.py
@@ -28,7 +28,7 @@

 # Keras implementation of a KL layer
 class KLLoss(Merge):
-    '''Keras implementation of a KL loss custom layer'''
+    """Keras implementation of a KL loss custom layer"""

     @tf_utils.shape_type_conversion
     def build(self, input_shape):
@@ -46,7 +46,7 @@ def _merge_function(self, inputs):

 # hls4ml implementations
 class HKLLoss(hls4ml.model.layers.Layer):
-    '''hls4ml implementation of a KL loss custom layer'''
+    """hls4ml implementation of a KL loss custom layer"""

     _expected_attributes = [
         ConfigurableAttribute('table_size', default=1024),
diff --git a/hls4ml/converters/__init__.py b/hls4ml/converters/__init__.py
index 693a76f666..58ee8c1423 100644
--- a/hls4ml/converters/__init__.py
+++ b/hls4ml/converters/__init__.py
@@ -3,16 +3,22 @@

 import yaml

-from hls4ml.converters.keras_to_hls import KerasFileReader  # noqa: F401
-from hls4ml.converters.keras_to_hls import KerasModelReader  # noqa: F401
-from hls4ml.converters.keras_to_hls import KerasReader  # noqa: F401
-from hls4ml.converters.keras_to_hls import get_supported_keras_layers  # noqa: F401
-from hls4ml.converters.keras_to_hls import parse_keras_model  # noqa: F401
-from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler
-from hls4ml.converters.onnx_to_hls import get_supported_onnx_layers  # noqa: F401
-from hls4ml.converters.onnx_to_hls import parse_onnx_model  # noqa: F401
-from hls4ml.converters.onnx_to_hls import onnx_to_hls, register_onnx_layer_handler
-from hls4ml.converters.pytorch_to_hls import (  # noqa: F401
+from hls4ml.converters.keras_to_hls import (
+    KerasFileReader,
+    KerasModelReader,
+    KerasReader,
+    get_supported_keras_layers,
+    keras_to_hls,
+    parse_keras_model,
+    register_keras_layer_handler,
+)
+from hls4ml.converters.onnx_to_hls import (
+    get_supported_onnx_layers,
+    onnx_to_hls,
+    parse_onnx_model,
+    register_onnx_layer_handler,
+)
+from hls4ml.converters.pytorch_to_hls import (
     get_supported_pytorch_layers,
     pytorch_to_hls,
     register_pytorch_layer_handler,
diff --git a/hls4ml/converters/keras/qkeras.py b/hls4ml/converters/keras/qkeras.py
index 8d50eb512e..c6a2e90f63 100644
--- a/hls4ml/converters/keras/qkeras.py
+++ b/hls4ml/converters/keras/qkeras.py
@@ -180,7 +180,6 @@ def get_activation_quantizer(keras_layer, input_names, activation_name='activati

 @keras_handler('QActivation')
 def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
-
     assert keras_layer['class_name'] == 'QActivation'

     layer = get_activation_quantizer(keras_layer, input_names)
diff --git a/hls4ml/converters/keras/recurrent.py b/hls4ml/converters/keras/recurrent.py
index 55dd5bf82e..0dde8b61a2 100644
--- a/hls4ml/converters/keras/recurrent.py
+++ b/hls4ml/converters/keras/recurrent.py
@@ -96,7 +96,7 @@ def parse_time_distributed_layer(keras_layer, input_names, input_shapes, data_re
     if isinstance(data_reader, KerasModelReader):
         nested_data_reader = KerasWrappedLayerReader(data_reader.model.get_layer(layer['name']).layer)
     else:
-        nested_data_reader = KerasWrappedLayerFileReader(data_reader, f"{layer['name']}/{layer['name']}")
+        nested_data_reader = KerasWrappedLayerFileReader(data_reader, f'{layer["name"]}/{layer["name"]}')

     wrapped_layer, layer_output_shape = handler(wrapped_keras_layer, [layer['name']], input_shapes, nested_data_reader)
     wrapped_layer['output_shape'] = layer_output_shape
diff --git a/hls4ml/converters/keras/reshape.py b/hls4ml/converters/keras/reshape.py
index 7d58252703..ea41e7e9e3 100644
--- a/hls4ml/converters/keras/reshape.py
+++ b/hls4ml/converters/keras/reshape.py
@@ -6,7 +6,7 @@

 @keras_handler('Flatten')
 def parse_flatten_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer["class_name"] == 'Flatten'
+    assert keras_layer['class_name'] == 'Flatten'

     layer = parse_default_keras_layer(keras_layer, input_names)

@@ -19,7 +19,7 @@ def parse_flatten_layer(keras_layer, input_names, input_shapes, data_reader):

 @keras_handler('Reshape')
 def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer["class_name"] == 'Reshape'
+    assert keras_layer['class_name'] == 'Reshape'

     layer = parse_default_keras_layer(keras_layer, input_names)

diff --git a/hls4ml/converters/onnx/convolution.py b/hls4ml/converters/onnx/convolution.py
index d84fb855a8..259ef1063c 100644
--- a/hls4ml/converters/onnx/convolution.py
+++ b/hls4ml/converters/onnx/convolution.py
@@ -8,7 +8,7 @@ def parse_conv_layer(node, input_names, input_shapes, graph):
     layer = {}
     layer['name'] = node.name
     if node.domain != 'qonnx.custom_op.channels_last':
-        raise RuntimeError("Please convert the model to channels-last format with qonnx-to-channels-last")
+        raise RuntimeError('Please convert the model to channels-last format with qonnx-to-channels-last')
     layer['data_format'] = 'channels_last'  # QONNX needs to be channels-last.
     layer['inputs'] = input_names
     layer['outputs'] = node.output
@@ -35,7 +35,7 @@ def parse_conv_layer(node, input_names, input_shapes, graph):

     layer['n_dim'] = len(input_shapes[0]) - 2  # 2 comes from channels and batch dimensions
     if layer['n_dim'] not in (1, 2):
-        raise ValueError("Only 1D and 2D convolutions are supported")
+        raise ValueError('Only 1D and 2D convolutions are supported')
     layer['class_name'] = 'Conv'

     # set some values needed later
diff --git a/hls4ml/converters/onnx/core.py b/hls4ml/converters/onnx/core.py
index 8ad851426d..c7b6336ec6 100644
--- a/hls4ml/converters/onnx/core.py
+++ b/hls4ml/converters/onnx/core.py
@@ -96,11 +96,11 @@ def parse_batchnorm_layer(node, input_names, input_shapes, graph):
         layer['n_filt'] = -1
     elif len(input_shapes[0]) > 2:
         if node.domain != 'qonnx.custom_op.channels_last':
-            raise RuntimeError("Please convert the model to channels-last format with qonnx-to-channels-last")
+            raise RuntimeError('Please convert the model to channels-last format with qonnx-to-channels-last')
         layer['data_format'] = 'channels_last'  # QONNX needs to be channels-last.
        layer['n_filt'] = input_shapes[0][-1]
     else:
-        raise RuntimeError(f"Unexpected input shape: {input_shapes[0]}")
+        raise RuntimeError(f'Unexpected input shape: {input_shapes[0]}')

     return layer
diff --git a/hls4ml/converters/onnx/pooling.py b/hls4ml/converters/onnx/pooling.py
index 1f5c431004..0da6b34c69 100644
--- a/hls4ml/converters/onnx/pooling.py
+++ b/hls4ml/converters/onnx/pooling.py
@@ -12,7 +12,7 @@ def parse_pool_layer(node, input_names, input_shapes, graph):
     layer['inputs'] = input_names
     layer['outputs'] = list(node.output)
     if node.domain != 'qonnx.custom_op.channels_last':
-        raise RuntimeError("Please convert the model to channels-last format with qonnx-to-channels-last")
+        raise RuntimeError('Please convert the model to channels-last format with qonnx-to-channels-last')
     layer['class_name'] = node.op_type
     layer['data_format'] = 'channels_last'  # Default QONNX

diff --git a/hls4ml/converters/onnx_to_hls.py b/hls4ml/converters/onnx_to_hls.py
index 0f7662c35e..43fec6cf04 100644
--- a/hls4ml/converters/onnx_to_hls.py
+++ b/hls4ml/converters/onnx_to_hls.py
@@ -195,7 +195,7 @@ def parse_onnx_model(onnx_model):
     constant_layers = all_initializers  # no need to copy it even though we change it
     output_layers = get_out_layer_name(onnx_model.graph)

-    print("Output layers: ", output_layers)
+    print('Output layers: ', output_layers)

     for i, inp in enumerate(input_layers):
         input_layer = {}
@@ -258,7 +258,7 @@ def parse_onnx_model(onnx_model):
         layer = layer_handlers[node.op_type](node, input_names, input_shapes, onnx_model.graph)

         sanitize_layer_name(layer)
-        print(f"Layer name: {layer['name']}, layer type: {layer['class_name']}, current shape: {input_shapes}")
+        print(f'Layer name: {layer["name"]}, layer type: {layer["class_name"]}, current shape: {input_shapes}')
         layer_list.append(layer)

     return layer_list, input_layers, output_layers
diff --git a/hls4ml/converters/pytorch/pooling.py b/hls4ml/converters/pytorch/pooling.py
index 54e840cacb..9f7aece195 100644
--- a/hls4ml/converters/pytorch/pooling.py
+++ b/hls4ml/converters/pytorch/pooling.py
@@ -34,9 +34,9 @@ def parse_pooling_layer(operation, layer_name, input_names, input_shapes, node,
         (*_, layer['n_in'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format'])
         if node.op == 'call_module':
             layer['pool_width'] = (
-                class_object.kernel_size if not type(class_object.kernel_size) is tuple else class_object.kernel_size[0]
+                class_object.kernel_size if type(class_object.kernel_size) is not tuple else class_object.kernel_size[0]
             )
-            layer['stride_width'] = class_object.stride if not type(class_object.stride) is tuple else class_object.stride[0]
+            layer['stride_width'] = class_object.stride if type(class_object.stride) is not tuple else class_object.stride[0]

             if type(class_object.padding) is tuple:
                 padding = class_object.padding[0]
diff --git a/hls4ml/converters/pytorch/recurrent.py b/hls4ml/converters/pytorch/recurrent.py
index 5d8f6a58bd..610f28dc1b 100644
--- a/hls4ml/converters/pytorch/recurrent.py
+++ b/hls4ml/converters/pytorch/recurrent.py
@@ -11,7 +11,7 @@ def parse_rnn_layer(operation, layer_name, input_names, input_shapes, node, clas

     layer = {}

-    layer["name"] = layer_name
+    layer['name'] = layer_name

     layer['inputs'] = input_names
     if 'IOType' in config.keys():
diff --git a/hls4ml/converters/pytorch/reshape.py b/hls4ml/converters/pytorch/reshape.py
index f7392ab8da..cd4a145f91 100644
--- a/hls4ml/converters/pytorch/reshape.py
+++ b/hls4ml/converters/pytorch/reshape.py
@@ -122,7 +122,6 @@ def parse_flatten_layer(operation, layer_name, input_names, input_shapes, node,

 @pytorch_handler('Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d')
 def handle_upsample(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
-
     assert operation in ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d']

     layer = {}
     layer['name'] = layer_name
diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index a36ff5eb67..998383fa66 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -224,7 +224,7 @@ def parse_pytorch_model(config, verbose=True):
             # parse info from class object
             input_names = [inputs_map.get(str(i), str(i)) for i in node.args]

-            if pytorch_class in ["RNN", "GRU", "LSTM"]:
+            if pytorch_class in ['RNN', 'GRU', 'LSTM']:
                 input_shapes = []
                 input_names = []
                 for arg in node.args:
@@ -237,10 +237,10 @@ def parse_pytorch_model(config, verbose=True):
                         input_names.append(inputs_map.get(str(arg), str(arg)))

             # if a 'getitem' is the input to a node, step back in the graph to find the real source of the input
-            elif "getitem" in node.args[0].name:
+            elif 'getitem' in node.args[0].name:
                 for tmp_node in traced_model.nodes:
                     if tmp_node.name == node.args[0].name:
-                        if "getitem" in tmp_node.args[0].name:
+                        if 'getitem' in tmp_node.args[0].name:
                             raise Exception('Nested getitem calls not resolved at the moment.')
                         input_names = [inputs_map.get(str(tmp_node.args[0]), str(tmp_node.args[0]))]
                         input_shapes = [output_shapes[str(tmp_node.args[0])]]
@@ -290,7 +290,6 @@ def parse_pytorch_model(config, verbose=True):
                 output_shapes[layer['name']] = output_shape

             else:
-
                 input_layer['class_name'] = 'InputLayer'
                 input_layer['input_shape'] = list(input_shapes[n_inputs][1:])
                 layer_list.insert(n_inputs, input_layer)
@@ -314,7 +313,7 @@ def parse_pytorch_model(config, verbose=True):
                 operation = layer_name_map[operation]

             # only a limited number of functions are supported
-            if operation == "getitem":
+            if operation == 'getitem':
                 continue
             if operation not in supported_layers:
                 raise Exception(f'Unsupported function {operation}')
diff --git a/hls4ml/model/__init__.py b/hls4ml/model/__init__.py
index 4ca72e3cd6..60e00475cb 100644
--- a/hls4ml/model/__init__.py
+++ b/hls4ml/model/__init__.py
@@ -1 +1 @@
-from hls4ml.model.graph import HLSConfig, ModelGraph  # noqa: F401
+from hls4ml.model.graph import HLSConfig, ModelGraph
diff --git a/hls4ml/model/flow/__init__.py b/hls4ml/model/flow/__init__.py
index 0e2a180ec5..57760820d3 100644
--- a/hls4ml/model/flow/__init__.py
+++ b/hls4ml/model/flow/__init__.py
@@ -1,4 +1,4 @@
-from hls4ml.model.flow.flow import (  # noqa: F401
+from hls4ml.model.flow.flow import (
     Flow,
     get_available_flows,
     get_backend_flows,
diff --git a/hls4ml/model/graph.py b/hls4ml/model/graph.py
index 76c621a1a2..6c528e97f7 100644
--- a/hls4ml/model/graph.py
+++ b/hls4ml/model/graph.py
@@ -333,8 +333,8 @@ def __init__(self, config, layer_list, inputs=None, outputs=None):
             self.inputs = self._find_output_variable_names(layer_list, input_layers)
             if self.inputs != input_layers:
                 raise RuntimeError(
-                    "Currently only support the case when input variables and input layer names match\n"
-                    + f"Input layers = {input_layers}, input_vars = {self.inputs}"
+                    'Currently only support the case when input variables and input layer names match\n'
+                    + f'Input layers = {input_layers}, input_vars = {self.inputs}'
                 )

             self.outputs = self._find_output_variable_names(layer_list, output_layers)
@@ -540,7 +540,6 @@ def remove_node(self, node):
            raise Exception('Cannot delete a node with multiple inputs/outputs')

         if len(outputs) == 1 and len(inputs) == 1:
-
             # Connect inputs -> $outputs
             if node.outputs[0] in self.outputs:
                 msg = f'Removing leaf node {node.name} will connect its input node {inputs[0]} to output, but it already is.'
@@ -694,7 +693,7 @@ def compile(self):
     def _compile(self):
         lib_name = self.config.backend.compile(self)
         if self._top_function_lib is not None:
-            if platform.system() == "Linux":
+            if platform.system() == 'Linux':
                 libdl_libs = ['libdl.so', 'libdl.so.2']
                 for libdl in libdl_libs:
                     try:
@@ -702,7 +701,7 @@ def _compile(self):
                         break
                     except Exception:
                         continue
-            elif platform.system() == "Darwin":
+            elif platform.system() == 'Darwin':
                 dlclose_func = ctypes.CDLL('libc.dylib').dlclose
                 dlclose_func.argtypes = [ctypes.c_void_p]

@@ -740,7 +739,7 @@ def _get_top_function(self, x):
             )

         top_function.restype = None
-        top_function.argtypes = [npc.ndpointer(ctype, flags="C_CONTIGUOUS") for i in range(len(xlist) + n_outputs)]
+        top_function.argtypes = [npc.ndpointer(ctype, flags='C_CONTIGUOUS') for i in range(len(xlist) + n_outputs)]

         return top_function, ctype
diff --git a/hls4ml/model/layers.py b/hls4ml/model/layers.py
index 0efeaafa3d..c47743ba42 100644
--- a/hls4ml/model/layers.py
+++ b/hls4ml/model/layers.py
@@ -78,7 +78,7 @@ def __init__(self, model, name, attributes, inputs, outputs=None):
         if name == 'input':
             raise RuntimeError(
                 "No model layer should be named 'input' because that is a reserved;"
-                + "layer name in ModelGraph; Please rename the layer in your model"
+                + 'layer name in ModelGraph; Please rename the layer in your model'
             )
         self.model = model
         self.name = name
@@ -348,7 +348,7 @@ class Input(Layer):
     def initialize(self):
         shape = self.attributes['input_shape']
         if shape[0] is None:
-            raise RuntimeError(f"Unexpectedly have a None in {shape=} of Input layer")
+            raise RuntimeError(f'Unexpectedly have a None in {shape=} of Input layer')
         dims = [f'N_INPUT_{i}_{self.index}' for i in range(1, len(shape) + 1)]
         if self.index == 1:
             default_type_name = 'input_t'
@@ -419,7 +419,7 @@ def initialize(self):
             if isinstance(shape_node, Constant):
                 target_shape = shape_node.attributes['value'][1:]
             else:
-                raise RuntimeError("Reshape for ONNX requires the target shape to be a second input.")
+                raise RuntimeError('Reshape for ONNX requires the target shape to be a second input.')

         # remove Nones -- Seems to be used by pytorch parser
         if target_shape[0] is None:
@@ -936,12 +936,12 @@ def _get_act_function_name(self):

 class HardActivation(Activation):
-    '''
+    """
     Implements the hard sigmoid and tanh function in keras and qkeras
     (Default parameters in qkeras are different, so should be configured)
     The hard sigmoid function is clip(slope * x + shift, 0, 1), and the
     hard tanh function is 2 * hard_sigmoid - 1
-    '''
+    """

     _expected_attributes = [
         Attribute('slope', value_type=float, default=0.2, configurable=False),
@@ -985,10 +985,10 @@ def initialize(self):

 class BatchNormOnnx(Layer):
-    '''
+    """
     A transient layer formed from ONNX BatchNormalization that gets converted to
     BatchNormalization after the scale and bias are determined
-    '''
+    """

     def initialize(self):
         inp = self.get_input_variable()
@@ -1030,8 +1030,8 @@ def initialize(self):

 # TODO: discuss whether this should be renamed to something more descriptive, and whether the class hierarchy makes sense
 class ApplyAlpha(BatchNormalization):
-    '''A custom layer to scale the output of a QDense layer which used 'alpha != 1'
-    Inference computation uses BatchNormalization methods'''
+    """A custom layer to scale the output of a QDense layer which used 'alpha != 1'
+    Inference computation uses BatchNormalization methods"""

     def initialize(self):
         inp = self.get_input_variable()
@@ -1317,7 +1317,7 @@ def initialize(self):

         # biases
         self.add_weights_variable(name='bias', var_name='b{index}')
-        if "pytorch" in self.attributes.keys():
+        if 'pytorch' in self.attributes.keys():
             self.add_weights_variable(name='recurrent_bias', var_name='br{index}')

@@ -1371,7 +1371,7 @@ def initialize(self):
         # biases
         self.add_weights_variable(name='bias', var_name='b{index}')
-        if "pytorch" in self.attributes.keys():
+        if 'pytorch' in self.attributes.keys():
             self.add_weights_variable(name='recurrent_bias', var_name='br{index}')
         else:
             recurrent_bias = np.zeros(recurrent_weight.shape[1])
@@ -1626,7 +1626,7 @@ def initialize(self):
         shape = self.get_attr('output_shape')
         if shape[0] is None:
             shape.pop(0)
-        dims = [f'N_INPUT_{self.index}_{i+1}' for i in range(len(shape))]
+        dims = [f'N_INPUT_{self.index}_{i + 1}' for i in range(len(shape))]
         self.add_output_variable(shape, dims)
diff --git a/hls4ml/model/optimizer/__init__.py b/hls4ml/model/optimizer/__init__.py
index c474970448..e98187037a 100644
--- a/hls4ml/model/optimizer/__init__.py
+++ b/hls4ml/model/optimizer/__init__.py
@@ -1,7 +1,7 @@
 import os

 from hls4ml.model.flow.flow import register_flow
-from hls4ml.model.optimizer.optimizer import (  # noqa: F401
+from hls4ml.model.optimizer.optimizer import (
     ConfigurableOptimizerPass,
     GlobalOptimizerPass,
     LayerOptimizerPass,
diff --git a/hls4ml/model/optimizer/passes/batchnorm_opt.py b/hls4ml/model/optimizer/passes/batchnorm_opt.py
index a2a799c127..8fad735486 100644
--- a/hls4ml/model/optimizer/passes/batchnorm_opt.py
+++ b/hls4ml/model/optimizer/passes/batchnorm_opt.py
@@ -127,7 +127,7 @@ def transform(self, model, node):
             IntegerPrecisionType,
             FixedPrecisionType,
         ):
-            print("Warning: output type not propagated for constant merge")
+            print('Warning: output type not propagated for constant merge')
         else:
             signed_prod = const_prec.signed or scale_prec.signed
             w_prod = const_prec.width + scale_prec.width
diff --git a/hls4ml/model/optimizer/passes/convert_to_channels_last.py b/hls4ml/model/optimizer/passes/convert_to_channels_last.py
index 6511a6967b..5a91870499 100644
--- a/hls4ml/model/optimizer/passes/convert_to_channels_last.py
+++ b/hls4ml/model/optimizer/passes/convert_to_channels_last.py
@@ -8,8 +8,8 @@

 class ChannelsLastConverter(OptimizerPass):
-    '''Converts a model from channels_first to channels_last data format by transposing the weights of relevant layers
-    and adding a transpose layer for the inputs and outputs, if necessary'''
+    """Converts a model from channels_first to channels_last data format by transposing the weights of relevant layers
+    and adding a transpose layer for the inputs and outputs, if necessary"""

     def match(self, node):
         # If this parameter has not been set, this model does not need to be converted
@@ -23,7 +23,7 @@ def transform(self, model, node):

         if isinstance(node, Input):
             # if inputs are not yet transposed into channels_last, add transpose layer
-            if model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == "full" and len(outshape) > 1:
+            if model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == 'full' and len(outshape) > 1:
                 # Add transpose for input layer
                 input = node.name
                 if len(outshape) == 2:
@@ -38,7 +38,7 @@ def transform(self, model, node):
                 transpose_node.channels_last_converted = True

                 model.insert_node(transpose_node)
-            elif model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == "internal" and len(outshape) > 1:
+            elif model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == 'internal' and len(outshape) > 1:
                 input_shape = node.get_output_variable().shape
                 input_shape.append(input_shape.pop(0))
                 node.get_output_variable().shape = input_shape
@@ -96,7 +96,7 @@ def transform(self, model, node):
         if (
             isinstance(node, Reshape)
             and len(node.attributes['target_shape']) == 1
-            and not model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == "off"
+            and not model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == 'off'
         ):
             previous_node = node.get_input_node(node.inputs[0])
             input = previous_node.name
@@ -140,8 +140,8 @@ def transform(self, model, node):

 class RemoveTransposeBeforeFlatten(OptimizerPass):
-    '''After the channels last conversion, model may have a sequence: Transpose -> Flatten -> Dense.
-    In this case we can remove the expensive transpose and instead transpose the weights of the Dense layer.'''
+    """After the channels last conversion, model may have a sequence: Transpose -> Flatten -> Dense.
+    In this case we can remove the expensive transpose and instead transpose the weights of the Dense layer."""

     def match(self, node):
         if node.model.config.get_config_value('IOType') != 'io_parallel':
diff --git a/hls4ml/model/optimizer/passes/expand_layer_group.py b/hls4ml/model/optimizer/passes/expand_layer_group.py
index 3f85784003..31e7a1f14d 100644
--- a/hls4ml/model/optimizer/passes/expand_layer_group.py
+++ b/hls4ml/model/optimizer/passes/expand_layer_group.py
@@ -3,7 +3,7 @@

 class ExpandLayerGroup(OptimizerPass):
-    '''Expands LayerGroup (a nested model) into the parent model.'''
+    """Expands LayerGroup (a nested model) into the parent model."""

     def match(self, node):
         return isinstance(node, LayerGroup)
diff --git a/hls4ml/model/optimizer/passes/expand_time_distributed.py b/hls4ml/model/optimizer/passes/expand_time_distributed.py
index 8a4f3390f4..ed9a162f3d 100644
--- a/hls4ml/model/optimizer/passes/expand_time_distributed.py
+++ b/hls4ml/model/optimizer/passes/expand_time_distributed.py
@@ -3,7 +3,7 @@

 class ExpandTimeDistributed(OptimizerPass):
-    '''Expands TimeDistributed's wrapped layer into the graph and inserts a marker at the end.
+    """Expands TimeDistributed's wrapped layer into the graph and inserts a marker at the end.

     For example, the layer defined as:
         TimeDistributed(Dense(...))
@@ -15,7 +15,7 @@ class ExpandTimeDistributed(OptimizerPass):
     Handling flattened hierarchy has advantages of exposing the wrapped layer(s) to the optimizers. Backends may
     choose to undo this after all optimizers have been applied on the wrapped layers.
-    '''
+    """

     def match(self, node):
         return isinstance(node, TimeDistributed) and not isinstance(node.get_attr('wrapped_layer'), TimeDistributed)
diff --git a/hls4ml/model/optimizer/passes/fuse_biasadd.py b/hls4ml/model/optimizer/passes/fuse_biasadd.py
index 6054b03539..7b0e2ee176 100644
--- a/hls4ml/model/optimizer/passes/fuse_biasadd.py
+++ b/hls4ml/model/optimizer/passes/fuse_biasadd.py
@@ -3,7 +3,7 @@

 class FuseBiasAdd(OptimizerPass):
-    '''Fuses BiasAdd into Dense/Conv2D layer (common in TF models).'''
+    """Fuses BiasAdd into Dense/Conv2D layer (common in TF models)."""

     def match(self, node):
         return isinstance(node, BiasAdd) and isinstance(node.get_input_node(), (Dense, Conv1D, Conv2D))
diff --git a/hls4ml/model/optimizer/passes/infer_precision.py b/hls4ml/model/optimizer/passes/infer_precision.py
index 919bc0c3c2..6164992d8c 100644
--- a/hls4ml/model/optimizer/passes/infer_precision.py
+++ b/hls4ml/model/optimizer/passes/infer_precision.py
@@ -316,7 +316,6 @@ def _infer_bn_precision(self, node, types_to_infer):
             bias_precision = node.types['bias_t'].precision

             if self._all_supported_types((input_precision, scale_precision, bias_precision)):
-
                 after_scale_signed = scale_precision.signed or input_precision.signed
                 after_scale_width = input_precision.width + scale_precision.width
                 after_scale_integer = input_precision.integer + scale_precision.integer
diff --git a/hls4ml/model/optimizer/passes/linear.py b/hls4ml/model/optimizer/passes/linear.py
index ce0308eb66..be0b295b48 100644
--- a/hls4ml/model/optimizer/passes/linear.py
+++ b/hls4ml/model/optimizer/passes/linear.py
@@ -19,14 +19,14 @@ def transform(self, model, node):

 class MergeLinearActivation(OptimizerPass):
-    '''
+    """
     For many objects it's safe to change the output precision independently of the calculation.
-    '''
+    """

     def match(self, node):
-        '''
+        """
         Only match if the parent is safe and the precision is not explicitly set.
-        '''
+        """
         if isinstance(node, Activation) and node.get_attr('activation') == 'linear':
             parent = node.get_input_node(node.inputs[0])
             safe_parent = isinstance(parent, _safe_parents)
@@ -36,10 +36,10 @@ def match(self, node):

     def transform(self, model, node):
         prev_node = node.get_input_node(node.inputs[0])
-        quantizer = node.get_attr("quantizer")
+        quantizer = node.get_attr('quantizer')
         # if the activation has a quantizer (usually from a QONNX Quant node), set the previous node's output precision
         if quantizer is not None:
-            prev_node.set_attr("quantizer", quantizer)
+            prev_node.set_attr('quantizer', quantizer)
             prev_node.get_output_variable().type.precision = quantizer.hls_type
         model.remove_node(node)
         return True
diff --git a/hls4ml/model/optimizer/passes/move_scales.py b/hls4ml/model/optimizer/passes/move_scales.py
index 03bb0f3b77..7105ee3b66 100644
--- a/hls4ml/model/optimizer/passes/move_scales.py
+++ b/hls4ml/model/optimizer/passes/move_scales.py
@@ -1,9 +1,9 @@
-'''
+"""
 This file includes optimizations related to moving the ApplyAlphas across MatMul and Conv nodes.

 TODO: Check that biases are properly handled. (Attempt to do it via Merge)
-'''
+"""

 import warnings

@@ -17,13 +17,13 @@

 class ScaleDownMatMul(OptimizerPass):
-    '''Shift an ApplyAlpha below a MatMul'''
+    """Shift an ApplyAlpha below a MatMul"""

     def match(self, node):
-        '''
+        """
         Check to see if we have a MatMul with at least one input ApplyAlpha.
         Note, if both are this optimizer runs twice.
-        '''
+        """
         is_match = (
             isinstance(node, MatMul)
             and len(node.inputs) == 2
@@ -106,11 +106,11 @@ def transform(self, model, node):

 class ScaleDownAdd(OptimizerPass):
-    '''Shift an identical ApplyAlpha below a Merge (Add)'''
+    """Shift an identical ApplyAlpha below a Merge (Add)"""

     def match(self, node):
-        '''Check to see if we have an add with two ApplyAlphas with identical scale'''
-        is_match = isinstance(node, Merge) and len(node.inputs) == 2 and node.attributes["op"] == "add"
+        """Check to see if we have an add with two ApplyAlphas with identical scale"""
+        is_match = isinstance(node, Merge) and len(node.inputs) == 2 and node.attributes['op'] == 'add'
         if is_match:
             in0 = node.get_input_node(node.inputs[0])
             in1 = node.get_input_node(node.inputs[1])
@@ -150,13 +150,13 @@ def transform(self, model, node):

 class BiasDownAdd(OptimizerPass):
-    '''Shift an ApplyAlpha with only bias below a Merge (Add)'''
+    """Shift an ApplyAlpha with only bias below a Merge (Add)"""

     def match(self, node):
-        '''Match if there is only one ApplyAlpha. If there are two, if the scale of both is 0, they would
+        """Match if there is only one ApplyAlpha. If there are two, if the scale of both is 0, they would
         match the ScaleDownAdd, so this optimizer does not need to handle that case.
-        '''
-        is_match = isinstance(node, Merge) and len(node.inputs) == 2 and node.attributes["op"] == "add"
+        """
+        is_match = isinstance(node, Merge) and len(node.inputs) == 2 and node.attributes['op'] == 'add'
         if is_match:
             in0 = node.get_input_node(node.inputs[0])
             in1 = node.get_input_node(node.inputs[1])
@@ -185,10 +185,10 @@ def transform(self, model, node):

 class ScaleDownConv(OptimizerPass):
-    '''Shift an ApplyAlpha on a Conv with 2-3 inputs'''
+    """Shift an ApplyAlpha on a Conv with 2-3 inputs"""

     def match(self, node):
-        '''Shift an ApplyAlpha from the Weight'''
+        """Shift an ApplyAlpha from the Weight"""
         is_match = (
             isinstance(node, Conv)
             and len(node.inputs) > 1
@@ -210,9 +210,9 @@ def transform(self, model, node):
         aa2 = isinstance(in2, ApplyAlpha) if len(node.inputs) == 3 else False

         if not isinstance(in1, (Constant, ApplyAlpha)):
-            raise RuntimeError("The weight node needs to be ApplyAlpha or Constant")
+            raise RuntimeError('The weight node needs to be ApplyAlpha or Constant')
         if len(node.inputs) == 3 and not isinstance(in2, (Constant, ApplyAlpha)):
-            raise RuntimeError("The bias node needs to be ApplyAlpha or Constant")
+            raise RuntimeError('The bias node needs to be ApplyAlpha or Constant')

         scale0 = in0.weights['scale'].data_unquantized if aa0 else None
         bias0 = in0.weights['bias'].data_unquantized if aa0 else None
diff --git a/hls4ml/model/optimizer/passes/qkeras.py b/hls4ml/model/optimizer/passes/qkeras.py
index fb02d4eccf..de4052198e 100644
--- a/hls4ml/model/optimizer/passes/qkeras.py
+++ b/hls4ml/model/optimizer/passes/qkeras.py
@@ -7,7 +7,7 @@

 class OutputRoundingSaturationMode(ConfigurableOptimizerPass):
-    '''
+    """
     Set the Rounding and Saturation mode of the output (and accumulator, if applicable)
     of the layers specified in the layer list. The layer list is empty by default.
@@ -17,7 +17,7 @@ class OutputRoundingSaturationMode(ConfigurableOptimizerPass):
     To set which mode to use:
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(rounding_mode='AP_RND_CONV')
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(saturation_mode='AP_SAT')
-    '''
+    """

     def __init__(self):
         self.layers = []
@@ -31,9 +31,9 @@ def match(self, node):
         # check that the type doesn't already contain the rounding mode
         rs_match = False
         if self.rounding_mode is not None:
-            rs_match = rs_match or not (self.rounding_mode in t)
+            rs_match = rs_match or self.rounding_mode not in t
         if self.saturation_mode is not None:
-            rs_match = rs_match or not (self.saturation_mode in t)
+            rs_match = rs_match or self.saturation_mode not in t
         return layer_match and rs_match

     def transform(self, model, node):
@@ -84,10 +84,10 @@ def register_qkeras():

 class QKerasFactorizeAlpha(OptimizerPass):
-    '''OptimizerPass for extracting alpha "scale" from QKeras quantized layer.
+    """OptimizerPass for extracting alpha "scale" from QKeras quantized layer.
     The weights of the Q{Dense, Conv} layer are scaled to the common data type,
     and an 'ApplyAlpha' layer is inserted to reapply the scale.
-    '''
+    """

     def match(self, node):
         q_layer = node.class_name in ['Dense', 'Conv1D', 'Conv2D', 'Conv2DBatchnorm']
@@ -191,12 +191,12 @@ def transform(self, model, node):

 class ExtractTernaryThreshold(OptimizerPass):
-    '''The input value (threshold) at which the output of a ternary activation
+    """The input value (threshold) at which the output of a ternary activation
     changes is configurable. This pass extracts that threshold point, inserting
     a BatchNormalization layer to execute the scaling. That BatchNormalization
     layer is then expected to be fused into a BatchNormalizationQuantizedTanh
     layer configured with the correct threshold.
-    '''
+    """

     def match(self, node):
         return node.class_name == 'TernaryTanh' and node.get_attr('threshold', None) != 0.5
diff --git a/hls4ml/model/optimizer/passes/quant_opt.py b/hls4ml/model/optimizer/passes/quant_opt.py
index 6c9badd832..39e5671d26 100644
--- a/hls4ml/model/optimizer/passes/quant_opt.py
+++ b/hls4ml/model/optimizer/passes/quant_opt.py
@@ -73,7 +73,7 @@ def transform(self, model, node):
         node.inputs = [inp for inp in node.inputs if inp]
         if len(node.inputs) != 1:
-            raise RuntimeError("hls4ml only supports constant scale, zeropt, and bitwidth values")
+            raise RuntimeError('hls4ml only supports constant scale, zeropt, and bitwidth values')

         return True

diff --git a/hls4ml/model/profiling.py b/hls4ml/model/profiling.py
index acc4ccfa44..043db6da5c 100644
--- a/hls4ml/model/profiling.py
+++ b/hls4ml/model/profiling.py
@@ -91,7 +91,7 @@ def boxplot(data, fmt='longform'):
         medianprops = dict(linestyle='-', color='k')
         f, ax = plt.subplots(1, 1)
         data.reverse()
-        colors = sb.color_palette("Blues", len(data))
+        colors = sb.color_palette('Blues', len(data))
         bp = ax.bxp(data, showfliers=False, vert=False, medianprops=medianprops)
         # add colored boxes
         for line, color in zip(bp['boxes'], colors):
@@ -114,7 +114,7 @@ def histogram(data, fmt='longform'):
     from matplotlib.ticker import MaxNLocator

     n = len(data) if fmt == 'summary' else len(data['weight'].unique())
-    colors = sb.color_palette("husl", n)
+    colors = sb.color_palette('husl', n)
     if fmt == 'longform':
         for i, weight in enumerate(data['weight'].unique()):
             y = array_to_summary(data[data['weight'] == weight]['x'], fmt='histogram')
@@ -164,7 +164,7 @@ def types_boxplot(data, fmt='longform'):
 def types_histogram(data, fmt='longform'):
     ax = plt.gca()
     layers = np.array(ax.get_legend_handles_labels()[1])
-    colors = sb.color_palette("husl", len(layers))
+    colors = sb.color_palette('husl', len(layers))
     ylim = ax.get_ylim()
     for _irow, row in data[data['layer'] != 'model'].iterrows():
         if row['layer'] in layers:
@@ -309,10 +309,10 @@ def activations_hlsmodel(model, X, fmt='summary', plot='boxplot'):
     _, trace = model.trace(np.ascontiguousarray(X))

     if len(trace) == 0:
-        raise RuntimeError("ModelGraph must have tracing on for at least 1 layer (this can be set in its config)")
+        raise RuntimeError('ModelGraph must have tracing on for at least 1 layer (this can be set in its config)')

     for layer in trace.keys():
-        print(f" {layer}")
+        print(f' {layer}')

         if fmt == 'summary':
             y = trace[layer].flatten()
@@ -374,7 +374,7 @@ def activations_keras(model, X, fmt='longform', plot='boxplot'):
     )
     outputs = dict(zip([layer.name for layer in model.layers if not isinstance(layer, keras.layers.InputLayer)], outputs))
     for layer_name, y in outputs.items():
-        print(f" {layer_name}")
+        print(f' {layer_name}')
         y = y.flatten()
         y = abs(y[y != 0])
         if len(y) == 0:
@@ -412,7 +412,7 @@ def activations_torch(model, X, fmt='longform', plot='boxplot'):
         lname = layer.__class__.__name__
         layers.append(layer)
         pm = partial_model(*layers)
-        print(f" {lname}")
+        print(f' {lname}')
         y = pm(X).flatten().detach().numpy()
         y = abs(y[y != 0])
         if len(y) == 0:
@@ -454,15 +454,15 @@ def numerical(model=None, hls_model=None, X=None, plot='boxplot'):
     model_present = model is not None

     if hls_model_present:
-        before = " (before optimization)"
-        after = " (final / after optimization)"
+        before = ' (before optimization)'
+        after = ' (final / after optimization)'
         hls_model_unoptimized, tmp_output_dir = get_unoptimized_hlsmodel(hls_model)
     else:
-        before = ""
-        after = ""
+        before = ''
+        after = ''
        hls_model_unoptimized, tmp_output_dir = None, None

-    print("Profiling weights" + before)
+    print('Profiling weights' + before)
     data = None

     if hls_model_present:
@@ -474,7 +474,7 @@ def numerical(model=None, hls_model=None, X=None, plot='boxplot'):
         data = weights_torch(model, fmt='summary', plot=plot)

     if data is None:
-        print("Only keras, PyTorch and ModelGraph models " + "can currently be profiled")
+        print('Only keras, PyTorch and ModelGraph models ' + 'can currently be profiled')

         if hls_model_present and os.path.exists(tmp_output_dir):
             shutil.rmtree(tmp_output_dir)
@@ -487,11 +487,11 @@ def numerical(model=None, hls_model=None, X=None, plot='boxplot'):
         t_data = types_hlsmodel(hls_model_unoptimized)
         types_plots[plot](t_data, fmt='summary')

-    plt.title("Distribution of (non-zero) weights" + before)
+    plt.title('Distribution of (non-zero) weights' + before)
     plt.tight_layout()

     if hls_model_present:
-        print("Profiling weights" + after)
+        print('Profiling weights' + after)
         data = weights_hlsmodel(hls_model, fmt='summary', plot=plot)

         wph = plots[plot](data, fmt='summary')  # weight plot
@@ -500,11 +500,11 @@ def numerical(model=None, hls_model=None, X=None, plot='boxplot'):
             t_data = types_hlsmodel(hls_model)
             types_plots[plot](t_data, fmt='summary')

-        plt.title("Distribution of (non-zero) weights" + after)
+        plt.title('Distribution of (non-zero) weights' + after)
         plt.tight_layout()

     if X is not None:
-        print("Profiling activations" + before)
+        print('Profiling activations' + before)
         data = None
         if __keras_profiling_enabled__ and isinstance(model, keras.Model):
             data = activations_keras(model, X, fmt='summary', plot=plot)
@@ -516,18 +516,18 @@ def numerical(model=None, hls_model=None, X=None, plot='boxplot'):
         if hls_model_present and plot in types_plots:
             t_data = activation_types_hlsmodel(hls_model_unoptimized)
             types_plots[plot](t_data, fmt='summary')

-        plt.title("Distribution of (non-zero) activations" + before)
+        plt.title('Distribution of (non-zero) activations' + before)
         plt.tight_layout()

         if hls_model_present:
-            print("Profiling activations" + after)
+            print('Profiling activations' + after)
             data = activations_hlsmodel(hls_model, X, fmt='summary', plot=plot)
             aph = plots[plot](data, fmt='summary')

             t_data = activation_types_hlsmodel(hls_model)
             types_plots[plot](t_data, fmt='summary')

-            plt.title("Distribution of (non-zero) activations (final / after optimization)")
+            plt.title('Distribution of (non-zero) activations (final / after optimization)')
             plt.tight_layout()

     if hls_model_present and os.path.exists(tmp_output_dir):
@@ -583,13 +583,13 @@ def get_ymodel_keras(keras_model, X):
             layer.activation = None
             ymodel.update({layer.name: _get_outputs([layer], X, keras_model.input)})
             layer.activation = tmp_activation
-            name = layer.name + f"_{tmp_activation.__name__}"
+            name = layer.name + f'_{tmp_activation.__name__}'
         traced_layers.append(layer)
         layer_names.append(name)
     outputs = _get_outputs(traced_layers, X, keras_model.input)
     for name, output in zip(layer_names, outputs):
         ymodel[name] = output
-    print("Done taking outputs for Keras model.")
+    print('Done taking outputs for Keras model.')
     return ymodel

@@ -603,7 +603,7 @@ def _norm_diff(ymodel, ysim):
     # ---Bar Plot---
     f, ax = plt.subplots()
     plt.bar(list(diff.keys()), list(diff.values()))
-    plt.title("layer-by-layer output differences")
+    plt.title('layer-by-layer output differences')
     ax.set_ylabel('Norm of difference vector')
     plt.xticks(rotation=90)
     plt.tight_layout()
@@ -644,7 +644,7 @@ def _dist_diff(ymodel, ysim):
    ax.boxplot(list(diff.values()), sym='k+', positions=pos)

     # --formatting
-    plt.title("Layer-by-layer distribution of output differences")
+    plt.title('Layer-by-layer distribution of output differences')
     ax.set_xticklabels(list(diff.keys()))
     ax.set_ylabel('Normalized difference')
     ax.set_ylabel('Percent difference.')
     plt.xticks(rotation=90)
     plt.tight_layout()

     return f

-def compare(keras_model, hls_model, X, plot_type="dist_diff"):
+def compare(keras_model, hls_model, X, plot_type='dist_diff'):
     """Compare each layer's output in keras and hls model. Note that the hls_model should not be compiled before using this.

     Args:
@@ -676,11 +676,11 @@ def compare(keras_model, hls_model, X, plot_type="dist_diff"):
     ymodel = get_ymodel_keras(keras_model, X)
     _, ysim = hls_model.trace(X)

-    print("Plotting difference...")
+    print('Plotting difference...')
     f = plt.figure()
-    if plot_type == "norm_diff":
+    if plot_type == 'norm_diff':
         f = _norm_diff(ymodel, ysim)
-    elif plot_type == "dist_diff":
+    elif plot_type == 'dist_diff':
         f = _dist_diff(ymodel, ysim)

     return f
diff --git a/hls4ml/model/types.py b/hls4ml/model/types.py
index 14a9002310..3b5fdeefc3 100644
--- a/hls4ml/model/types.py
+++ b/hls4ml/model/types.py
@@ -471,7 +471,7 @@ def update_precision(self, new_precision):
             self.precision_fmt = f'{{:.{decimal_spaces}f}}'

         else:
-            raise RuntimeError(f"Unexpected new precision type: {new_precision}")
+            raise RuntimeError(f'Unexpected new precision type: {new_precision}')


 class CompressedWeightVariable(WeightVariable):
diff --git a/hls4ml/optimization/dsp_aware_pruning/__init__.py b/hls4ml/optimization/dsp_aware_pruning/__init__.py
index 69e2029e0e..d4855f6f3f 100644
--- a/hls4ml/optimization/dsp_aware_pruning/__init__.py
+++ b/hls4ml/optimization/dsp_aware_pruning/__init__.py
@@ -33,7 +33,7 @@ def optimize_keras_model_for_hls4ml(
     knapsack_solver='CBC_MIP',
     regularization_range=default_regularization_range,
 ):
-    '''
+    """
     Top-level function for optimizing a Keras model, given hls4ml config and a hardware objective(s)

     Args:
@@ -73,7 +73,7 @@ def optimize_keras_model_for_hls4ml(

     Returns:
         keras.Model: Optimized model
-    '''
+    """

     # Extract model attributes
     model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)
diff --git a/hls4ml/optimization/dsp_aware_pruning/attributes.py b/hls4ml/optimization/dsp_aware_pruning/attributes.py
index f652f27d50..f79ae2b661 100644
--- a/hls4ml/optimization/dsp_aware_pruning/attributes.py
+++ b/hls4ml/optimization/dsp_aware_pruning/attributes.py
@@ -7,7 +7,7 @@

 class hls4mlAttributes:
-    '''
+    """
     A class for storing hls4ml information of a single layer

     Args:
@@ -19,7 +19,7 @@ class hls4mlAttributes:
         output_precision (FixedPrecisionType): Layer output precision
         reuse_factor (int): Layer reuse factor
         parallelization_factor (int): Layer parallelization factor - [applicable to io_parallel Conv2D]
-    '''
+    """

     def __init__(
         self, n_in, n_out, io_type, strategy, weight_precision, output_precision, reuse_factor, parallelization_factor=1
@@ -47,7 +47,7 @@ def __init__(

 class OptimizationAttributes:
-    '''
+    """
     A class for storing layer optimization attributes

     Args:
@@ -61,7 +61,7 @@ class OptimizationAttributes:
     Notes:
         - In the case of hls4ml, pattern_offset is equivalent to the number of weights processed in parallel
         - The pattern_offset is n_in * n_out / reuse_factor; default case (=1) is equivalent to no unrolling
-    '''
+    """

     def __init__(
         self,
@@ -84,7 +84,7 @@ def __init__(

 class
 class LayerAttributes:
-    '''
+    """
     A class for storing layer information

     Args:
@@ -99,7 +99,7 @@ class LayerAttributes:
             pruning or weight sharing, block shape and pattern offset
         args (dict): Additional information, e.g. hls4mlAttributes; dictionary so it can be generic enough
             for different platforms
-    '''
+    """

     def __init__(
         self,
@@ -141,7 +141,7 @@ def __str__(self):

 def get_attributes_from_keras_model(model):
-    '''
+    """
     Given a Keras model, builds a dictionary of class attributes
     Additional arguments (e.g. reuse factor), depend on the target hardware platform and are inserted later
     Per-layer pruning type (structured, pattern etc.), depend on the pruning objective and are inserted later

@@ -151,7 +151,7 @@ def get_attributes_from_keras_model(model):

     Returns:
         model_attributes (dict): Each key corresponds to a layer name, values are instances of LayerAttribute
-    '''
+    """
     is_sequential = model.__class__.__name__ == 'Sequential'
     model_attributes = {}

@@ -183,7 +183,7 @@ def get_attributes_from_keras_model(model):

 def get_attributes_from_keras_model_and_hls4ml_config(model, config):
-    '''
+    """
     Given a Keras model and hls4ml configuration, builds a dictionary of class attributes
     Per-layer pruning type (structured, pruning etc.), depend on the pruning objective and are inserted later

@@ -193,7 +193,7 @@ def get_attributes_from_keras_model_and_hls4ml_config(model, config):

     Returns:
         model_attributes (dict): Each key corresponds to a layer name, values are LayerAttribute instances
-    '''
+    """

     # Extract Keras attributes
     model_attributes = get_attributes_from_keras_model(model)

@@ -230,9 +230,9 @@ def get_attributes_from_keras_model_and_hls4ml_config(model, config):

 def __get_layer_mult_size(attributes):
-    '''
+    """
     Helper function to calculate layer multiplication size
-    '''
+    """
     if 'Dense' in attributes.layer_type.__name__:
         n_in = np.prod(attributes.input_shape)
         n_out = np.prod(attributes.output_shape)
diff --git a/hls4ml/optimization/dsp_aware_pruning/config.py b/hls4ml/optimization/dsp_aware_pruning/config.py
index 0879c47f62..6c9e5b77ef 100644
--- a/hls4ml/optimization/dsp_aware_pruning/config.py
+++ b/hls4ml/optimization/dsp_aware_pruning/config.py
@@ -1,6 +1,6 @@
 from enum import Enum

-'''
+"""
 A list of currently supported structures when optimizing (pruning, weight sharing)
 For more information, see attributes.py

@@ -37,7 +37,7 @@
     - Description: Zeroes out or quantizes all the weights in a block of size (w, h)
     - Supports: All rank-2 (e.g. Dense, but not Conv2D) layers in SUPPORTED_LAYERS (hls4ml.optimization.keras)
-'''
+"""


 class SUPPORTED_STRUCTURES(Enum):
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py b/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
index b525f58a33..165d2dd5e1 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/__init__.py
@@ -42,7 +42,7 @@ def optimize_model(
     knapsack_solver='CBC_MIP',
     regularization_range=default_regularization_range,
 ):
-    '''
+    """
     Top-level function for optimizing a Keras model, given objectives

     Args:
@@ -83,12 +83,12 @@ def optimize_model(

     Returns:
         keras.Model: Optimized model
-    '''
+    """
     if not isinstance(scheduler, OptimizationScheduler):
         raise Exception(
             'Scheduler must be an instance of hls4ml.optimization.scheduler.OptimizationScheduler'
-            'If you provided string description (e.g. \'constant\')'
+            "If you provided string description (e.g. 'constant')"
             'Please use an object instance (i.e. ConstantScheduler()).'
             'For a full list of supported schedulers, refer to hls4ml.optimization.scheduler.'
        )
@@ -295,14 +295,14 @@ def optimize_model(

 class MaskedBackprop:
-    '''
+    """
     A helper class to perform masked backprop (training with frozen weights)
     The important function is __call__ as it masks gradients, based on frozen weights
     While this function can exist without a class, taking masks as input would deplete memory
     Since a new graph is created for every call, causing a large run-time
     The trick is to set the masks, models etc. as class variables and then pass the sparsity
     As the sparsity changes, a new graph of the function is created
-    '''
+    """

     def __init__(self, model, loss_fn, attributes):
         self.model = model
@@ -315,7 +315,7 @@ def update_masks(self, masks):

     @tf.function
     def __call__(self, X, y, s):
-        '''
+        """
         Helper function performing backprop

         Args:
@@ -325,7 +325,7 @@ def __call__(self, X, y, s):

         Returns:
             - loss (tf.Variable): Model loss with input X and output y
-        '''
+        """
         grads = []
         with tf.GradientTape(persistent=True) as tape:
             output = self.model(X, training=True)
@@ -342,11 +342,11 @@ def __call__(self, X, y, s):

 def __compare__(x, y, leq=False):
-    '''
+    """
     Helper function for comparing two values, x & y
     Sometimes, we use the >= sign - e.g. pruned_accuracy >= tolerance * baseline_accuracy [ 0 <= tolerance <= 1]
     Other times, use the <= sign - e.g. pruned_mse <= tolerance * baseline_mse [tolerance >= 1]
-    '''
+    """
     if leq:
         return x <= y
     else:
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/builder.py b/hls4ml/optimization/dsp_aware_pruning/keras/builder.py
index 4ba39e4f7b..e2ba652d04 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/builder.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/builder.py
@@ -16,9 +16,9 @@

 class HyperOptimizationModel(kt.HyperModel):
-    '''
+    """
     Helper class for Keras Tuner
-    '''
+    """

     def __init__(self, model, attributes, optimizer, loss_fn, validation_metric, regularization_range):
         """Create new instance of HyperOptimizationModel
@@ -111,7 +111,7 @@ def build_optimizable_model(
     tuner='Bayesian',
     regularization_range=default_regularization_range,
 ):
-    '''
+    """
     Function identifying optimizable layers and adding a regularization loss

     Notes:
@@ -141,7 +141,7 @@ def build_optimizable_model(

     Returns:
         keras.Model: Model prepared for optimization
-    '''
+    """
     # User provided manual hyper-parameters for regularisation loss
     # TODO - Maybe we could extend this to be hyper-parameters per layer? or layer-type?
     # Currently, the same (manually-set) hyper-parameter is set for every layer
@@ -237,7 +237,7 @@ def build_optimizable_model(

 def remove_custom_regularizers(model):
-    '''
+    """
     Helper function to remove custom regularizers (DenseRegularizer & Conv2DRegularizer)
     This makes it possible to load the model in a different environment without hls4ml installed

@@ -246,7 +246,7 @@ def remove_custom_regularizers(model):

     Returns:
         keras.Model: Model without custom regularizers
-    '''
+    """
     weights = model.get_weights()
     for layer in model.layers:
         if hasattr(layer, 'kernel_regularizer'):
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/config.py b/hls4ml/optimization/dsp_aware_pruning/keras/config.py
index d3ade5933d..0b99acc770 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/config.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/config.py
@@ -1,15 +1,15 @@
 from qkeras import QConv2D, QDense
 from tensorflow.keras.layers import Conv2D, Dense

-'''
+"""
 Optimizable layers in Keras / QKeras
 Any new layers need to be registered here first
 Additional logic in the source files may need to be written (e.g.
recurrent layers should also optimize recurrent kernels)
-'''
+"""
 SUPPORTED_LAYERS = (Dense, Conv2D, QDense, QConv2D)


-'''
+"""
 Supported ranking metrics, for classifying redundant (groups of) weights

 1. l1 - groups of weights are ranked by their l1 norm
@@ -17,10 +17,10 @@
 3. oracle - abs(dL / dw * w), introduced by Molchanov et al. (2016)
     Pruning Convolutional Neural Networks for Resource Efficient Inference
 4. saliency - (d^2L / dw^2 * w)^2, introduced by Lecun et al. (1989) Optimal Brain Damage
-'''
+"""
 SUPPORTED_METRICS = ('l1', 'l2', 'oracle', 'saliency')


-'''
+"""
 Temporary directory for storing best models, tuning results etc.
-'''
+"""
 TMP_DIRECTORY = 'hls4ml-optimization-keras'
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/masking.py b/hls4ml/optimization/dsp_aware_pruning/keras/masking.py
index dddeddf6f7..85481c3771 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/masking.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/masking.py
@@ -22,7 +22,7 @@ def get_model_masks(
     hessians=None,
     knapsack_solver='CBC_MIP',
 ):
-    '''
+    """
     Function calculating a binary mask for all optimizable layers
     Entries equal to one correspond to the weight being updated during the training
     Entries equal to zero correspond to the weight being frozen during the training
@@ -60,7 +60,7 @@ def get_model_masks(

         - masks (dict): Layer-wise dictionary of binary tensors
         - offsets (dict): Layer-wise dictionary of offsets for every weight
-    '''
+    """

     if metric not in SUPPORTED_METRICS:
         raise Exception('Unknown metric for ranking weights')
@@ -82,10 +82,10 @@ def get_model_masks(

 def __get_masks_local(keras_model, model_attributes, sparsity, objective, metric, gradients, hessians, knapsack_solver):
-    '''
+    """
     Function calculating a layer-wise binary mask for all optimizable layers
     This function performs layer-wise masking, so all layers have the same sparsity (with respect to the objective)
-    '''
+    """
     masks = {}
     offsets = {}

@@ -413,11 +413,11 @@ def __get_masks_local(keras_model, model_attributes, sparsity, objective, metric

 def __get_masks_global(keras_model, model_attributes, sparsity, objective, metric, gradients, hessians, knapsack_solver):
-    '''
+    """
     Function calculating a layer-wise binary mask for all optimizable layers
     Global masking, with layers of different sparsity; masks are calculated by solving a Knapsack problem
     Most of the logic remains similar to local masking; comments describing implementation are given in the function above
-    '''
+    """
     groups = []
     total_resources = []

@@ -783,9 +783,9 @@ def __get_masks_global(keras_model, model_attributes, sparsity, objective, metri

 class __WeightGroups__:
-    '''
+    """
     A helper class containing information about a group of weights
-    '''
+    """

     def __init__(self, value, resources, layer_position, structure_type=None, layer_name=None, optimization_type=None):
         self.value = value
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py b/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
index 12fb534799..0864be11c0 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/reduction.py
@@ -6,7 +6,7 @@

 def reduce_model(model):
-    '''
+    """
     Function for removing zero neurons & filters from a model and rewiring the model graph
     This function is built on top of Keras Surgeon available at: https://github.com/BenWhetton/keras-surgeon
     Keras Surgeon is no longer under active development and does not work for TensorFlow 2.3+ and QKeras
@@ -20,7 +20,7 @@ def reduce_model(model):

     Returns:
         reduced (keras.model): Modified model, with redundant structures removed
-    '''
+    """
     try:
         from kerassurgeon import Surgeon
     except ModuleNotFoundError:
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py b/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
index b42eb3f056..00ffdf5724 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/regularizers.py
@@ -6,7 +6,7 @@

 @tf.keras.utils.register_keras_serializable(name='DenseRegularizer')
 class DenseRegularizer(tf.keras.regularizers.Regularizer):
-    '''
+    """
     A flexible regularizer for Dense layers, simultaneously penalizing high values and variance

     Args:
@@ -36,7 +36,7 @@ class DenseRegularizer(tf.keras.regularizers.Regularizer):
         They likely use less than one BRAM block (e.g. if the BRAM width is 36 bit and weight width is 16)
         In that case, we need to group several patterns together,
         So the entire block of patterns can be removed, thus saving DSP and BRAM
-    '''
+    """

     def __init__(
         self,
@@ -148,7 +148,7 @@ def get_config(self):

 @tf.keras.utils.register_keras_serializable(name='Conv2DRegularizer')
 class Conv2DRegularizer(tf.keras.regularizers.Regularizer):
-    '''
+    """
     A flexible regularizer for Conv2D layers, simultaneously performing pruning and clustering

     Args:
@@ -169,7 +169,7 @@ class Conv2DRegularizer(tf.keras.regularizers.Regularizer):
             (group weights of dimensionality filt_width x filt_height x n_chan)
         - structure_type = pattern: regularization on groups of every n-th weight in flattened array
             (e.g. grouping by reuse factor in hls4ml)
-    '''
+    """

     def __init__(
         self,
diff --git a/hls4ml/optimization/dsp_aware_pruning/keras/utils.py b/hls4ml/optimization/dsp_aware_pruning/keras/utils.py
index bf10f6feb0..53a140c163 100644
--- a/hls4ml/optimization/dsp_aware_pruning/keras/utils.py
+++ b/hls4ml/optimization/dsp_aware_pruning/keras/utils.py
@@ -4,7 +4,7 @@

 @tf.function
 def get_model_gradients(model, loss_fn, X, y):
-    '''
+    """
     Calculate model gradients with respect to weights

     Args:
@@ -15,7 +15,7 @@ def get_model_gradients(model, loss_fn, X, y):

     Returns:
         grads (dict): Per-layer gradients of loss with respect to weights
-    '''
+    """
     grads = {}
     # While persistent GradientTape slows down execution,
     # Is faster than performing forward pass and non-persistent for every layer
@@ -32,7 +32,7 @@ def get_model_gradients(model, loss_fn, X, y):

 @tf.function
 def get_model_hessians(model, loss_fn, X, y):
-    '''
+    """
     Calculate the second derivatives of the loss with respect to model weights.

     Note, only diagonal elements of the Hessian are computed.

     Args:
@@ -45,7 +45,7 @@ def get_model_hessians(model, loss_fn, X, y):

     Returns:
         grads (dict): Per-layer second derivatives of loss with respect to weights
-    '''
+    """
     grads = {}
     with tf.GradientTape(persistent=True) as tape:
         output = model(X, training=False)
@@ -59,7 +59,7 @@ def get_model_hessians(model, loss_fn, X, y):

 def get_model_sparsity(model):
-    '''
+    """
     Calculate total and per-layer model sparsity

     Args:
@@ -72,7 +72,7 @@ def get_model_sparsity(model):

         - layers (dict): Key-value dictionary; each key is a layer name and the associated value is the layer's sparsity

     TODO - Extend support for recurrent layers (recurrent_kernel)
-    '''
+    """
     total_weights = 0
     zero_weights = 0

@@ -93,7 +93,7 @@ def get_model_sparsity(model):

 # TODO - Does this work for non-linear models (e.g. skip connections) ?
 def get_last_layer_with_weights(model):
-    '''
+    """
     Finds the last layer with weights

     The last layer with weights determines the output shape, so, pruning is sometimes not applicable to it.

@@ -105,7 +105,7 @@ def get_last_layer_with_weights(model):

     Returns:
         idx (int): Index location of last layer with params
-    '''
+    """
     for idx, layer in reversed(list(enumerate(model.layers))):
         if hasattr(layer, 'kernel'):
             return idx
diff --git a/hls4ml/optimization/dsp_aware_pruning/knapsack.py b/hls4ml/optimization/dsp_aware_pruning/knapsack.py
index 541222dc63..b1532a57c7 100644
--- a/hls4ml/optimization/dsp_aware_pruning/knapsack.py
+++ b/hls4ml/optimization/dsp_aware_pruning/knapsack.py
@@ -5,7 +5,7 @@

 def solve_knapsack(values, weights, capacity, implementation='CBC_MIP', **kwargs):
-    '''
+    """
     A function for solving the Knapsack problem

     Args:
@@ -52,7 +52,7 @@ def solve_knapsack(values, weights, capacity, implementation='CBC_MIP', **kwargs
         For pruning & weight sharing this is never a problem
         In case non-integer weights and capacities are required,
         All of the values should be scaled by an appropriate scaling factor
-    '''
+    """
     if implementation not in ('dynamic', 'greedy', 'branch_bound', 'CBC_MIP'):
         raise Exception('Unknown algorithm for solving Knapsack')

@@ -111,13 +111,13 @@ def solve_knapsack(values, weights, capacity, implementation='CBC_MIP', **kwargs

 def __solve_1d_knapsack_dp(values, weights, capacity):
-    '''
+    """
     Helper function to solve the 1-dimensional Knapsack problem exactly through dynamic programming
     The dynamic programming approach is only suitable for one-dimensional weight constraints
     Furthermore, it has a high computational complexity and it is not suitable for highly-dimensional arrays
     NOTE: The weights and corresponding weight constraint need to be integers;
     If not, they should be scaled and rounded beforehand
-    '''
+    """
     assert len(weights.shape) == 1

     # Build look-up table in bottom-up approach
@@ -148,11 +148,11 @@ def __solve_1d_knapsack_dp(values, weights, capacity):

 def __solve_knapsack_greedy(values, weights, capacity):
-    '''
+    """
     Helper function that solves the n-dimensional Knapsack algorithm with a greedy algorithm
     The greedy approach should only be used for problems with many items or highly dimensional weights
     The solution can [and often will] be sub-optimal; otherwise, dynamic programming, branch & bound etc. should be used
-    '''
+    """

     # For each item, calculate the value per weight ratio (this can be thought of as item efficiency)
     # The weights are scaled for every dimension, to avoid inherent bias towards large weights in a single dimension
@@ -183,7 +183,7 @@ def __solve_knapsack_greedy(values, weights, capacity):

 def __solve_knapsack_branch_and_bound(values, weights, capacity, time_limit=sys.float_info.max, scaling_factor=10e4):
-    '''
+    """
     Helper function to solve Knapsack problem using Branch and Bound;
     Implemented using Google OR-Tools [weights & capacities need to be integers]
     The algorithm explores the search space (a tree of all the possible combinations, 2^N nodes),
@@ -194,7 +194,7 @@ def __solve_knapsack_branch_and_bound(values, weights, capacity, time_limit=sys.
         After which B&B search should stop and return a sub-optimal solution
     - scaling_factor - Factor to scale floats in values arrays;
         OR-Tools requires all values & weights to be integers;
-    '''
+    """
     try:
         from ortools.algorithms import pywrapknapsack_solver
     except ModuleNotFoundError:
@@ -211,7 +211,7 @@
def __solve_knapsack_cbc_mip(values, weights, capacity, time_limit=sys.float_info.max, scaling_factor=10e4): - ''' + """ Helper function to solve Knapsack problem using the CBC MIP solver using Google OR-Tools Additional args: @@ -219,7 +219,7 @@ def __solve_knapsack_cbc_mip(values, weights, capacity, time_limit=sys.float_inf - scaling_factor - Factor to scale floats in values arrays; OR-Tools requires all values & weights to be integers; So all of the values are scaled by a large number - ''' + """ try: from ortools.algorithms import pywrapknapsack_solver except ModuleNotFoundError: @@ -236,12 +236,12 @@ def __solve_knapsack_cbc_mip(values, weights, capacity, time_limit=sys.float_inf def __solve_knapsack_equal_weights(values, weights, capacity): - ''' + """ Helper function that solves the n-dimensional Knapsack algorithm with a greedy algorithm The assumption is that all the items have the same weight; while this seems a bit artificial It occurs often in pruning - e.g. in pattern pruning, each DSP block saves one DSP; however, as a counter-example In structured pruning, each structure can save a different amount of FLOPs (Conv2D filter vs Dense neuron) - ''' + """ assert np.all([weights[i, :] == weights[i, 0] for i in range(weights.shape[0])]) # Find items with the highest value diff --git a/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py b/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py index 45204aaf73..bae4ded30b 100644 --- a/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py +++ b/hls4ml/optimization/dsp_aware_pruning/objectives/__init__.py @@ -6,23 +6,23 @@ from hls4ml.optimization.dsp_aware_pruning.attributes import OptimizationAttributes from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES -''' +""" Pruning & weight sharing are formulated as an optimization problem, with the aim of minimizing some metric Metrics can include: total number of weights, DSP utilization, latency, FLOPs etc. -''' +""" class ObjectiveEstimator(ABC): - ''' + """ Abstract class with methods for estimating the utilization and savings of a certain layer, with respect to some objective For each objective, an inherited class is written with the correct implementation of the below methods The objectives can be multi-dimensional, e.g. DSPs and BRAM Care needs to be taken when optimizing several objectives, especially if conflicting - ''' + """ @abstractmethod def is_layer_optimizable(self, layer_attributes): - ''' + """ For a given layer, checks whether optimizations make sense, with respect to the given objective(s) Furthermore, it returns the type of optimization (structured, unstructured etc.) Most suitable for minimizing the objective(s). @@ -43,12 +43,12 @@ def is_layer_optimizable(self, layer_attributes): (Vivado doesn't use DSP when precision < 9) - Metric = DSP, Layer = Dense, Precision = ap_fixed<16, 6> -> return True, pattern structure, both pruning and weight sharing - ''' + """ pass @abstractmethod def layer_resources(self, layer_attributes): - ''' + """ For a given layer, how many units of the metric are used, given a generic weight matrix Args: @@ -59,12 +59,12 @@ def layer_resources(self, layer_attributes): Example: Metric = Total weights, Layer = Dense, shape = (4, 4) -> return [16] [regardless of layer sparsity] - ''' + """ pass @abstractmethod def layer_savings(self, layer_attributes): - ''' + """ For a given layer, how many units of the metric are saved, when optimizing one structure The structure type, alongside its parameters (e.g. 
block shape) are stored in layer attributes For best results, OptimizationAttributes in layer_attributes should be obtained from is_layer_optimizable @@ -80,15 +80,15 @@ Metric = Total weights, Layer = Dense, shape = (4, 4): - structure_type == unstructured -> return [1] - structure_type == structured -> return [4] - ''' + """ pass class ParameterEstimator(ObjectiveEstimator): - ''' + """ A class containing objective estimation with the goal of minimizing The number of non-zero weights in a layer [corresponds to unstructured pruning] - ''' + """ @classmethod def is_layer_optimizable(self, layer_attributes): diff --git a/hls4ml/optimization/dsp_aware_pruning/scheduler.py b/hls4ml/optimization/dsp_aware_pruning/scheduler.py index a371ef85fb..886d37f41e 100644 --- a/hls4ml/optimization/dsp_aware_pruning/scheduler.py +++ b/hls4ml/optimization/dsp_aware_pruning/scheduler.py @@ -2,14 +2,14 @@ class OptimizationScheduler(ABC): - ''' + """ Baseline class handling logic regarding target sparsity and its updates at every step - ''' + """ def __init__(self, initial_sparsity=0, final_sparsity=1): - ''' + """ initial_sparsity and final_sparsity are between 0.0 and 1.0, NOT 0% and 100% - ''' + """ if initial_sparsity < 0 or initial_sparsity > 1: raise Exception('initial_sparsity must be between 0.0 and 1.0') if final_sparsity < 0 or final_sparsity > 1: @@ -20,7 +20,7 @@ def __init__(self, initial_sparsity=0, final_sparsity=1): @abstractmethod def update_step(self): - ''' + """ Increments the current sparsity, according to the rule. Examples: @@ -33,12 +33,12 @@ - updated (boolean) - Has the sparsity changed? If not, the optimization algorithm can stop - sparsity (float) - Updated sparsity - ''' + """ pass @abstractmethod def repair_step(self): - ''' + """ Method used when the neural architecture does not satisfy the performance requirement for a given sparsity. Then, the target sparsity is decreased according to the rule. @@ -52,7 +52,7 @@ - updated (boolean) - Has the sparsity changed?
If not, the optimization algorithm can stop - sparsity (float) - Updated sparsity - ''' + """ pass def get_sparsity(self): @@ -60,11 +60,11 @@ class ConstantScheduler(OptimizationScheduler): - ''' + """ Sparsity updated by a constant term, until (i) sparsity target reached OR (ii) optimization algorithm stops requesting state updates - ''' + """ def __init__(self, initial_sparsity=0, final_sparsity=1.0, update_step=0.05): self.increment = update_step @@ -77,7 +77,7 @@ def update_step(self): else: return False, self.sparsity - ''' + """ In certain cases, a model might underperform at the current sparsity level, But perform better at a higher sparsity In this case, constant sparsity (since it increments by a small amount every time), @@ -85,18 +85,18 @@ The model's performance over several sparsity levels is tracked and optimization is stopped after high loss over several trials (see top level pruning/optimization function) - ''' + """ def repair_step(self): return self.update_step() class BinaryScheduler(OptimizationScheduler): - ''' + """ Sparsity updated by binary halving the search space; constantly updates lower and upper bounds In the update step, sparsity is incremented, as the midpoint between previous sparsity and target sparsity (upper bound) In the repair step, sparsity is decremented, as the midpoint between the lower bound and previous sparsity - ''' + """ def __init__(self, initial_sparsity=0, final_sparsity=1.0, threshold=0.01): self.threshold = threshold @@ -122,7 +122,7 @@ def repair_step(self): class PolynomialScheduler(OptimizationScheduler): - ''' + """ Sparsity updated by a polynomial decay, until (i) sparsity target reached OR (ii) optimization algorithm stops requesting state updates @@ -137,7 +137,7 @@ In this case, polynomial sparsity will simply jump to the next sparsity level The model's performance over several sparsity levels is tracked and optimization is stopped after high loss over several trials (see top level pruning/optimization function) - ''' + """ def __init__(self, maximum_steps, initial_sparsity=0, final_sparsity=1.0, decay_power=3): self.decay_power = decay_power diff --git a/hls4ml/report/__init__.py b/hls4ml/report/__init__.py index 1afe1598fa..4776291cfb 100644 --- a/hls4ml/report/__init__.py +++ b/hls4ml/report/__init__.py @@ -1,10 +1,18 @@ -from hls4ml.report.catapult_report import parse_catapult_report # noqa: F401 -from hls4ml.report.catapult_report import qofr # noqa: F401 -from hls4ml.report.catapult_report import read_catapult_report # noqa: F401 -from hls4ml.report.oneapi_report import parse_oneapi_report # noqa: F401 -from hls4ml.report.oneapi_report import print_oneapi_report # noqa: F401 -from hls4ml.report.quartus_report import parse_quartus_report # noqa: F401 -from hls4ml.report.quartus_report import read_quartus_report # noqa: F401 -from hls4ml.report.vivado_report import parse_vivado_report # noqa: F401 -from hls4ml.report.vivado_report import print_vivado_report # noqa: F401 -from hls4ml.report.vivado_report import read_vivado_report # noqa: F401 +from hls4ml.report.catapult_report import ( + parse_catapult_report, + qofr, + read_catapult_report, +) +from hls4ml.report.oneapi_report import ( + parse_oneapi_report, + print_oneapi_report, +) +from hls4ml.report.quartus_report import ( + parse_quartus_report, + read_quartus_report, +) +from hls4ml.report.vivado_report import ( + parse_vivado_report, + print_vivado_report,
read_vivado_report, +) diff --git a/hls4ml/report/catapult_report.py b/hls4ml/report/catapult_report.py index 563a3a7594..e3a286bbcd 100755 --- a/hls4ml/report/catapult_report.py +++ b/hls4ml/report/catapult_report.py @@ -137,7 +137,7 @@ def parse_catapult_report(output_dir): with open(output_dir + '/hls4ml_config.yml') as yfile: ydata = yaml.safe_load(yfile) - if not ydata['ProjectDir'] is None: + if ydata['ProjectDir'] is not None: ProjectDir = ydata['ProjectDir'] else: ProjectDir = ydata['ProjectName'] + '_prj' diff --git a/hls4ml/report/oneapi_report.py b/hls4ml/report/oneapi_report.py index 55ad2532c0..575f7ccc08 100644 --- a/hls4ml/report/oneapi_report.py +++ b/hls4ml/report/oneapi_report.py @@ -41,7 +41,6 @@ def _find_projects(hls_dir): def _parse_single_report(prjDir): - if not os.path.exists(prjDir): print(f'Path {prjDir} does not exist. Exiting.') return @@ -137,7 +136,7 @@ def _parse_single_report(prjDir): def parse_oneapi_report(hls_dir): - ''' + """ Parse a report from a given oneAPI project as a dictionary. Args: @@ -145,7 +144,7 @@ def parse_oneapi_report(hls_dir): Returns: results (dict): The report dictionary, containing latency, resource usage etc. - ''' + """ prjList = _find_projects(hls_dir) if not prjList: return @@ -159,7 +158,7 @@ def parse_oneapi_report(hls_dir): def print_oneapi_report(report_dict): - ''' + """ Prints the oneAPI report dictionary as a table. Args: @@ -168,7 +167,7 @@ def print_oneapi_report(report_dict): Returns: None - ''' + """ for prjTarget, prjReport in report_dict.items(): if len(report_dict) > 1: print('*' * 54 + '\n') @@ -208,7 +207,7 @@ def _is_running_in_notebook(): return False # Probably standard Python interpreter -_table_css = ''' +_table_css = """ -''' +""" -_table_base_template = ''' +_table_base_template = """ @@ -262,11 +261,10 @@ def _is_running_in_notebook(): {table_rows}
-''' +""" def _make_html_table_template(table_header, row_templates): - num_columns = len(next(iter(row_templates.values()))) _row_html_template = ' {{}}' + ''.join('{{{}}}' for _ in range(num_columns)) + '' @@ -278,7 +276,6 @@ def _make_html_table_template(table_header, row_templates): def _make_str_table_template(table_header, row_templates): - len_title = 0 for row_title in row_templates.keys(): if len(row_title) > len_title: diff --git a/hls4ml/report/quartus_report.py b/hls4ml/report/quartus_report.py index 21e5b89c63..12ff13e6c4 100644 --- a/hls4ml/report/quartus_report.py +++ b/hls4ml/report/quartus_report.py @@ -6,7 +6,7 @@ def parse_quartus_report(hls_dir, write_to_file=True): - ''' + """ Parse a report from a given Quartus project as a dictionary. Args: @@ -16,7 +16,7 @@ def parse_quartus_report(hls_dir, write_to_file=True): Returns: results (dict): The report dictionary, containing latency, resource usage etc. - ''' + """ if not os.path.exists(hls_dir): print(f'Path {hls_dir} does not exist. Exiting.') return @@ -31,19 +31,19 @@ def parse_quartus_report(hls_dir, write_to_file=True): results = _find_reports(rpt_dir) print(results) if write_to_file: - print("Here") - f = open(hls_dir + '/' 'synthesis-report.txt', 'w') + print('Here') + f = open(hls_dir + '/synthesis-report.txt', 'w') f.write('HLS Synthesis Latency & Resource Usage Report') for key in results: f.write(str(key) + ':' + str(results[key]) + '\n') - print("There") + print('There') print(f'Saved latency & resource usage summary to {hls_dir}/synthesis-report.txt') return results @requires('quartus-report') def read_quartus_report(hls_dir, open_browser=False): - ''' + """ Parse and print the Quartus report to print the report. Optionally open a browser. Args: @@ -52,7 +52,7 @@ def read_quartus_report(hls_dir, open_browser=False): Returns: None - ''' + """ from tabulate import tabulate report = parse_quartus_report(hls_dir) @@ -73,7 +73,7 @@ def read_quartus_report(hls_dir, open_browser=False): def _find_project_dir(hls_dir): - ''' + """ Finds the synthesis folder from the HLS project directory Args: @@ -81,7 +81,7 @@ def _find_project_dir(hls_dir): Returns: project_dir (string): Synthesis folder within HLS project directory - ''' + """ top_func_name = None with open(hls_dir + '/build_lib.sh') as f: @@ -94,7 +94,7 @@ def _find_project_dir(hls_dir): @requires('quartus-report') def read_js_object(js_script): - ''' + """ Reads the JavaScript file and return a dictionary of variables definded in the script. 
Args: @@ -102,7 +102,7 @@ Returns: Dictionary of variables defined in the script - ''' + """ from calmjs.parse import asttypes, es5 def visit(node): @@ -110,7 +110,7 @@ def visit(node): d = {} for child in node: if not isinstance(child, asttypes.VarStatement): - raise ValueError("All statements should be var statements") + raise ValueError('All statements should be var statements') key, val = visit(child) d[key] = val return d @@ -133,7 +133,7 @@ def visit(node): elif isinstance(node.left, asttypes.Number) and isinstance(node.right, asttypes.Number): return visit(node.left) + visit(node.right) else: - raise ValueError("Cannot + on anything other than two literals") + raise ValueError('Cannot + on anything other than two literals') else: raise ValueError("Cannot do operator '%s'" % node.op) @@ -144,20 +144,20 @@ def visit(node): elif isinstance(node, asttypes.Null): return None elif isinstance(node, asttypes.Boolean): - if str(node) == "false": + if str(node) == 'false': return False else: return True elif isinstance(node, asttypes.Identifier): return node.value else: - raise Exception("Unhandled node: %r" % node) + raise Exception('Unhandled node: %r' % node) return visit(es5(js_script)) def _read_quartus_file(filename): - ''' + """ Reads results (clock frequency, resource usage) obtained through FPGA synthesis (full Quartus compilation) Args: @@ -165,14 +165,14 @@ Returns: results (dict): Resource usage obtained through Quartus Compile - ''' + """ with open(filename) as dataFile: quartus_data = dataFile.read() quartus_data = read_js_object(quartus_data) results = {} - if quartus_data['quartusJSON']['quartusFitClockSummary']['nodes'][0]['clock'] != "TBD": + if quartus_data['quartusJSON']['quartusFitClockSummary']['nodes'][0]['clock'] != 'TBD': results['Clock'] = quartus_data['quartusJSON']['quartusFitClockSummary']['nodes'][0]['clock'] results['Quartus ALM'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['alm'] results['Quartus REG'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['reg'] @@ -188,7 +188,7 @@ def _read_hls_file(filename): - ''' + """ Reads HLS resource estimate obtained through HLS synthesis Args: @@ -196,7 +196,7 @@ Returns: results (dict): Resource usage obtained through HLS Estimation - ''' + """ with open(filename) as dataFile: report_data = dataFile.read() report_data = report_data[: report_data.rfind('var fileJSON')] @@ -220,7 +220,7 @@ def _read_verification_file(filename): - ''' + """ Reads verification data (latency, initiation interval) obtained through simulation Args: @@ -228,7 +228,7 @@ Returns: results (dict): Verification data obtained from simulation - ''' + """ results = {} if os.path.isfile(filename): with open(filename) as dataFile: @@ -238,12 +238,12 @@ try: results['Number of Invocations'] = verification_data['verifJSON']['functions'][0]['data'][0] - latency = verification_data['verifJSON']['functions'][0]['data'][1].split(",") + latency = verification_data['verifJSON']['functions'][0]['data'][1].split(',') results['Latency (MIN)'] = latency[0] results['Latency (MAX)'] = latency[1] results['Latency (AVG)'] = latency[2] - ii = verification_data['verifJSON']['functions'][0]['data'][2].split(",") + ii = verification_data['verifJSON']['functions'][0]['data'][2].split(',')
results['ii (MIN)'] = ii[0] results['ii (MAX)'] = ii[1] results['ii (AVG)'] = ii[2] diff --git a/hls4ml/report/vivado_report.py b/hls4ml/report/vivado_report.py index d63d729fdc..4e3c4fd537 100644 --- a/hls4ml/report/vivado_report.py +++ b/hls4ml/report/vivado_report.py @@ -464,7 +464,7 @@ def _is_running_in_notebook(): """ -_row_base_template = " {row_title}{{{row_key}}}" +_row_base_template = ' {row_title}{{{row_key}}}' def _make_html_table_template(table_header, row_templates): diff --git a/hls4ml/utils/__init__.py b/hls4ml/utils/__init__.py index f03cacb754..aac6dd6618 100644 --- a/hls4ml/utils/__init__.py +++ b/hls4ml/utils/__init__.py @@ -1,3 +1,3 @@ -from hls4ml.utils.config import config_from_keras_model, config_from_onnx_model, config_from_pytorch_model # noqa: F401 -from hls4ml.utils.example_models import fetch_example_list, fetch_example_model # noqa: F401 -from hls4ml.utils.plot import plot_model # noqa: F401 +from hls4ml.utils.config import config_from_keras_model, config_from_onnx_model, config_from_pytorch_model +from hls4ml.utils.example_models import fetch_example_list, fetch_example_model +from hls4ml.utils.plot import plot_model diff --git a/hls4ml/utils/attribute_descriptions.py b/hls4ml/utils/attribute_descriptions.py index 05653f8fdf..a233cf542b 100644 --- a/hls4ml/utils/attribute_descriptions.py +++ b/hls4ml/utils/attribute_descriptions.py @@ -39,8 +39,7 @@ 'Currently only supported in io_parallel.' ) conv_implementation = ( - '"LineBuffer" implementation is preferred over "Encoded" for most use cases. ' - 'This attribute only applies to io_stream.' + '"LineBuffer" implementation is preferred over "Encoded" for most use cases. This attribute only applies to io_stream.' ) # Recurrent-related attributes diff --git a/hls4ml/utils/config.py b/hls4ml/utils/config.py index b14d1ce99d..797c4b1cf6 100644 --- a/hls4ml/utils/config.py +++ b/hls4ml/utils/config.py @@ -68,16 +68,16 @@ def _get_precision_from_quantizer(quantizer): 'linear', ] signed = True - rnd = "AP_TRN" - overflow = "AP_WRAP" + rnd = 'AP_TRN' + overflow = 'AP_WRAP' if quantizer['class_name'] in supported_quantizers: bits = int(quantizer['config']['bits']) # if integer isn't specified, it should be the same as bits integer = int(quantizer['config'].get('integer', bits - 1)) + 1 # for quantizers use the following default rounding and overflow - rnd = "AP_RND_CONV" - overflow = "AP_SAT" + rnd = 'AP_RND_CONV' + overflow = 'AP_SAT' if quantizer['class_name'] in ('quantized_relu', 'quantized_relu_po2'): if quantizer['config']['negative_slope'] != 0.0: signed = True @@ -85,7 +85,7 @@ def _get_precision_from_quantizer(quantizer): signed = False integer -= 1 elif quantizer['class_name'] == 'quantized_tanh': - overflow = "AP_SAT_SYM" if quantizer['config']['symmetric'] else "AP_SAT" + overflow = 'AP_SAT_SYM' if quantizer['config']['symmetric'] else 'AP_SAT' integer = 1 elif quantizer['class_name'] == 'quantized_sigmoid': integer = 0 diff --git a/hls4ml/utils/example_models.py b/hls4ml/utils/example_models.py index 657f14325b..7d1b180968 100644 --- a/hls4ml/utils/example_models.py +++ b/hls4ml/utils/example_models.py @@ -67,12 +67,12 @@ def _filter_name(model_name): def _load_example_data(model_name): - print("Downloading input & output example files ...") + print('Downloading input & output example files ...') filtered_name = _filter_name(model_name) - input_file_name = filtered_name + "_input.dat" - output_file_name = filtered_name + "_output.dat" + input_file_name = filtered_name + '_input.dat' + output_file_name = 
filtered_name + '_output.dat' link_to_input = f'https://raw.githubusercontent.com/{ORGANIZATION}/example-models/{BRANCH}/data/' + input_file_name link_to_output = f'https://raw.githubusercontent.com/{ORGANIZATION}/example-models/{BRANCH}/data/' + output_file_name @@ -82,11 +82,11 @@ def _load_example_config(model_name): - print("Downloading configuration files ...") + print('Downloading configuration files ...') filtered_name = _filter_name(model_name) - config_name = filtered_name + "_config.yml" + config_name = filtered_name + '_config.yml' link_to_config = f'https://raw.githubusercontent.com/{ORGANIZATION}/example-models/{BRANCH}/config-files/' + config_name @@ -144,7 +144,7 @@ def fetch_example_model(model_name, backend='Vivado'): download_link_model = download_link + model_type + '/' + model_name # Download the example model - print("Downloading example model files ...") + print('Downloading example model files ...') urlretrieve( download_link_model, model_name, @@ -162,7 +162,7 @@ # If the model is a keras model then we have to download its weight file as well if model_type == 'keras' and '.json' in model_name: - model_weight_name = model_name[:-5] + "_weights.h5" + model_weight_name = model_name[:-5] + '_weights.h5' download_link_weight = download_link + model_type + '/' + model_weight_name urlretrieve(download_link_weight, model_weight_name) diff --git a/hls4ml/utils/fixed_point_utils.py b/hls4ml/utils/fixed_point_utils.py index 020e5083c7..c09c0a1279 100644 --- a/hls4ml/utils/fixed_point_utils.py +++ b/hls4ml/utils/fixed_point_utils.py @@ -1,7 +1,7 @@ import math import sys -''' +""" A helper class for handling fixed point methods Currently, very limited, allowing only: - Conversion to float @@ -9,11 +9,11 @@ - Reciprocals Used primarily for generating softmax look-up table by using bit manipulation (see Vivado-equivalent implementation) -''' +""" class FixedPointEmulator: - ''' + """ Default constructor Args: - N : Total number of bits in the fixed point number @@ -22,7 +22,7 @@ class FixedPointEmulator: - signed : True/False - If True, use 2's complement when converting to float - self.integer_bits : Bits corresponding to the integer part of the number - self.decimal_bits : Bits corresponding to the decimal part of the number - ''' + """ def __init__(self, N, I, signed=True, integer_bits=None, decimal_bits=None): # noqa E741 self.N = N @@ -32,7 +32,7 @@ def __init__(self, N, I, signed=True, integer_bits=None, decimal_bits=None): # self.integer_bits = [0] * self.I if integer_bits is None else integer_bits self.decimal_bits = [0] * self.F if decimal_bits is None else decimal_bits - ''' + """ Converts the fixed point number stored in self.bits to a floating point Args: - None @@ -45,7 +45,7 @@ def __init__(self, N, I, signed=True, integer_bits=None, decimal_bits=None): # 3. Traverse through decimal bits, incrementing result by 2.0^(-i) (using pow) Note: - This function uses left shifts instead of integer powers of 2.
- ''' + """ def to_float(self): val = float(int(self.integer_bits[0]) << (self.I - 1)) @@ -60,11 +60,11 @@ def to_float(self): return val - ''' + """ Sets the top bits of the current number Args: - bits : Values top bit should be set to - ''' + """ def set_msb_bits(self, bits): for i in range(0, len(bits)): @@ -73,7 +73,7 @@ def set_msb_bits(self, bits): elif i >= self.I and i < self.N: self.decimal_bits[i - self.I] = bits[i] - ''' + """ Returns e^x, where x is the current fixed point number Args: - None @@ -81,7 +81,7 @@ def set_msb_bits(self, bits): - Float : e^x, rounded some number of decimal points Notice: - If e^x overflow, maximum value of float is used - ''' + """ def exp_float(self, sig_figs=12): try: @@ -89,13 +89,13 @@ def exp_float(self, sig_figs=12): except OverflowError: return round(sys.float_info.max, sig_figs) - ''' + """ Returns 1/x, where x is the current fixed point number Args: - None Returns: - Float : 1/x, rounded some number of decimal points - ''' + """ def inv_float(self, sig_figs=12): if self.to_float() != 0: @@ -104,14 +104,14 @@ def inv_float(self, sig_figs=12): return round(sys.float_info.max, sig_figs) -''' +""" Converts unsigned integer i to N-bit binary number Args: - i : Number to be converted - N : Number of bits to be used Note: - N > log2(i)+1 -''' +""" def uint_to_binary(i, N): diff --git a/hls4ml/utils/plot.py b/hls4ml/utils/plot.py index e3424bb1ad..1a582d8356 100644 --- a/hls4ml/utils/plot.py +++ b/hls4ml/utils/plot.py @@ -58,10 +58,10 @@ def model_to_dot( if 'IPython.core.magics.namespace' in sys.modules: # We don't raise an exception here in order to avoid crashing notebook # tests where graphviz is not available. - print('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') + print('Failed to import pydot. You must install pydot and graphviz for `pydotprint` to work.') return else: - raise ImportError('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') + raise ImportError('Failed to import pydot. You must install pydot and graphviz for `pydotprint` to work.') if subgraph: dot = pydot.Cluster(style='dashed', graph_name=model.name) diff --git a/hls4ml/utils/profiling_utils.py b/hls4ml/utils/profiling_utils.py index aba6eedda3..f161c67bd6 100644 --- a/hls4ml/utils/profiling_utils.py +++ b/hls4ml/utils/profiling_utils.py @@ -47,7 +47,6 @@ def _torch_rnn(layer): class WeightsTorch: - def __init__(self, model: torch.nn.Module, fmt: str = 'longform', plot: str = 'boxplot') -> None: self.model = model self.fmt = fmt @@ -58,13 +57,13 @@ def __init__(self, model: torch.nn.Module, fmt: str = 'longform', plot: str = 'b def _find_layers(self, model, module_name): for name, module in model.named_children(): if isinstance(module, (torch.nn.Sequential, torch.nn.ModuleList)): - self._find_layers(module, module_name + "." + name) + self._find_layers(module, module_name + '.' + name) elif isinstance(module, (torch.nn.Module)) and self._is_parameterized(module): if len(list(module.named_children())) != 0: # custom nn.Module, continue search - self._find_layers(module, module_name + "." + name) + self._find_layers(module, module_name + '.' + name) else: - self._register_layer(module_name + "." + name) + self._register_layer(module_name + '.' 
+ name) def _is_registered(self, name: str) -> bool: return name in self.registered_layers diff --git a/hls4ml/utils/torch.py b/hls4ml/utils/torch.py index bfd2c9f0ca..25d2754b1f 100644 --- a/hls4ml/utils/torch.py +++ b/hls4ml/utils/torch.py @@ -10,7 +10,6 @@ class HLS4MLModule(torch.nn.Module): class CustomFXTracer(torch.fx.Tracer): - def is_leaf_module(self, m, module_qualified_name: str) -> bool: """ Custom Tracer class for hls4ml to define Brevitas modules and custom modules as leaf modules so they are not traced diff --git a/hls4ml/writer/__init__.py b/hls4ml/writer/__init__.py index 8de19fe1d2..e915d729c2 100644 --- a/hls4ml/writer/__init__.py +++ b/hls4ml/writer/__init__.py @@ -5,7 +5,7 @@ from hls4ml.writer.vitis_writer import VitisWriter from hls4ml.writer.vivado_accelerator_writer import VivadoAcceleratorWriter from hls4ml.writer.vivado_writer import VivadoWriter -from hls4ml.writer.writers import Writer, get_writer, register_writer # noqa: F401 +from hls4ml.writer.writers import Writer, get_writer, register_writer register_writer('Vivado', VivadoWriter) register_writer('VivadoAccelerator', VivadoAcceleratorWriter) diff --git a/hls4ml/writer/catapult_writer.py b/hls4ml/writer/catapult_writer.py index 9a48460995..3204394ff3 100755 --- a/hls4ml/writer/catapult_writer.py +++ b/hls4ml/writer/catapult_writer.py @@ -25,28 +25,28 @@ def print_array_to_cpp(self, var, odir, write_txt_file=True): write_txt_file (bool, optional): Write txt files in addition to .h files. Defaults to True. """ - h_file = open(f"{odir}/firmware/weights/{var.name}.h", "w") + h_file = open(f'{odir}/firmware/weights/{var.name}.h', 'w') if write_txt_file: - txt_file = open(f"{odir}/firmware/weights/{var.name}.txt", "w") + txt_file = open(f'{odir}/firmware/weights/{var.name}.txt', 'w') # meta data - h_file.write(f"//Numpy array shape {var.shape}\n") - h_file.write(f"//Min {np.min(var.min):.12f}\n") - h_file.write(f"//Max {np.max(var.max):.12f}\n") - h_file.write(f"//Number of zeros {var.nzeros}\n") - h_file.write("\n") + h_file.write(f'//Numpy array shape {var.shape}\n') + h_file.write(f'//Min {np.min(var.min):.12f}\n') + h_file.write(f'//Max {np.max(var.max):.12f}\n') + h_file.write(f'//Number of zeros {var.nzeros}\n') + h_file.write('\n') - h_file.write(f"#ifndef {var.name.upper()}_H_\n") - h_file.write(f"#define {var.name.upper()}_H_\n") - h_file.write("\n") + h_file.write(f'#ifndef {var.name.upper()}_H_\n') + h_file.write(f'#define {var.name.upper()}_H_\n') + h_file.write('\n') if write_txt_file: - h_file.write("#ifndef __SYNTHESIS__\n") - h_file.write("// global extern pointer only - actual array allocated in myproject_test.cpp\n") - h_file.write("extern " + var.definition_cpp() + ";\n") - h_file.write("#else\n") + h_file.write('#ifndef __SYNTHESIS__\n') + h_file.write('// global extern pointer only - actual array allocated in myproject_test.cpp\n') + h_file.write('extern ' + var.definition_cpp() + ';\n') + h_file.write('#else\n') - h_file.write(var.definition_cpp() + " = {") + h_file.write(var.definition_cpp() + ' = {') # fill c++ array. 
# not including internal brackets for multidimensional case @@ -55,12 +55,12 @@ def print_array_to_cpp(self, var, odir, write_txt_file=True): h_file.write(sep + x) if write_txt_file: txt_file.write(sep + x) - sep = ", " - h_file.write("};\n") + sep = ', ' + h_file.write('};\n') if write_txt_file: - h_file.write("#endif\n") + h_file.write('#endif\n') txt_file.close() - h_file.write("\n#endif\n") + h_file.write('\n#endif\n') h_file.close() def write_output_dir(self, model): @@ -69,8 +69,8 @@ def write_output_dir(self, model): Args: model (ModelGraph): the hls4ml model. """ - if not os.path.isdir(f"{model.config.get_output_dir()}/firmware/weights"): - os.makedirs(f"{model.config.get_output_dir()}/firmware/weights") + if not os.path.isdir(f'{model.config.get_output_dir()}/firmware/weights'): + os.makedirs(f'{model.config.get_output_dir()}/firmware/weights') @staticmethod def _make_array_pragma(variable, model): @@ -104,7 +104,7 @@ def _make_array_pragma(variable, model): return template.format(mode=mode.upper(), name=variable.name, type=typ, factor=factor, dim=0) elif mode == 'stream': - fifo = model.config.get_config_value("FIFO") + fifo = model.config.get_config_value('FIFO') if fifo is not None: retstr = f'#pragma hls_resource {variable.name}:cns variables="{variable.name}"' retstr += f' map_to_module="{fifo}" // depth="{depth}"' @@ -132,7 +132,7 @@ def _make_array_fifo_pragma(variable, model): factor = 0 if mode == 'stream': - fifo = model.config.get_config_value("FIFO") + fifo = model.config.get_config_value('FIFO') if fifo is not None: return f'// #pragma hls_fifo_depth {depth} {factor}' else: @@ -150,31 +150,31 @@ def write_project_cpp(self, model): filedir = os.path.dirname(os.path.abspath(__file__)) fout = open(f'{model.config.get_output_dir()}/firmware/layer_summary.txt', 'w') - outstr = "" - outstr = outstr + "{}".format("Layer Name").ljust(25) - outstr = outstr + " {}".format("Layer Class").ljust(20) - outstr = outstr + " {}".format("Input Type").ljust(40) - outstr = outstr + " {}".format("Input Shape").ljust(15) - outstr = outstr + " {}".format("Output Type").ljust(40) - outstr = outstr + " {}".format("Output Shape").ljust(15) + outstr = '' + outstr = outstr + '{}'.format('Layer Name').ljust(25) + outstr = outstr + ' {}'.format('Layer Class').ljust(20) + outstr = outstr + ' {}'.format('Input Type').ljust(40) + outstr = outstr + ' {}'.format('Input Shape').ljust(15) + outstr = outstr + ' {}'.format('Output Type').ljust(40) + outstr = outstr + ' {}'.format('Output Shape').ljust(15) # outstr = outstr + " {}".format("Weight Type").ljust(24) # outstr = outstr + " {}".format("Bias Type").ljust(24) - outstr = outstr + " {}".format("Filter Shape").ljust(15) - outstr = outstr + " {}".format("Stride").ljust(10) - outstr = outstr + " {}".format("IOType").ljust(15) - outstr = outstr + " {}".format("Reuse").ljust(10) - - fout.write(outstr + "\n") - input_shape = "" - input_datatype = "" + outstr = outstr + ' {}'.format('Filter Shape').ljust(15) + outstr = outstr + ' {}'.format('Stride').ljust(10) + outstr = outstr + ' {}'.format('IOType').ljust(15) + outstr = outstr + ' {}'.format('Reuse').ljust(10) + + fout.write(outstr + '\n') + input_shape = '' + input_datatype = '' for layer in model.get_layers(): - datatype = layer.get_output_variable().type.precision.definition_cpp() + " " - shape = "" + datatype = layer.get_output_variable().type.precision.definition_cpp() + ' ' + shape = '' # layer.get_output_variable().type.precision.width # layer.get_output_variable().type.precision.integer # 
layer.get_output_variable().type.precision.sign for _k, v in layer.get_output_variable().get_shape(): - shape = shape + "[" + str(v) + "]" + shape = shape + '[' + str(v) + ']' if layer.attributes.layer.class_name != 'Input': my_class_name = layer.class_name @@ -195,33 +195,33 @@ def write_project_cpp(self, model): # print(weights.type.precision.signed) # print(weights.data_length) - filter = "" + filter = '' filt_width = layer.get_attr('filt_width') filt_height = layer.get_attr('filt_height') if filt_width is not None: - filter = "[" + str(filt_width) + "]" + filter = '[' + str(filt_width) + ']' if filt_height is not None: - filter = filter + "[" + str(filt_height) + "]" + filter = filter + '[' + str(filt_height) + ']' - stride = "" + stride = '' stride_width = layer.get_attr('stride_width') if stride_width is not None: stride = str(stride_width) - outstr = "" - outstr = outstr + f"{layer.name}".ljust(25) - outstr = outstr + f" {my_class_name}".ljust(20) - outstr = outstr + f" {input_datatype}".ljust(40) - outstr = outstr + f" {input_shape}".ljust(15) - outstr = outstr + f" {datatype}".ljust(40) - outstr = outstr + f" {shape}".ljust(15) + outstr = '' + outstr = outstr + f'{layer.name}'.ljust(25) + outstr = outstr + f' {my_class_name}'.ljust(20) + outstr = outstr + f' {input_datatype}'.ljust(40) + outstr = outstr + f' {input_shape}'.ljust(15) + outstr = outstr + f' {datatype}'.ljust(40) + outstr = outstr + f' {shape}'.ljust(15) # outstr = outstr + " {}".format("weight type").ljust(24) # outstr = outstr + " {}".format("bias type").ljust(24) - outstr = outstr + f" {filter}".ljust(15) - outstr = outstr + f" {stride}".ljust(10) - outstr = outstr + " {}".format(layer.model.config.get_config_value('IOType')).ljust(15) - outstr = outstr + f" {str(layer.model.config.get_reuse_factor(layer))}".ljust(10) - fout.write(outstr + "\n") + outstr = outstr + f' {filter}'.ljust(15) + outstr = outstr + f' {stride}'.ljust(10) + outstr = outstr + ' {}'.format(layer.model.config.get_config_value('IOType')).ljust(15) + outstr = outstr + f' {str(layer.model.config.get_reuse_factor(layer))}'.ljust(10) + fout.write(outstr + '\n') input_shape = shape input_datatype = datatype @@ -276,7 +276,7 @@ def write_project_cpp(self, model): all_inputs = [i.name for i in model_inputs] all_outputs = [o.name for o in model_outputs] all_brams = [b.name for b in model_brams] - io_type = model.config.get_config_value("IOType") + io_type = model.config.get_config_value('IOType') if io_type == 'io_serial' or io_type == 'io_stream': # Eventually this will be amba.ccs_axi4stream_in and amba.ccs_axi4stream_out @@ -293,7 +293,7 @@ def write_project_cpp(self, model): all_inputs = [i.name for i in model_inputs] all_outputs = [o.name for o in model_outputs] all_brams = [b.name for b in model_brams] - io_type = model.config.get_config_value("IOType") + io_type = model.config.get_config_value('IOType') if io_type == 'io_parallel': for i in model_inputs: @@ -318,7 +318,7 @@ def write_project_cpp(self, model): newline += indent + '// #pragma HLS DATAFLOW \n' elif '// hls-fpga-machine-learning insert layers' in line: - io_type = model.config.get_config_value("IOType") + io_type = model.config.get_config_value('IOType') newline = line + '\n' for layer in model.get_layers(): vars = layer.get_variables() @@ -469,7 +469,7 @@ def write_parameters(self, model): if w.storage.lower() != 'bram': newline += f'#include "weights/{w.name}.h"\n' - elif "// hls-fpga-machine-learning insert layer-config" in line: + elif '// hls-fpga-machine-learning insert 
layer-config' in line: newline = line for layer in model.get_layers(): config = layer.get_attr('config_cpp', None) @@ -500,10 +500,10 @@ def __make_dat_file(self, original_path, project_path): """ # Take in data from current supported data files - if original_path[-3:] == "npy": + if original_path[-3:] == 'npy': data = np.load(original_path) else: - raise Exception("Unsupported input/output data files.") + raise Exception('Unsupported input/output data files.') # Faltten data, just keep first dimension data = data.reshape(data.shape[0], -1) @@ -511,11 +511,11 @@ def __make_dat_file(self, original_path, project_path): def print_data(f): for i in range(data.shape[0]): for j in range(data.shape[1]): - f.write(str(data[i][j]) + " ") - f.write("\n") + f.write(str(data[i][j]) + ' ') + f.write('\n') # Print out in dat file - with open(project_path, "w") as f: + with open(project_path, 'w') as f: print_data(f) def write_test_bench(self, model): @@ -534,13 +534,13 @@ def write_test_bench(self, model): output_predictions = model.config.get_config_value('OutputPredictions') if input_data: - if input_data[-3:] == "dat": + if input_data[-3:] == 'dat': copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') else: self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') if output_predictions: - if output_predictions[-3:] == "dat": + if output_predictions[-3:] == 'dat': copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat') else: self.__make_dat_file( @@ -563,13 +563,13 @@ def write_test_bench(self, model): elif '// hls-fpga-machine-learning insert bram' in line: newline = line for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' + newline += f'#include "firmware/weights/{bram.name}.h"\n' elif '// hls-fpga-machine-learning insert declare weights' in line: newline = line for layer in model.get_layers(): for w in layer.get_weights(): - newline += w.definition_cpp() + ";\n" + newline += w.definition_cpp() + ';\n' elif '// hls-fpga-machine-learning insert load weights' in line: newline = line @@ -682,12 +682,12 @@ def write_bridge(self, model): elif '// hls-fpga-machine-learning insert bram' in line: newline = line for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' + newline += f'#include "firmware/weights/{bram.name}.h"\n' elif '// hls-fpga-machine-learning insert declare weights' in line: newline = line for layer in model.get_layers(): for w in layer.get_weights(): - newline += w.definition_cpp() + ";\n" + newline += w.definition_cpp() + ';\n' elif '// hls-fpga-machine-learning insert header' in line: dtype = line.split('#', 1)[1].strip() inputs_str = ', '.join([f'{dtype} {i.name}[{i.size_cpp()}]' for i in model_inputs]) @@ -825,7 +825,7 @@ def write_nnet_utils(self, model): for h in headers: copyfile(srcpath + h, dstpath + h) - print("Copying NNET files to local firmware directory") + print('Copying NNET files to local firmware directory') filedir = os.path.dirname(os.path.abspath(__file__)) for pkg in ('ac_types', 'ac_math', 'ac_simutils'): @@ -840,10 +840,10 @@ def write_nnet_utils(self, model): if os.path.exists(srcpath): if os.path.exists(dstpath): rmtree(dstpath) - print("... copying AC " + pkg + " headers from " + srcpath) + print('... copying AC ' + pkg + ' headers from ' + srcpath) copytree(srcpath, dstpath) else: - print("... 
skipping copy of " + pkg + " headers - assumed to located in Catapult install tree") + print('... skipping copy of ' + pkg + ' headers - assumed to located in Catapult install tree') # custom source filedir = os.path.dirname(os.path.abspath(__file__)) @@ -911,7 +911,7 @@ def write_tar(self, model): with tarfile.open(model.config.get_output_dir() + '.tar.gz', mode='w:gz') as archive: archive.add(model.config.get_output_dir(), recursive=True) else: - print("Project .tar.gz archive already exists") + print('Project .tar.gz archive already exists') def write_hls(self, model): print('Writing HLS project') diff --git a/hls4ml/writer/oneapi_writer.py b/hls4ml/writer/oneapi_writer.py index e93f8b5ca3..2c7450baeb 100644 --- a/hls4ml/writer/oneapi_writer.py +++ b/hls4ml/writer/oneapi_writer.py @@ -16,7 +16,6 @@ class OneAPIWriter(Writer): - def __make_dat_file(self, original_path, project_path): """ Convert other input/output data types into a dat file, which is @@ -25,10 +24,10 @@ def __make_dat_file(self, original_path, project_path): """ # Take in data from current supported data files - if original_path[-3:] == "npy": + if original_path[-3:] == 'npy': data = np.load(original_path) else: - raise Exception("Unsupported input/output data files.") + raise Exception('Unsupported input/output data files.') # Faltten data, just keep first dimension data = data.reshape(data.shape[0], -1) @@ -36,11 +35,11 @@ def __make_dat_file(self, original_path, project_path): def print_data(f): for i in range(data.shape[0]): for j in range(data.shape[1]): - f.write(str(data[i][j]) + " ") - f.write("\n") + f.write(str(data[i][j]) + ' ') + f.write('\n') # Print out in dat file - with open(project_path, "w") as f: + with open(project_path, 'w') as f: print_data(f) def get_max_reuse_factor(self, model): @@ -59,30 +58,30 @@ def print_array_to_cpp(self, var, layer, odir): layer (Layer): Instance of the layer to which the weights belong odir (str): Output directory """ - with open(f"{odir}/src/firmware/weights/{var.name}.h", "w") as h_file: + with open(f'{odir}/src/firmware/weights/{var.name}.h', 'w') as h_file: # meta data - h_file.write(f"//Numpy array shape {var.shape}\n") - h_file.write(f"//Min {np.min(var.min):.12f}\n") - h_file.write(f"//Max {np.max(var.max):.12f}\n") - h_file.write(f"//Number of zeros {var.nzeros}\n") - h_file.write("\n") + h_file.write(f'//Numpy array shape {var.shape}\n') + h_file.write(f'//Min {np.min(var.min):.12f}\n') + h_file.write(f'//Max {np.max(var.max):.12f}\n') + h_file.write(f'//Number of zeros {var.nzeros}\n') + h_file.write('\n') - h_file.write(f"#ifndef {var.name.upper()}_H_\n") - h_file.write(f"#define {var.name.upper()}_H_\n") - h_file.write("\n") + h_file.write(f'#ifndef {var.name.upper()}_H_\n') + h_file.write(f'#define {var.name.upper()}_H_\n') + h_file.write('\n') rf = int(layer.get_attr('reuse_factor', 1)) - h_file.write(var.definition_cpp(rf) + " = {{") + h_file.write(var.definition_cpp(rf) + ' = {{') # fill c++ array. # not including internal brackets for multidimensional case sep = '' for x in var: h_file.write(sep + x) - sep = ", " - h_file.write("}};\n") - h_file.write("\n#endif\n") + sep = ', ' + h_file.write('}};\n') + h_file.write('\n#endif\n') def write_project_dir(self, model): """Write the base project directory @@ -90,8 +89,8 @@ def write_project_dir(self, model): Args: model (ModelGraph): the hls4ml model. 
""" - if not os.path.isdir(f"{model.config.get_output_dir()}/src/firmware/weights"): - os.makedirs(f"{model.config.get_output_dir()}/src/firmware/weights") + if not os.path.isdir(f'{model.config.get_output_dir()}/src/firmware/weights'): + os.makedirs(f'{model.config.get_output_dir()}/src/firmware/weights') def write_project_cpp(self, model): """Write the main architecture source file (myproject.cpp) @@ -111,7 +110,7 @@ def write_project_cpp(self, model): model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] if len(model_brams) != 0: - raise NotImplementedError("Weights on the interface is currently not supported") + raise NotImplementedError('Weights on the interface is currently not supported') io_type = model.config.get_config_value('IOType') indent = ' ' @@ -313,7 +312,7 @@ def write_parameters(self, model): ): newline += '#include "%s"\n' % include - elif "// hls-fpga-machine-learning insert layer-config" in line: + elif '// hls-fpga-machine-learning insert layer-config' in line: newline = line for layer in model.get_layers(): config = layer.get_attr('config_cpp', None) @@ -354,11 +353,11 @@ def write_test_bench(self, model): model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] if len(model_brams) != 0: - raise NotImplementedError("Weights on the interface is currently not supported") + raise NotImplementedError('Weights on the interface is currently not supported') if len(model_inputs) != 1 or len(model_outputs) != 1: - print("The testbench supports only single input arrays and single output arrays.") - print("Please modify it before using it.") + print('The testbench supports only single input arrays and single output arrays.') + print('Please modify it before using it.') if not os.path.exists(f'{model.config.get_output_dir()}/tb_data/'): os.mkdir(f'{model.config.get_output_dir()}/tb_data/') @@ -367,13 +366,13 @@ def write_test_bench(self, model): output_predictions = model.config.get_config_value('OutputPredictions') if input_data: - if input_data[-3:] == "dat": + if input_data[-3:] == 'dat': copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') else: self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') if output_predictions: - if output_predictions[-3:] == "dat": + if output_predictions[-3:] == 'dat': copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat') else: self.__make_dat_file( @@ -395,7 +394,7 @@ def write_test_bench(self, model): elif '// hls-fpga-machine-learning insert bram' in line: newline = line for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' + newline += f'#include "firmware/weights/{bram.name}.h"\n' elif '// hls-fpga-machine-learning insert zero' in line: newline = line inp = model_inputs[0] @@ -456,7 +455,7 @@ def write_bridge(self, model): elif '// hls-fpga-machine-learning insert bram' in line: newline = line for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' + newline += f'#include "firmware/weights/{bram.name}.h"\n' elif '// hls-fpga-machine-learning insert class def' in line: dtype = line.split('#', 1)[1].strip() @@ -583,7 +582,7 @@ def __write_elu_table(self, model, path): in_val = -8.0 * i / float(table_size) real_val = np.exp(in_val) - 1.0 h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -608,7 +607,7 @@ def __write_sigmoid_table(self, 
model, path): real_val = 1.0 / (1 + np.exp(-in_val)) if real_val >= 0.5: h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -633,7 +632,7 @@ def __write_tanh_table(self, model, path): real_val = np.tanh(in_val) if real_val >= 0: h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -650,7 +649,7 @@ def __write_softplus_table(self, model, path): in_val = 2 * 8.0 * (i - float(table_size) / 2.0) / float(table_size) real_val = np.log(np.exp(in_val) + 1.0) h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -675,7 +674,7 @@ def __write_softsign_table(self, model, path): real_val = in_val / (np.fabs(in_val) + 1.0) if real_val >= 0: h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -692,7 +691,7 @@ def __write_selu_table(self, model, path): in_val = -8.0 * i / float(table_size) real_val = 1.0507009873554804934193349852946 * (1.6732632423543772848170429916717 * (np.exp(in_val) - 1.0)) h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -738,7 +737,7 @@ def __write_exp_table(self, model, path): f.set_msb_bits(b) real_val = f.exp_float() h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -781,7 +780,7 @@ def __write_invert_table(self, model, path): f.set_msb_bits(b) real_val = f.inv_float() h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -820,7 +819,7 @@ def __write_exp_table_latency(self, model, path): f.set_msb_bits(uint_to_binary(i, N)) real_val = f.exp_float() h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -859,7 +858,7 @@ def __write_invert_table_latency(self, model, path): f.set_msb_bits(uint_to_binary(i, N)) real_val = f.inv_float() h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -876,7 +875,7 @@ def __write_exp_table_legacy(self, model, path): in_val = 2 * 8.0 * (i - float(table_size) / 2.0) / float(table_size) real_val = np.exp(in_val) h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() @@ -895,7 +894,7 @@ def __write_invert_table_legacy(self, model, path): if in_val > 0.0: real_val = 1.0 / in_val h_file.write(sep + str(real_val)) - sep = ", " + sep = ', ' h_file.write('};\n') h_file.close() diff --git a/hls4ml/writer/quartus_writer.py b/hls4ml/writer/quartus_writer.py index 403e9f6b2d..4b7c027267 100644 --- a/hls4ml/writer/quartus_writer.py +++ b/hls4ml/writer/quartus_writer.py @@ -29,10 +29,10 @@ def __make_dat_file(self, original_path, project_path): """ # Take in data from current supported data files - if original_path[-3:] == "npy": + if original_path[-3:] == 'npy': data = np.load(original_path) else: - raise Exception("Unsupported input/output data files.") + raise Exception('Unsupported input/output data files.') # Faltten data, just keep first dimension data = data.reshape(data.shape[0], -1) @@ -40,11 +40,11 @@ def __make_dat_file(self, original_path, project_path): def print_data(f): for i in range(data.shape[0]): for j in range(data.shape[1]): - f.write(str(data[i][j]) + " ") - f.write("\n") + f.write(str(data[i][j]) + ' ') + f.write('\n') # Print out in dat file - with open(project_path, "w") as f: + with open(project_path, 'w') as f: print_data(f) def get_max_reuse_factor(self, model): @@ -63,18 +63,18 @@ 
def print_array_to_cpp(self, var, layer, odir): layer (Layer): Instance of the layer to which the weights belong odir (str): Output directory """ - h_file = open(f"{odir}/firmware/weights/{var.name}.h", "w") + h_file = open(f'{odir}/firmware/weights/{var.name}.h', 'w') # meta data - h_file.write(f"//Numpy array shape {var.shape}\n") - h_file.write(f"//Min {np.min(var.min):.12f}\n") - h_file.write(f"//Max {np.max(var.max):.12f}\n") - h_file.write(f"//Number of zeros {var.nzeros}\n") - h_file.write("\n") + h_file.write(f'//Numpy array shape {var.shape}\n') + h_file.write(f'//Min {np.min(var.min):.12f}\n') + h_file.write(f'//Max {np.max(var.max):.12f}\n') + h_file.write(f'//Number of zeros {var.nzeros}\n') + h_file.write('\n') - h_file.write(f"#ifndef {var.name.upper()}_H_\n") - h_file.write(f"#define {var.name.upper()}_H_\n") - h_file.write("\n") + h_file.write(f'#ifndef {var.name.upper()}_H_\n') + h_file.write(f'#define {var.name.upper()}_H_\n') + h_file.write('\n') rf = int(layer.get_attr('reuse_factor', 1)) weight_header = '#ifdef __INTELFPGA_COMPILER__\n' @@ -107,16 +107,16 @@ def print_array_to_cpp(self, var, layer, odir): weight_header += 'static ' else: weight_header += 'static const ' - h_file.write(weight_header + var.definition_cpp() + " = {") + h_file.write(weight_header + var.definition_cpp() + ' = {') # fill c++ array. # not including internal brackets for multidimensional case sep = '' for x in var: h_file.write(sep + x) - sep = ", " - h_file.write("};\n") - h_file.write("\n#endif\n") + sep = ', ' + h_file.write('};\n') + h_file.write('\n#endif\n') h_file.close() def write_project_dir(self, model): @@ -125,8 +125,8 @@ def write_project_dir(self, model): Args: model (ModelGraph): the hls4ml model. """ - if not os.path.isdir(f"{model.config.get_output_dir()}/firmware/weights"): - os.makedirs(f"{model.config.get_output_dir()}/firmware/weights") + if not os.path.isdir(f'{model.config.get_output_dir()}/firmware/weights'): + os.makedirs(f'{model.config.get_output_dir()}/firmware/weights') def write_project_cpp(self, model): """Write the main architecture source file (myproject.cpp) @@ -460,7 +460,7 @@ def write_parameters(self, model): for include in sorted(set(sum((layer.get_attr('include_header', []) for layer in model.get_layers()), []))): newline += '#include "%s"\n' % include - elif "// hls-fpga-machine-learning insert layer-config" in line: + elif '// hls-fpga-machine-learning insert layer-config' in line: newline = line for layer in model.get_layers(): config = layer.get_attr('config_cpp', None) @@ -489,7 +489,7 @@ def write_testbench_parallel(self, model): model (ModelGraph): the hls4ml model. """ if len(model.get_output_variables()) != 1: - print("WARNING: The testbench only supports one output variable. Leaving empty testbench") + print('WARNING: The testbench only supports one output variable. 
Leaving empty testbench')
             return

        outvar = model.get_output_variables()[0]
@@ -503,13 +503,13 @@ def write_testbench_parallel(self, model):
        output_predictions = model.config.get_config_value('OutputPredictions')

        if input_data:
-            if input_data[-3:] == "dat":
+            if input_data[-3:] == 'dat':
                copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')
            else:
                self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')

        if output_predictions:
-            if output_predictions[-3:] == "dat":
+            if output_predictions[-3:] == 'dat':
                copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat')
            else:
                self.__make_dat_file(
@@ -529,7 +529,7 @@ def write_testbench_parallel(self, model):
            elif '// hls-fpga-machine-learning insert bram' in line:
                newline = line
                for bram in model_brams:
-                    newline += f'#include \"firmware/weights/{bram.name}.h\"\n'
+                    newline += f'#include "firmware/weights/{bram.name}.h"\n'
            elif '// hls-fpga-machine-learning insert data' in line:
                newline = line
                newline += '      std::vector::const_iterator in_begin = in.cbegin();\n'
@@ -598,7 +598,7 @@ def write_testbench_stream(self, model):
            model (ModelGraph): the hls4ml model.
        """
        if len(model.get_output_variables()) != 1:
-            print("WARNING: The testbench only supports one output variable. Leaving empty testbench")
+            print('WARNING: The testbench only supports one output variable. Leaving empty testbench')
            return

        outvar = model.get_output_variables()[0]
@@ -615,13 +615,13 @@ def write_testbench_stream(self, model):
        output_predictions = model.config.get_config_value('OutputPredictions')

        if input_data:
-            if input_data[-3:] == "dat":
+            if input_data[-3:] == 'dat':
                copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')
            else:
                self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')

        if output_predictions:
-            if output_predictions[-3:] == "dat":
+            if output_predictions[-3:] == 'dat':
                copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat')
            else:
                self.__make_dat_file(
@@ -642,7 +642,7 @@ def write_testbench_stream(self, model):
            elif '// hls-fpga-machine-learning insert bram' in line:
                newline = line
                for bram in model_brams:
-                    newline += f'#include \"firmware/weights/{bram.name}.h\"\n'
+                    newline += f'#include "firmware/weights/{bram.name}.h"\n'

            elif '// hls-fpga-machine learning instantiate inputs and outputs' in line:
                newline = line
@@ -773,7 +773,7 @@ def write_bridge(self, model):
            elif '// hls-fpga-machine-learning insert bram' in line:
                newline = line
                for bram in model_brams:
-                    newline += f'#include \"firmware/weights/{bram.name}.h\"\n'
+                    newline += f'#include "firmware/weights/{bram.name}.h"\n'

            elif '// hls-fpga-machine-learning insert header' in line:
                dtype = line.split('#', 1)[1].strip()
@@ -971,7 +971,7 @@ def __write_elu_table(self, model, path):
            in_val = -8.0 * i / float(table_size)
            real_val = np.exp(in_val) - 1.0
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -996,7 +996,7 @@ def __write_sigmoid_table(self, model, path):
            real_val = 1.0 / (1 + np.exp(-in_val))
            if real_val >= 0.5:
                h_file.write(sep + str(real_val))
-                sep = ", "
+                sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1021,7 +1021,7 @@ def __write_tanh_table(self, model, path):
            real_val = np.tanh(in_val)
            if real_val >= 0:
                h_file.write(sep + str(real_val))
-                sep = ", "
+                sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1038,7 +1038,7 @@ def __write_softplus_table(self, model, path):
            in_val = 2 * 8.0 * (i - float(table_size) / 2.0) / float(table_size)
            real_val = np.log(np.exp(in_val) + 1.0)
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1063,7 +1063,7 @@ def __write_softsign_table(self, model, path):
            real_val = in_val / (np.fabs(in_val) + 1.0)
            if real_val >= 0:
                h_file.write(sep + str(real_val))
-                sep = ", "
+                sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1080,7 +1080,7 @@ def __write_selu_table(self, model, path):
            in_val = -8.0 * i / float(table_size)
            real_val = 1.0507009873554804934193349852946 * (1.6732632423543772848170429916717 * (np.exp(in_val) - 1.0))
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1126,7 +1126,7 @@ def __write_exp_table(self, model, path):
            f.set_msb_bits(b)
            real_val = f.exp_float()
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1169,7 +1169,7 @@ def __write_invert_table(self, model, path):
            f.set_msb_bits(b)
            real_val = f.inv_float()
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1208,7 +1208,7 @@ def __write_exp_table_latency(self, model, path):
            f.set_msb_bits(uint_to_binary(i, N))
            real_val = f.exp_float()
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1247,7 +1247,7 @@ def __write_invert_table_latency(self, model, path):
            f.set_msb_bits(uint_to_binary(i, N))
            real_val = f.inv_float()
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1264,7 +1264,7 @@ def __write_exp_table_legacy(self, model, path):
            in_val = 2 * 8.0 * (i - float(table_size) / 2.0) / float(table_size)
            real_val = np.exp(in_val)
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
@@ -1283,7 +1283,7 @@ def __write_invert_table_legacy(self, model, path):
            if in_val > 0.0:
                real_val = 1.0 / in_val
            h_file.write(sep + str(real_val))
-            sep = ", "
+            sep = ', '

        h_file.write('};\n')
        h_file.close()
diff --git a/hls4ml/writer/vitis_writer.py b/hls4ml/writer/vitis_writer.py
index 94a73c2eaf..f3c6d02d3a 100644
--- a/hls4ml/writer/vitis_writer.py
+++ b/hls4ml/writer/vitis_writer.py
@@ -26,9 +26,9 @@ def write_nnet_utils_overrides(self, model):
            copy(srcpath + h, dstpath + h)

    def write_board_script_override(self, model):
-        '''
+        """
        Write the tcl scripts and kernel sources to create a Vitis IPI
-        '''
+        """

        ###################
        # project.tcl
diff --git a/hls4ml/writer/vivado_accelerator_writer.py b/hls4ml/writer/vivado_accelerator_writer.py
index 817847887d..30d797e48e 100644
--- a/hls4ml/writer/vivado_accelerator_writer.py
+++ b/hls4ml/writer/vivado_accelerator_writer.py
@@ -11,10 +11,10 @@ def __init__(self):
        self.vivado_accelerator_config = None

    def write_axi_wrapper(self, model):
-        '''Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces
+        """Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces
        Args:
            model : The ModelGraph to write the wrapper for
-        '''
+        """
        inp_axi_t, out_axi_t, inp, out = self.vivado_accelerator_config.get_corrected_types()
        indent = '    '

@@ -104,7 +104,7 @@ def write_axi_wrapper(self, model):
        f = open(os.path.join(filedir, '../templates/vivado_accelerator/myproject_axi.cpp'))
        fout = open(f'{model.config.get_output_dir()}/firmware/{model.config.get_project_name()}_axi.cpp', 'w')

-        io_type = model.config.get_config_value("IOType")
+        io_type = model.config.get_config_value('IOType')

        for line in f.readlines():
            if 'myproject' in line:
@@ -149,10 +149,10 @@ def write_axi_wrapper(self, model):
                newline += indent + '#pragma HLS INTERFACE axis port=in\n'
                newline += indent + '#pragma HLS INTERFACE axis port=out\n'
                newline += indent + '#pragma HLS INTERFACE ap_ctrl_none port=return\n'
-                if model.config.get_config_value("IOType") == 'io_stream':
+                if model.config.get_config_value('IOType') == 'io_stream':
                    newline += indent + '#pragma HLS DATAFLOW\n'
            elif '// hls-fpga-machine-learning insert enqueue' in line:
-                io_type = model.config.get_config_value("IOType")
+                io_type = model.config.get_config_value('IOType')
                if io_type == 'io_parallel':
                    newline = ''
                    newline += indent + 'for(unsigned i = 0; i < N_IN; i++){\n'
@@ -194,7 +194,7 @@ def write_axi_wrapper(self, model):
                    newline += indent + '}}\n'
                    newline = newline.format(input_t=inp.type.name)
            elif '// hls-fpga-machine-learning insert dequeue' in line:
-                io_type = model.config.get_config_value("IOType")
+                io_type = model.config.get_config_value('IOType')
                if io_type == 'io_parallel':
                    newline = ''
                    newline += indent + 'for(unsigned i = 0; i < N_OUT; i++){\n'
@@ -235,9 +235,9 @@ def write_axi_wrapper(self, model):
        fout.close()

    def modify_build_script(self, model):
-        '''
+        """
        Modify the build_prj.tcl and build_lib.sh scripts to add the extra wrapper files and set the top function
-        '''
+        """
        filedir = os.path.dirname(os.path.abspath(__file__))
        oldfile = f'{model.config.get_output_dir()}/build_prj.tcl'
        newfile = f'{model.config.get_output_dir()}/build_prj_axi.tcl'
@@ -364,9 +364,9 @@ def write_wrapper_test(self, model):
        os.rename(newfile, oldfile)

    def write_board_script(self, model):
-        '''
+        """
        Write the tcl scripts and kernel sources to create a Vivado IPI project for the VivadoAccelerator
-        '''
+        """
        filedir = os.path.dirname(os.path.abspath(__file__))
        copyfile(
            os.path.join(filedir, self.vivado_accelerator_config.get_tcl_file_path()),
diff --git a/hls4ml/writer/vivado_writer.py b/hls4ml/writer/vivado_writer.py
index 9838abbce4..22b0a58804 100644
--- a/hls4ml/writer/vivado_writer.py
+++ b/hls4ml/writer/vivado_writer.py
@@ -76,8 +76,8 @@ def write_project_dir(self, model):
        Args:
            model (ModelGraph): the hls4ml model.
        """
-        if not os.path.isdir(f"{model.config.get_output_dir()}/firmware/weights"):
-            os.makedirs(f"{model.config.get_output_dir()}/firmware/weights")
+        if not os.path.isdir(f'{model.config.get_output_dir()}/firmware/weights'):
+            os.makedirs(f'{model.config.get_output_dir()}/firmware/weights')

    @staticmethod
    def _make_array_pragma(variable):
@@ -165,7 +165,6 @@ def write_project_cpp(self, model):
            elif '// hls-fpga-machine-learning insert load weights' in line:
                newline = line
                if model.config.get_writer_config()['WriteWeightsTxt']:
-
                    newline += '#ifndef __SYNTHESIS__\n'
                    newline += '    static bool loaded_weights = false;\n'
                    newline += '    if (!loaded_weights) {\n'
@@ -410,7 +409,7 @@ def write_parameters(self, model):
                    if w.storage.lower() != 'bram':
                        newline += f'#include "weights/{w.name}.h"\n'

-            elif "// hls-fpga-machine-learning insert layer-config" in line:
+            elif '// hls-fpga-machine-learning insert layer-config' in line:
                newline = line
                for layer in model.get_layers():
                    config = layer.get_attr('config_cpp', None)
@@ -460,10 +459,10 @@ def __make_dat_file(self, original_path, project_path):
        """

        # Take in data from current supported data files
-        if original_path[-3:] == "npy":
+        if original_path[-3:] == 'npy':
            data = np.load(original_path)
        else:
-            raise Exception("Unsupported input/output data files.")
+            raise Exception('Unsupported input/output data files.')

        # Faltten data, just keep first dimension
        data = data.reshape(data.shape[0], -1)
@@ -471,11 +470,11 @@ def print_data(f):
        def print_data(f):
            for i in range(data.shape[0]):
                for j in range(data.shape[1]):
-                    f.write(str(data[i][j]) + " ")
-                f.write("\n")
+                    f.write(str(data[i][j]) + ' ')
+                f.write('\n')

        # Print out in dat file
-        with open(project_path, "w") as f:
+        with open(project_path, 'w') as f:
            print_data(f)

    def write_test_bench(self, model):
@@ -494,13 +493,13 @@ def write_test_bench(self, model):
        output_predictions = model.config.get_config_value('OutputPredictions')

        if input_data:
-            if input_data[-3:] == "dat":
+            if input_data[-3:] == 'dat':
                copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')
            else:
                self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat')

        if output_predictions:
-            if output_predictions[-3:] == "dat":
+            if output_predictions[-3:] == 'dat':
                copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat')
            else:
                self.__make_dat_file(
@@ -524,7 +523,7 @@ def write_test_bench(self, model):
            elif '// hls-fpga-machine-learning insert bram' in line:
                newline = line
                for bram in model_brams:
-                    newline += f'#include \"firmware/weights/{bram.name}.h\"\n'
+                    newline += f'#include "firmware/weights/{bram.name}.h"\n'

            elif '// hls-fpga-machine-learning insert data' in line:
                newline = line
@@ -630,7 +629,7 @@ def write_bridge(self, model):
            elif '// hls-fpga-machine-learning insert bram' in line:
                newline = line
                for bram in model_brams:
-                    newline += f'#include \"firmware/weights/{bram.name}.h\"\n'
+                    newline += f'#include "firmware/weights/{bram.name}.h"\n'

            elif '// hls-fpga-machine-learning insert header' in line:
                dtype = line.split('#', 1)[1].strip()
diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index adc3d680ab..3f6a86a585 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -4,10 +4,10 @@

 import yaml

-'''
+"""
 Create a Gitlab CI yml file with a separate entry for each test_* file
 in the pytests directory to parallelise the CI jobs.
-'''
+"""

 template = """
diff --git a/test/pytest/test_backend_config.py b/test/pytest/test_backend_config.py
index c43a7c7680..2ce079c8a6 100644
--- a/test/pytest/test_backend_config.py
+++ b/test/pytest/test_backend_config.py
@@ -42,7 +42,7 @@ def test_backend_config(framework, backend, part, clock_period, clock_unc):
    test_dir = f'hls4mlprj_backend_config_{framework}_{backend}_part_{part}_period_{clock_period}_unc_{unc_str}'
    output_dir = test_root_path / test_dir

-    if framework == "keras":
+    if framework == 'keras':
        hls_model = convert_fn(
            model,
            input_shape=(None, 1),  # This serves as a test of handling unexpected values by the backend in keras converer
diff --git a/test/pytest/test_bram_factor.py b/test/pytest/test_bram_factor.py
index 8aa608e8ac..28435b28be 100644
--- a/test/pytest/test_bram_factor.py
+++ b/test/pytest/test_bram_factor.py
@@ -13,7 +13,7 @@
 @pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
 @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
 def test_bram_factor(backend, io_type):
-    '''A copy of the test_dense from test_keras_api.py with BramFactor set to 0'''
+    """A copy of the test_dense from test_keras_api.py with BramFactor set to 0"""
    model = tf.keras.models.Sequential()
    model.add(
        Dense(
@@ -38,7 +38,7 @@ def test_bram_factor(backend, io_type):
    keras_prediction = model.predict(X_input)

    config = hls4ml.utils.config_from_keras_model(model)
-    config["Model"]["BramFactor"] = 0
+    config['Model']['BramFactor'] = 0

    output_dir = str(test_root_path / f'hls4mlprj_bram_factor_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
diff --git a/test/pytest/test_causalpadding.py b/test/pytest/test_causalpadding.py
index d91da35fac..8b3f654677 100644
--- a/test/pytest/test_causalpadding.py
+++ b/test/pytest/test_causalpadding.py
@@ -16,7 +16,7 @@
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
 def test_causalpadding(io_type, backend):
    model = Sequential()
-    model.add(Conv1D(1, 5, padding="causal", input_shape=(100, 1)))
+    model.add(Conv1D(1, 5, padding='causal', input_shape=(100, 1)))
    model.compile()

    data = np.random.randint(0, 10, 100).astype(float)
diff --git a/test/pytest/test_cnn_mnist_qkeras.py b/test/pytest/test_cnn_mnist_qkeras.py
index 38489b5865..24c6cfe39d 100644
--- a/test/pytest/test_cnn_mnist_qkeras.py
+++ b/test/pytest/test_cnn_mnist_qkeras.py
@@ -20,8 +20,8 @@ def mnist_data():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # Scale images to the [0, 1] range
-    x_train = x_train.astype("float32") / 255
-    x_test = x_test.astype("float32") / 255
+    x_train = x_train.astype('float32') / 255
+    x_test = x_test.astype('float32') / 255
    # Make sure images have shape (28, 28, 1)
    x_train = np.expand_dims(x_train, -1)
    x_test = np.expand_dims(x_test, -1)
diff --git a/test/pytest/test_conv1d_narrow.py b/test/pytest/test_conv1d_narrow.py
index 0c129fa680..a5f571f951 100644
--- a/test/pytest/test_conv1d_narrow.py
+++ b/test/pytest/test_conv1d_narrow.py
@@ -37,11 +37,11 @@ def model():
        ('io_parallel', 'latency', 'LineBuffer'),
    ],
 )
-@pytest.mark.filterwarnings("error")
+@pytest.mark.filterwarnings('error')
 def test_narrow(data, model, narrowset, capfd):
-    '''
+    """
    Check that the implementation does not have leftover data.
-    '''
+    """
    io_type = narrowset[0]
    strategy = narrowset[1]
    conv = narrowset[2]
@@ -61,5 +61,5 @@ def test_narrow(data, model, narrowset, capfd):
    y_hls4ml = hls_model.predict(X)

    out, _ = capfd.readouterr()
-    assert "leftover data" not in out
+    assert 'leftover data' not in out
    np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), atol=0.05)
diff --git a/test/pytest/test_conv2d_narrow.py b/test/pytest/test_conv2d_narrow.py
index 7b98843b46..8e16fb747a 100644
--- a/test/pytest/test_conv2d_narrow.py
+++ b/test/pytest/test_conv2d_narrow.py
@@ -37,11 +37,11 @@ def model():
        ('io_parallel', 'latency', 'LineBuffer'),
    ],
 )
-@pytest.mark.filterwarnings("error")
+@pytest.mark.filterwarnings('error')
 def test_narrow(data, model, narrowset, capfd):
-    '''
+    """
    Check that the implementation does not have leftover data.
-    '''
+    """
    io_type = narrowset[0]
    strategy = narrowset[1]
    conv = narrowset[2]
@@ -61,5 +61,5 @@ def test_narrow(data, model, narrowset, capfd):
    y_hls4ml = hls_model.predict(X)

    out, _ = capfd.readouterr()
-    assert "leftover data" not in out
+    assert 'leftover data' not in out
    np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), atol=0.05)
diff --git a/test/pytest/test_extensions.py b/test/pytest/test_extensions.py
index 23bd6734f2..2b72189461 100644
--- a/test/pytest/test_extensions.py
+++ b/test/pytest/test_extensions.py
@@ -11,7 +11,7 @@

 # Keras implementation of a custom layer
 class KReverse(tf.keras.layers.Layer):
-    '''Keras implementation of a hypothetical custom layer'''
+    """Keras implementation of a hypothetical custom layer"""

    def __init__(self):
        super().__init__()
@@ -26,7 +26,7 @@ def get_config(self):

 # hls4ml layer implementation
 class HReverse(hls4ml.model.layers.Layer):
-    '''hls4ml implementation of a hypothetical custom layer'''
+    """hls4ml implementation of a hypothetical custom layer"""

    def initialize(self):
        inp = self.get_input_variable()
@@ -37,7 +37,7 @@ def initialize(self):

 # hls4ml optimizer to remove duplicate optimizer
 class RemoveDuplicateReverse(hls4ml.model.optimizer.OptimizerPass):
-    '''OptimizerPass to remove consecutive HReverse layers.'''
+    """OptimizerPass to remove consecutive HReverse layers."""

    def match(self, node):
        return isinstance(node, HReverse) and isinstance(node.get_input_node(), HReverse)
diff --git a/test/pytest/test_extensions_pytorch.py b/test/pytest/test_extensions_pytorch.py
index c5a8d2b101..d223d9e51a 100644
--- a/test/pytest/test_extensions_pytorch.py
+++ b/test/pytest/test_extensions_pytorch.py
@@ -12,7 +12,7 @@

 # PyTorch implementation of a custom layer
 class TReverse(hls4ml.utils.torch.HLS4MLModule):
-    '''PyTorch implementation of a hypothetical custom layer'''
+    """PyTorch implementation of a hypothetical custom layer"""

    def __init__(self):
        super().__init__()
@@ -24,7 +24,7 @@ def forward(self, inputs):
 # hls4ml layer implementation
 # Note that the `Torch` suffix is added here to avoid clashes with other tests and not mandatory
 class HReverseTorch(hls4ml.model.layers.Layer):
-    '''hls4ml implementation of a hypothetical custom layer'''
+    """hls4ml implementation of a hypothetical custom layer"""

    def initialize(self):
        inp = self.get_input_variable()
@@ -35,7 +35,7 @@ def initialize(self):

 # hls4ml optimizer to remove duplicate optimizer
 class RemoveDuplicateReverse(hls4ml.model.optimizer.OptimizerPass):
-    '''OptimizerPass to remove consecutive HReverseTorch layers.'''
+    """OptimizerPass to remove consecutive HReverseTorch layers."""

    def match(self, node):
        return isinstance(node, HReverseTorch) and isinstance(node.get_input_node(), HReverseTorch)
diff --git a/test/pytest/test_flows.py b/test/pytest/test_flows.py
index 2eec8932ea..35e6da53db 100644
--- a/test/pytest/test_flows.py
+++ b/test/pytest/test_flows.py
@@ -2,12 +2,12 @@

 import hls4ml

-'''
+"""
 Tests for model flows.
 Construct some dummy optimizer passes and flows that do nothing.
 Passes record their label to the model.
 Tests check that the order of applied passes matches the expectations
-'''
+"""


 class DummyPass(hls4ml.model.optimizer.OptimizerPass):
diff --git a/test/pytest/test_graph.py b/test/pytest/test_graph.py
index f419823cbf..64637d9083 100644
--- a/test/pytest/test_graph.py
+++ b/test/pytest/test_graph.py
@@ -130,7 +130,7 @@ def test_graph_branch(iotype, batch):

 @pytest.mark.parametrize('iotype', ['io_parallel', 'io_stream'])
 def test_final_reshape(iotype):
-    '''Test case for a model with a Reshape as the final layer'''
+    """Test case for a model with a Reshape as the final layer"""
    inputs = tf.keras.layers.Input(shape=(1, 1, 1))  # 1 input pixel
    conv = tf.keras.layers.Conv2D(6, 1)  # 6 filters, 1x1 kernel
    x = conv(inputs)
@@ -167,7 +167,7 @@ def test_final_reshape(iotype):
    ],
 )
 def test_broadcast_stream(shapes, layer):
-    '''Test case for stream broadcast before Add but not before Concatenate'''
+    """Test case for stream broadcast before Add but not before Concatenate"""
    input1 = tf.keras.layers.Input(shape=shapes[0])
    input2 = tf.keras.layers.Input(shape=shapes[1])
    inputs = [input1, input2]
@@ -199,7 +199,7 @@ def test_broadcast_stream(shapes, layer):

 @pytest.mark.parametrize('batch', [1, 32])
 def test_multiple_outputs(batch):
-    '''Test case for multiple outputs'''
+    """Test case for multiple outputs"""
    input1 = tf.keras.layers.Input(shape=(10,))
    inputs = [input1]
    output1 = tf.keras.layers.Dense(5, kernel_initializer='ones', use_bias=False)(input1)
diff --git a/test/pytest/test_hgq_layers.py b/test/pytest/test_hgq_layers.py
index 80d96fbcda..1b699f3420 100644
--- a/test/pytest/test_hgq_layers.py
+++ b/test/pytest/test_hgq_layers.py
@@ -30,7 +30,6 @@


 def _run_synth_match_test(proxy: keras.Model, data, io_type: str, backend: str, dir: str, cond=None):
-
    output_dir = dir + '/hls4ml_prj'
    hls_model = convert_from_keras_model(
        proxy,
@@ -56,9 +55,10 @@ def _run_synth_match_test(proxy: keras.Model, data, io_type: str, backend: str,
    try:
        if cond is None:
            mismatch_ph = p != h
-            assert (
-                np.sum(mismatch_ph) == 0
-            ), f"Proxy-HLS4ML mismatch for out {i}: {np.sum(np.any(mismatch_ph, axis=1))} out of {data_len} samples are different. Sample: {p[mismatch_ph].ravel()[:5]} vs {h[mismatch_ph].ravel()[:5]}"  # noqa: E501
+            assert np.sum(mismatch_ph) == 0, f"""Proxy-HLS4ML mismatch for out {i}:
+                {np.sum(np.any(mismatch_ph, axis=1))} out of {data_len} samples are different.
+                Sample: {p[mismatch_ph].ravel()[:5]} vs {h[mismatch_ph].ravel()[:5]}
+                """
        else:
            cond(p, h)
    except AssertionError as e:
@@ -126,9 +126,9 @@ def get_data(shape: tuple[int, ...], v: float, max_scale: float):

 def softmax_cond(proxy, hls):
    match_precent = np.mean(np.argmax(proxy, axis=1) == np.argmax(hls, axis=1))
-    assert (
-        match_precent > 0.90
-    ), f"Proxy-HLS4ML mismatch: {(1-match_precent) * 100}% of samples are different. Sample: {proxy[:5]} vs {hls[:5]}"
+    assert match_precent > 0.90, (
+        f'Proxy-HLS4ML mismatch: {(1 - match_precent) * 100}% of samples are different. Sample: {proxy[:5]} vs {hls[:5]}'
+    )


 def custom_activation_fn(x):
@@ -138,9 +138,9 @@ def custom_activation_fn(x):
 @pytest.mark.parametrize(
    'layer',
    [
-        "HDense(10)",
-        "HDense(10, use_bias=False)",
-        "HDenseBatchNorm(10)",
+        'HDense(10)',
+        'HDense(10, use_bias=False)',
+        'HDenseBatchNorm(10)',
        "HConv1D(2, 3, padding='same')",
        "HConv1D(2, 3, padding='valid')",
        "HConv1D(2, 3, padding='valid', use_bias=False)",
@@ -153,21 +153,21 @@ def custom_activation_fn(x):
        "HConv2D(2, (3,3), padding='valid', strides=2)",
        "HConv2D(2, (3,3), padding='same', strides=2)",
        "HConv2DBatchNorm(2, (3,3), padding='valid')",
-        "HAdd()",
+        'HAdd()',
        "HActivation('relu')",
        # "HActivation('leaky_relu')",
        "HActivation('tanh')",
        "HActivation('sigmoid')",
        # "HActivation('softmax')",
-        "HActivation(custom_activation_fn)",
+        'HActivation(custom_activation_fn)',
    ],
 )
-@pytest.mark.parametrize("N", [1000])
-@pytest.mark.parametrize("rnd_strategy", ['standard_round', 'floor'])
-@pytest.mark.parametrize("io_type", ['io_parallel', 'io_stream'])
-@pytest.mark.parametrize("cover_factor", [1.0])
-@pytest.mark.parametrize("aggressive", [True, False])
-@pytest.mark.parametrize("backend", ['vivado', 'vitis'])
+@pytest.mark.parametrize('N', [1000])
+@pytest.mark.parametrize('rnd_strategy', ['standard_round', 'floor'])
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize('cover_factor', [1.0])
+@pytest.mark.parametrize('aggressive', [True, False])
+@pytest.mark.parametrize('backend', ['vivado', 'vitis'])
 def test_syn_hlayers(layer, N: int, rnd_strategy: str, io_type: str, cover_factor: float, aggressive: bool, backend: str):
    model = create_hlayer_model(layer=layer, rnd_strategy=rnd_strategy, io_type=io_type)
    data = get_data((N, 16), 7, 1)
diff --git a/test/pytest/test_hgq_players.py b/test/pytest/test_hgq_players.py
index 9c4b40f97f..5919b99b08 100644
--- a/test/pytest/test_hgq_players.py
+++ b/test/pytest/test_hgq_players.py
@@ -31,7 +31,6 @@


 def _run_synth_match_test(proxy: keras.Model, data, io_type: str, backend: str, dir: str, cond=None):
-
    output_dir = dir + '/hls4ml_prj'
    hls_model = convert_from_keras_model(
        proxy,
@@ -57,9 +56,10 @@ def _run_synth_match_test(proxy: keras.Model, data, io_type: str, backend: str,
    try:
        if cond is None:
            mismatch_ph = p != h
-            assert (
-                np.sum(mismatch_ph) == 0
-            ), f"Proxy-HLS4ML mismatch for out {i}: {np.sum(np.any(mismatch_ph, axis=1))} out of {data_len} samples are different. Sample: {p[mismatch_ph].ravel()[:5]} vs {h[mismatch_ph].ravel()[:5]}"  # noqa: E501
+            assert np.sum(mismatch_ph) == 0, f"""Proxy-HLS4ML mismatch for out {i}:
+                {np.sum(np.any(mismatch_ph, axis=1))} out of {data_len} samples are different.
+                Sample: {p[mismatch_ph].ravel()[:5]} vs {h[mismatch_ph].ravel()[:5]}
+                """
        else:
            cond(p, h)
    except AssertionError as e:
@@ -134,28 +134,28 @@ def get_data(shape: tuple[int, ...], v: float, max_scale: float):
 @pytest.mark.parametrize(
    'layer',
    [
-        "PConcatenate()",
+        'PConcatenate()',
        "PMaxPool1D(2, padding='same')",
        "PMaxPool1D(4, padding='same')",
        "PMaxPool2D((5,3), padding='same')",
        "PMaxPool1D(2, padding='valid')",
        "PMaxPool2D((2,3), padding='valid')",
-        "Signature(1,6,3)",
+        'Signature(1,6,3)',
        "PAvgPool1D(2, padding='same')",
        "PAvgPool2D((1,2), padding='same')",
        "PAvgPool2D((2,2), padding='same')",
        "PAvgPool1D(2, padding='valid')",
        "PAvgPool2D((1,2), padding='valid')",
        "PAvgPool2D((2,2), padding='valid')",
-        "PFlatten()",
+        'PFlatten()',
    ],
 )
-@pytest.mark.parametrize("N", [1000])
-@pytest.mark.parametrize("rnd_strategy", ['floor', 'standard_round'])
-@pytest.mark.parametrize("io_type", ['io_parallel', 'io_stream'])
-@pytest.mark.parametrize("cover_factor", [1.0])
-@pytest.mark.parametrize("aggressive", [True, False])
-@pytest.mark.parametrize("backend", ['vivado', 'vitis'])
+@pytest.mark.parametrize('N', [1000])
+@pytest.mark.parametrize('rnd_strategy', ['floor', 'standard_round'])
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize('cover_factor', [1.0])
+@pytest.mark.parametrize('aggressive', [True, False])
+@pytest.mark.parametrize('backend', ['vivado', 'vitis'])
 def test_syn_players(layer, N: int, rnd_strategy: str, io_type: str, cover_factor: float, aggressive: bool, backend: str):
    model = create_player_model(layer=layer, rnd_strategy=rnd_strategy, io_type=io_type)
    data = get_data((N, 15), 7, 1)
diff --git a/test/pytest/test_keras_api.py b/test/pytest/test_keras_api.py
index 4bb9f03751..3d7511d21f 100644
--- a/test/pytest/test_keras_api.py
+++ b/test/pytest/test_keras_api.py
@@ -65,8 +65,8 @@ def test_dense(backend, io_type):
    np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=1e-2, atol=0.01)

    assert len(model.layers) + 1 == len(hls_model.get_layers())
-    assert list(hls_model.get_layers())[0].attributes['class_name'] == "InputLayer"
-    assert list(hls_model.get_layers())[1].attributes["class_name"] == model.layers[0]._name
+    assert list(hls_model.get_layers())[0].attributes['class_name'] == 'InputLayer'
+    assert list(hls_model.get_layers())[1].attributes['class_name'] == model.layers[0]._name
    assert list(hls_model.get_layers())[2].attributes['class_name'] == 'ELU'
    assert list(hls_model.get_layers())[0].attributes['input_shape'] == list(model.layers[0].input_shape[1:])
    assert list(hls_model.get_layers())[1].attributes['n_in'] == model.layers[0].input_shape[1:][0]
@@ -78,13 +78,13 @@ def test_dense(backend, io_type):
 # TODO: add ThresholdedReLU test when it can be made to pass
 # https://github.com/fastmachinelearning/hls4ml/issues/376
 @pytest.mark.parametrize(
-    "activation_function",
+    'activation_function',
    [
        Activation(activation='relu', name='relu'),
        LeakyReLU(alpha=1.0),
        ELU(alpha=1.0),
        PReLU(
-            alpha_initializer="zeros",
+            alpha_initializer='zeros',
        ),
        Activation(activation='sigmoid', name='sigmoid'),
    ],
@@ -171,13 +171,13 @@ def test_conv1d(padds, backend, strategy, io_type):
    assert list(hls_model.get_layers())[1].attributes['name'] == model.layers[0]._name
    assert list(hls_model.get_layers())[1].attributes['class_name'] == 'Conv1D'
    assert list(hls_model.get_layers())[1].attributes['activation'] == str(model.layers[0].activation).split()[1]
-    assert list(hls_model.get_layers())[1].attributes["in_width"] == model.layers[0]._batch_input_shape[1]
model.layers[0]._batch_input_shape[1] + assert list(hls_model.get_layers())[1].attributes['in_width'] == model.layers[0]._batch_input_shape[1] assert list(hls_model.get_layers())[1].attributes['filt_width'] == model.layers[0].kernel_size[0] assert list(hls_model.get_layers())[1].attributes['n_chan'] == model.layers[0].input_shape[2] assert list(hls_model.get_layers())[1].attributes['n_filt'] == model.layers[0].filters assert list(hls_model.get_layers())[1].attributes['stride_width'] == model.layers[0].strides[0] assert list(hls_model.get_layers())[1].attributes['data_format'] == model.layers[0].data_format - assert list(hls_model.get_layers())[1].attributes["out_width"] == list(model.layers[0].output_shape)[1] + assert list(hls_model.get_layers())[1].attributes['out_width'] == list(model.layers[0].output_shape)[1] out_width = math.ceil(float(model.layers[0]._batch_input_shape[2]) / float(model.layers[0].strides[0])) pad_along_width = max( @@ -321,9 +321,9 @@ def test_conv2d(chans, padds, backend, strategy, io_type): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) def test_depthwise2d(backend, io_type): - ''' + """ Test proper handling of DepthwiseConv2D - ''' + """ X = np.random.rand(10, 32, 32, 3) X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = tf.keras.models.Sequential() @@ -349,9 +349,9 @@ def test_depthwise2d(backend, io_type): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) def test_depthwise1d(backend, io_type): - ''' + """ Test proper handling of DepthwiseConv1D. - ''' + """ X = np.random.rand(10, 32, 3) X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = tf.keras.models.Sequential() diff --git a/test/pytest/test_merge_pytorch.py b/test/pytest/test_merge_pytorch.py index 1dc461e532..d04869dc62 100644 --- a/test/pytest/test_merge_pytorch.py +++ b/test/pytest/test_merge_pytorch.py @@ -45,7 +45,7 @@ def test_merge(merge_op, io_type, backend): model, [input_shape, input_shape], default_precision='ap_fixed<32,16>', - channels_last_conversion="internal", + channels_last_conversion='internal', transpose_outputs=False, ) output_dir = str(test_root_path / f'hls4mlprj_merge_pytorch_{merge_op}_{backend}_{io_type}') diff --git a/test/pytest/test_multiout_onnx.py b/test/pytest/test_multiout_onnx.py index 3808e2630c..161f1d1b3c 100644 --- a/test/pytest/test_multiout_onnx.py +++ b/test/pytest/test_multiout_onnx.py @@ -56,7 +56,6 @@ def onnx_model(tmp_path): @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_multiout_onnx(onnx_model, io_type): - X = np.random.rand(1, 16) X = (np.round(X * 2**16) * 2**-16).astype(np.float32) diff --git a/test/pytest/test_optimization/test_keras/test_masking.py b/test/pytest/test_optimization/test_keras/test_masking.py index 8b465d8d7e..ef89477573 100644 --- a/test/pytest/test_optimization/test_keras/test_masking.py +++ b/test/pytest/test_optimization/test_keras/test_masking.py @@ -9,7 +9,7 @@ from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator -''' +""" In all the tests, an artifical network with one Dense/Conv2D layer and pre-determined weights is created Then, the tests assert zeros occur in the correct places, based on the masking structure (unstructured, block etc.) 
Furthermore, tests assert the masks are binary, so only zeros and ones occur @@ -18,7 +18,7 @@ * zero_params > sparsity * total_params Since the targetted objective is ParameterEstimator, weight sharing is not suitable [does not decrease the number of weights] Therefore, all the test verify offsets are zero -''' +""" sparsity = 0.33 local_masking = [True, False] dense_layers = [Dense, QDense] diff --git a/test/pytest/test_optimization/test_keras/test_reduction.py b/test/pytest/test_optimization/test_keras/test_reduction.py index 4bf93f7301..0291e8533f 100644 --- a/test/pytest/test_optimization/test_keras/test_reduction.py +++ b/test/pytest/test_optimization/test_keras/test_reduction.py @@ -12,11 +12,11 @@ pytest.skip(allow_module_level=True) -''' +""" Set some neurons / filters to zero and verify that these are removed Even is some neurons (columns) in the output layer are zero, these should not be removed (to match data set labels) Test verify the above property, by setting some zeros in the last layer and verifying these remain in place -''' +""" @pytest.mark.skipif( diff --git a/test/pytest/test_optimization/test_keras/test_weight_sharing.py b/test/pytest/test_optimization/test_keras/test_weight_sharing.py index be1d3a957f..0a693395c3 100644 --- a/test/pytest/test_optimization/test_keras/test_weight_sharing.py +++ b/test/pytest/test_optimization/test_keras/test_weight_sharing.py @@ -14,12 +14,12 @@ local_masking = [True, False] dense_layers = [Dense, QDense] -''' +""" A mock objective class for weight sharing When a group of weights is quantized to the mean value, resource savings are equal to the number of weights quantized This is similar to ParameterEstimator, but instead of pruning, weight sharing is performed and No savings are incurred with unstructured type (unstructured weight sharing doesn't make sense) -''' +""" class MockWeightSharingEstimator(ObjectiveEstimator): diff --git a/test/pytest/test_pytorch_api.py b/test/pytest/test_pytorch_api.py index d182d9ae16..4ce965bf3e 100644 --- a/test/pytest/test_pytorch_api.py +++ b/test/pytest/test_pytorch_api.py @@ -52,8 +52,8 @@ def test_linear(backend, io_type): nNodes += 1 assert nNodes - 1 == len(hls_model.get_layers()) - assert list(hls_model.get_layers())[0].attributes['class_name'] == "InputLayer" - assert list(hls_model.get_layers())[1].attributes["class_name"] == "Dense" + assert list(hls_model.get_layers())[0].attributes['class_name'] == 'InputLayer' + assert list(hls_model.get_layers())[1].attributes['class_name'] == 'Dense' assert list(hls_model.get_layers())[0].attributes['input_shape'] == [1] assert list(hls_model.get_layers())[1].attributes['n_in'] == 1 assert list(hls_model.get_layers())[1].attributes['n_out'] == 1 @@ -61,7 +61,7 @@ def test_linear(backend, io_type): # TODO: add ThresholdedReLU test when it can be made to pass @pytest.mark.parametrize( - "activation_function", + 'activation_function', [ nn.Softmax(dim=-1), nn.ReLU(), @@ -170,7 +170,7 @@ def forward(self, x): @pytest.mark.parametrize( - "activation_function", + 'activation_function', [ SoftmaxModel(), ReLuModel(), @@ -233,10 +233,10 @@ def test_conv1d(padds, backend, io_type): if io_type == 'io_stream': X_input = np.ascontiguousarray(X_input.transpose(0, 2, 1)) config = config_from_pytorch_model( - model, (n_in, size_in), channels_last_conversion="internal", transpose_outputs=False + model, (n_in, size_in), channels_last_conversion='internal', transpose_outputs=False ) else: - config = config_from_pytorch_model(model, (n_in, size_in), 
channels_last_conversion="full", transpose_outputs=True) + config = config_from_pytorch_model(model, (n_in, size_in), channels_last_conversion='full', transpose_outputs=True) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv1d_{padds}_{backend}_{io_type}') hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) @@ -257,7 +257,7 @@ def test_conv1d(padds, backend, io_type): if io_type == 'io_stream': # Vivado inserts and additional layer for 'same' padding in io_stream - if (backend == "Vivado" or backend == "Vitis") and padds == 1: + if (backend == 'Vivado' or backend == 'Vitis') and padds == 1: assert nNodes == len(hls_model.get_layers()) else: assert nNodes - 1 == len(hls_model.get_layers()) @@ -284,28 +284,28 @@ def test_conv1d(padds, backend, io_type): # if not (backend == 'Vivado' and io_type == 'io_stream' and padds == 1): conv_index = 2 act_index = 3 - if io_type == "io_stream" and not ((backend == "Vivado" or backend == "Vitis") and padds == 1): + if io_type == 'io_stream' and not ((backend == 'Vivado' or backend == 'Vitis') and padds == 1): conv_index = 1 act_index = 2 assert list(hls_model.get_layers())[conv_index].attributes['name'] == convNode.name assert list(hls_model.get_layers())[conv_index].attributes['class_name'] == 'Conv1D' assert list(hls_model.get_layers())[act_index].attributes['activation'] == class_object_relu.__class__.__name__.lower() - if io_type == "io_stream" and (backend == "Vivado" or backend == "Vitis") and padds == 1: - assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in + 2 + if io_type == 'io_stream' and (backend == 'Vivado' or backend == 'Vitis') and padds == 1: + assert list(hls_model.get_layers())[conv_index].attributes['in_width'] == size_in + 2 else: - assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in + assert list(hls_model.get_layers())[conv_index].attributes['in_width'] == size_in assert list(hls_model.get_layers())[conv_index].attributes['filt_width'] == class_object_conv.kernel_size[0] assert list(hls_model.get_layers())[conv_index].attributes['n_chan'] == class_object_conv.in_channels assert list(hls_model.get_layers())[conv_index].attributes['n_filt'] == class_object_conv.out_channels assert list(hls_model.get_layers())[conv_index].attributes['stride_width'] == class_object_conv.stride[0] padding = padds - if io_type == "io_stream" and (backend == "Vivado" or backend == "Vitis") and padds == 1: + if io_type == 'io_stream' and (backend == 'Vivado' or backend == 'Vitis') and padds == 1: padding = 1 padds = 0 assert padding == class_object_conv.padding[0] assert list(hls_model.get_layers())[conv_index].attributes['data_format'] == 'channels_last' - assert list(hls_model.get_layers())[conv_index].attributes["out_width"] == out_width + assert list(hls_model.get_layers())[conv_index].attributes['out_width'] == out_width pad_along_width = max((out_width - 1) * class_object_conv.stride[0] + class_object_conv.kernel_size[0] - size_in, 0) pad_left = pad_along_width // 2 @@ -341,11 +341,11 @@ def test_conv2d(padds, backend, io_type): if io_type == 'io_stream': X_input = np.ascontiguousarray(X_input.transpose(0, 2, 3, 1)) config = config_from_pytorch_model( - model, (n_in, size_in_height, size_in_width), channels_last_conversion="internal", transpose_outputs=False + model, (n_in, size_in_height, size_in_width), channels_last_conversion='internal', transpose_outputs=False ) else: config = config_from_pytorch_model( - 
model, (n_in, size_in_height, size_in_width), channels_last_conversion="full", transpose_outputs=True + model, (n_in, size_in_height, size_in_width), channels_last_conversion='full', transpose_outputs=True ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv2d_{padds}_{backend}_{io_type}') @@ -428,7 +428,7 @@ def test_conv2d(padds, backend, io_type): # Vivado inserts and additional layer for 'same' padding in io_stream conv_index = 2 act_index = 3 - if io_type == "io_stream": + if io_type == 'io_stream': conv_index = 1 act_index = 2 assert list(hls_model.get_layers())[conv_index].attributes['name'] == convNode.name @@ -436,8 +436,8 @@ def test_conv2d(padds, backend, io_type): assert ( list(hls_model.get_layers())[act_index].attributes['activation'] == class_object_relu.__class__.__name__.lower() ) - assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in_width - assert list(hls_model.get_layers())[conv_index].attributes["in_height"] == size_in_height + assert list(hls_model.get_layers())[conv_index].attributes['in_width'] == size_in_width + assert list(hls_model.get_layers())[conv_index].attributes['in_height'] == size_in_height assert list(hls_model.get_layers())[conv_index].attributes['filt_width'] == class_object_conv.kernel_size[1] assert list(hls_model.get_layers())[conv_index].attributes['filt_height'] == class_object_conv.kernel_size[0] assert list(hls_model.get_layers())[conv_index].attributes['n_chan'] == class_object_conv.in_channels @@ -517,7 +517,7 @@ def test_pooling(pooling, padds, backend): children = {c[0]: c[1] for c in model.named_children()} class_object_pool = children[poolNode.target] - if "Max" in pooling.__name__: + if 'Max' in pooling.__name__: out_height = int( math.floor( float(size_in_height + 2 * padds - class_object_pool.dilation * (class_object_pool.kernel_size - 1) - 1) @@ -561,7 +561,7 @@ def test_pooling(pooling, padds, backend): # Verify correct parsing of layer hls_pool = list(hls_model.get_layers())[-2] if '2d' in pooling.__name__: - assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1] + assert hls_pool.attributes['name'] == '_' + poolNode.name.split('_')[-1] assert hls_pool.attributes['class_name'][-2] == str(2) assert hls_pool.attributes['stride_height'] == class_object_pool.stride assert hls_pool.attributes['stride_width'] == class_object_pool.stride @@ -570,15 +570,15 @@ def test_pooling(pooling, padds, backend): assert hls_pool.attributes['padding'] == 'valid' if class_object_pool.padding == 0 else 'same' elif '1d' in pooling.__name__: - if "Max" in pooling.__name__: - assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1] + if 'Max' in pooling.__name__: + assert hls_pool.attributes['name'] == '_' + poolNode.name.split('_')[-1] assert hls_pool.attributes['class_name'][-2] == str(1) assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size assert hls_pool.attributes['stride_width'] == class_object_pool.stride assert hls_pool.attributes['padding'] == 'valid' if class_object_pool.padding == 0 else 'same' else: - assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1] + assert hls_pool.attributes['name'] == '_' + poolNode.name.split('_')[-1] assert hls_pool.attributes['class_name'][-2] == str(1) assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size[0] assert hls_pool.attributes['stride_width'] == class_object_pool.stride[0] @@ -729,7 +729,7 @@ def test_skipped_layers(backend, io_type): model, input_shape, 
default_precision='ap_fixed<32,16>', - channels_last_conversion="full", + channels_last_conversion='full', transpose_outputs=False, ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_skipped_{backend}_{io_type}') @@ -794,7 +794,7 @@ def forward(self, x): model, input_shape, default_precision='ap_fixed<32,16>', - channels_last_conversion="full", # Crucial for testing if the first Transpose was removed + channels_last_conversion='full', # Crucial for testing if the first Transpose was removed ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_transpose_nop_{tensor_rank}d_{backend}_{io_type}') hls_model = convert_from_pytorch_model( @@ -821,7 +821,6 @@ def forward(self, x): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_view(backend, io_type): - class TestModel(nn.Module): def __init__(self, n_in, n_out, size_in): super().__init__() @@ -854,7 +853,7 @@ def forward(self, x): # X_input is channels last X_input = np.ascontiguousarray(X_input.transpose(0, 2, 1)) - config = config_from_pytorch_model(model, (n_in, size_in), channels_last_conversion="internal", transpose_outputs=False) + config = config_from_pytorch_model(model, (n_in, size_in), channels_last_conversion='internal', transpose_outputs=False) output_dir = str(test_root_path / f'hls4mlprj_pytorch_view_{backend}_{io_type}') hls_model = convert_from_pytorch_model( diff --git a/test/pytest/test_pytorch_profiler.py b/test/pytest/test_pytorch_profiler.py index 372a6f5626..a092433a76 100644 --- a/test/pytest/test_pytorch_profiler.py +++ b/test/pytest/test_pytorch_profiler.py @@ -49,7 +49,7 @@ def count_bars_in_figure(fig): ] -@pytest.mark.parametrize("layers", test_layers) +@pytest.mark.parametrize('layers', test_layers) def test_sequential_model(layers): if __torch_profiling_enabled__: param_count, layers = layers @@ -58,7 +58,7 @@ def test_sequential_model(layers): assert count_bars_in_figure(wp) == param_count -@pytest.mark.parametrize("layers", test_layers) +@pytest.mark.parametrize('layers', test_layers) def test_subclass_model(layers): if __torch_profiling_enabled__: param_count, layers = layers @@ -67,7 +67,7 @@ def test_subclass_model(layers): assert count_bars_in_figure(wp) == param_count -@pytest.mark.parametrize("layers", test_layers) +@pytest.mark.parametrize('layers', test_layers) def test_modulelist_model(layers): if __torch_profiling_enabled__: param_count, layers = layers @@ -76,7 +76,7 @@ def test_modulelist_model(layers): assert count_bars_in_figure(wp) == param_count -@pytest.mark.parametrize("layers", test_layers) +@pytest.mark.parametrize('layers', test_layers) def test_nested_model(layers): if __torch_profiling_enabled__: param_count, layers = layers diff --git a/test/pytest/test_qkeras.py b/test/pytest/test_qkeras.py index 8cff159df9..889d71a0c4 100644 --- a/test/pytest/test_qkeras.py +++ b/test/pytest/test_qkeras.py @@ -30,8 +30,8 @@ _add_supported_quantized_objects(co) -warnings.filterwarnings("ignore", message="numpy.dtype size changed") -warnings.filterwarnings("ignore", message="numpy.ufunc size changed") +warnings.filterwarnings('ignore', message='numpy.dtype size changed') +warnings.filterwarnings('ignore', message='numpy.ufunc size changed') test_root_path = Path(__file__).parent example_model_path = (test_root_path / '../../example-models').resolve() @@ -39,10 +39,10 @@ @pytest.fixture(scope='module') def get_jettagging_data(): - ''' + """ Download the jet tagging dataset - ''' - 
print("Fetching data from openml") + """ + print('Fetching data from openml') data = fetch_openml('hls4ml_lhc_jets_hlf') X, y = data['data'], data['target'] le = LabelEncoder() @@ -57,9 +57,9 @@ def get_jettagging_data(): @pytest.fixture(scope='module') def load_jettagging_model(): - ''' + """ Load the 3 hidden layer QKeras example model trained on the jet tagging dataset - ''' + """ model_path = example_model_path / 'keras/qkeras_3layer.json' with model_path.open('r') as f: jsons = f.read() @@ -72,9 +72,9 @@ def load_jettagging_model(): @pytest.fixture @pytest.mark.parametrize('strategy', ['latency', 'resource']) def convert(load_jettagging_model, strategy): - ''' + """ Convert a QKeras model trained on the jet tagging dataset - ''' + """ model = load_jettagging_model config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vivado') @@ -93,12 +93,12 @@ def convert(load_jettagging_model, strategy): @pytest.mark.parametrize('strategy', ['latency', 'resource']) def test_accuracy(convert, load_jettagging_model, get_jettagging_data, strategy): - ''' + """ Test the hls4ml-evaluated accuracy of a 3 hidden layer QKeras model trained on the jet tagging dataset. QKeras model accuracy is required to be over 70%, and hls4ml accuracy required to be within 1% of the QKeras model accuracy. - ''' - print("Test accuracy") + """ + print('Test accuracy') from sklearn.metrics import accuracy_score X_train_val, X_test, y_train_val, y_test = get_jettagging_data @@ -137,10 +137,10 @@ def randX_100_16(): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend, io_type): - ''' + """ Test a single Dense -> Activation layer topology for bit exactness with number of bits parameter - ''' + """ X = randX_100_16 model = Sequential() model.add( @@ -194,11 +194,11 @@ def randX_100_10(): @pytest.mark.parametrize('backend', ['Vivado', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_quantizer_special(randX_1000_1, quantizer, backend, io_type): - ''' + """ Test a single quantizer (tanh or sigmoid) as an Activation function. Checks the type inference through the conversion is correct without just using the same logic. - ''' + """ X = randX_1000_1 X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = Sequential() @@ -278,11 +278,11 @@ def randX_1000_1(): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_quantizer(randX_1000_1, quantizer, backend, io_type): - ''' + """ Test a single quantizer as an Activation function. Checks the type inference through the conversion is correct without just using the same logic. - ''' + """ X = randX_1000_1 X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = Sequential() @@ -318,9 +318,9 @@ def test_quantizer(randX_1000_1, quantizer, backend, io_type): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_relu_negative_slope(randX_1000_1, quantizer, backend, io_type): - ''' + """ Test a a transformation of quantized_relu with negative_slope to leaky_relu activation layer. 
- ''' + """ X = randX_1000_1 X = -X # Make it negative so leaky relu does something X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> @@ -443,9 +443,9 @@ def randX_100_8_8_1(): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) def test_qconv2dbn(randX_100_8_8_1, backend, io_type): - ''' + """ Test proper handling of QConv2DBatchnorm. - ''' + """ X = randX_100_8_8_1 X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = Sequential() @@ -488,9 +488,9 @@ def randX_10_32_32_3(): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) def test_qdepthwiseconv2d(randX_10_32_32_3, backend, io_type): - ''' + """ Test proper handling of QDepthwiseConv2D. - ''' + """ X = randX_10_32_32_3 X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> model = Sequential() @@ -561,9 +561,9 @@ def test_quantised_po2_bit_width(backend, io_type, strategy): @pytest.mark.parametrize('backend', ['Quartus', 'oneAPI']) def test_qsimplernn(backend): - ''' + """ Test proper handling of QSimpleRNN. - ''' + """ X = np.linspace(-0.25, 0.25, 5) X = np.stack([X, X], axis=1).reshape(1, 5, 2) @@ -582,7 +582,7 @@ def test_qsimplernn(backend): model.compile() config = hls4ml.utils.config_from_keras_model( - model, granularity='name', default_precision="ap_fixed<16,1>", backend=backend + model, granularity='name', default_precision='ap_fixed<16,1>', backend=backend ) output_dir = str(test_root_path / f'hls4mlprj_qkeras_qsimplernn_{backend}') hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir, backend=backend) @@ -596,9 +596,9 @@ def test_qsimplernn(backend): @pytest.mark.parametrize('backend', ['Vivado', 'Quartus', 'oneAPI']) def test_qlstm(backend): - ''' + """ Test proper handling of QLSTM. - ''' + """ X = np.linspace(-0.5, 0.5, 5) X = np.stack([X, X], axis=1).reshape(1, 5, 2) @@ -618,7 +618,7 @@ def test_qlstm(backend): model.compile() config = hls4ml.utils.config_from_keras_model( - model, granularity='name', default_precision="ap_fixed<8,1>", backend=backend + model, granularity='name', default_precision='ap_fixed<8,1>', backend=backend ) output_dir = str(test_root_path / f'hls4mlprj_qkeras_qlstm_{backend}') hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir, backend=backend) @@ -632,9 +632,9 @@ def test_qlstm(backend): @pytest.mark.parametrize('backend', ['Vivado', 'Quartus', 'oneAPI']) def test_qgru(backend): - ''' + """ Test proper handling of QGRU. - ''' + """ X = np.linspace(-0.5, 0.5, 5) X = np.stack([X, X], axis=1).reshape(1, 5, 2) @@ -655,7 +655,7 @@ def test_qgru(backend): model.compile() config = hls4ml.utils.config_from_keras_model( - model, granularity='name', default_precision="ap_fixed<8,1>", backend=backend + model, granularity='name', default_precision='ap_fixed<8,1>', backend=backend ) output_dir = str(test_root_path / f'hls4mlprj_qkeras_qsimplernn_{backend}') hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir, backend=backend) @@ -670,9 +670,9 @@ def test_qgru(backend): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) def test_qseparableconv1d(backend, io_type): - ''' + """ Test proper handling of QSeparableConv1D. 
- ''' + """ x_in = Input((13, 20), name='input_layer') x = QSeparableConv1D( 5, @@ -716,9 +716,9 @@ def test_qseparableconv1d(backend, io_type): @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) def test_qseparableconv2d(backend, io_type): - ''' + """ Test proper handling of QSeparableConv2D. - ''' + """ x_in = Input((13, 21, 20), name='input_layer') x = QSeparableConv2D( 5, diff --git a/test/pytest/test_qonnx.py b/test/pytest/test_qonnx.py index bfa6e0a49c..1e113d635d 100644 --- a/test/pytest/test_qonnx.py +++ b/test/pytest/test_qonnx.py @@ -23,18 +23,18 @@ @pytest.fixture(scope='module') def tfc_2w2a_model(): - ''' + """ Load the tiny fully-connected model - ''' + """ dl_dir = test_root_path - dl_file = str(dl_dir / "qonnx-tfc-2w2a.onnx") + dl_file = str(dl_dir / 'qonnx-tfc-2w2a.onnx') tfc_w2a2_qonnx_url = ( - "https://raw.githubusercontent.com/fastmachinelearning/" - "QONNX_model_zoo/main/models/MNIST/Brevitas_FINN_TFC/TFC/TFC_2W2A.onnx" + 'https://raw.githubusercontent.com/fastmachinelearning/' + 'QONNX_model_zoo/main/models/MNIST/Brevitas_FINN_TFC/TFC/TFC_2W2A.onnx' ) urllib.request.urlretrieve(tfc_w2a2_qonnx_url, dl_file) assert os.path.isfile(dl_file) - out_file = str(dl_dir / "qonnx-tfc-2w2a-clean.onnx") + out_file = str(dl_dir / 'qonnx-tfc-2w2a-clean.onnx') # cleanup qonnx.util.cleanup.cleanup(dl_file, out_file=out_file) @@ -44,20 +44,20 @@ def tfc_2w2a_model(): @pytest.fixture(scope='module') def cnv_2w2a_model(): - ''' + """ Load the small convolution model - ''' + """ dl_dir = test_root_path - dl_file = str(dl_dir / "qonnx-cnv-2w2a.onnx") + dl_file = str(dl_dir / 'qonnx-cnv-2w2a.onnx') cnv_w2a2_qonnx_url = ( - "https://raw.githubusercontent.com/fastmachinelearning/" - "QONNX_model_zoo/main/models/CIFAR10/Brevitas_FINN_CNV/CNV_2W2A.onnx" + 'https://raw.githubusercontent.com/fastmachinelearning/' + 'QONNX_model_zoo/main/models/CIFAR10/Brevitas_FINN_CNV/CNV_2W2A.onnx' ) urllib.request.urlretrieve(cnv_w2a2_qonnx_url, dl_file) assert os.path.isfile(dl_file) - out_clean = str(dl_dir / "qonnx-cnv-2w2a-clean.onnx") - out_chanlast = str(dl_dir / "qonnx-cnv-2w2a-clean-channels-last.onnx") - out_file = str(dl_dir / "qonnx-cnv-2w2a-clean-channels-last-clean.onnx") + out_clean = str(dl_dir / 'qonnx-cnv-2w2a-clean.onnx') + out_chanlast = str(dl_dir / 'qonnx-cnv-2w2a-clean-channels-last.onnx') + out_file = str(dl_dir / 'qonnx-cnv-2w2a-clean-channels-last-clean.onnx') # cleanup qonnx.util.cleanup.cleanup(dl_file, out_file=out_clean) @@ -69,18 +69,18 @@ def cnv_2w2a_model(): @pytest.fixture(scope='module') def jettagging_model(): - ''' + """ Load the 3 hidden layer QKeras example model trained on the jet tagging dataset - ''' + """ dl_dir = test_root_path - dl_file = str(dl_dir / "qkeras_jettagging.onnx") + dl_file = str(dl_dir / 'qkeras_jettagging.onnx') jet_tagging_qonnx_url = ( - "https://raw.githubusercontent.com/fastmachinelearning/" - "QONNX_model_zoo/main/models/JetTagging/QKeras_hls4ml_3layer/qkeras_jettagging.onnx" + 'https://raw.githubusercontent.com/fastmachinelearning/' + 'QONNX_model_zoo/main/models/JetTagging/QKeras_hls4ml_3layer/qkeras_jettagging.onnx' ) urllib.request.urlretrieve(jet_tagging_qonnx_url, dl_file) assert os.path.isfile(dl_file) - out_file = str(dl_dir / "qkeras_jettagging-clean.onnx") + out_file = str(dl_dir / 'qkeras_jettagging-clean.onnx') # cleanup qonnx.util.cleanup.cleanup(dl_file, out_file=out_file) @@ -93,7 +93,7 @@ def sep_conv_model(): """ Load separabale conv model, already channels-last and 
cleaned """ - dl_file = str(example_model_path / "onnx/separable_conv_model_ch_last.onnx") + dl_file = str(example_model_path / 'onnx/separable_conv_model_ch_last.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -106,7 +106,7 @@ def branched_model(): """ Load branched model using separable convs, already channels-last and cleaned """ - dl_file = str(example_model_path / "onnx/branched_model_ch_last.onnx") + dl_file = str(example_model_path / 'onnx/branched_model_ch_last.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -119,7 +119,7 @@ def tiny_unet_model(): """ Load tiny unet model, already channels-last and cleaned """ - dl_file = str(example_model_path / "onnx/tiny_unet_ch_last.onnx") + dl_file = str(example_model_path / 'onnx/tiny_unet_ch_last.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -132,7 +132,7 @@ def two_layer_keras_model(): """ Load a simple, two-layer, originally keras, unquantized model """ - dl_file = str(example_model_path / "onnx/two_layer_keras.onnx") + dl_file = str(example_model_path / 'onnx/two_layer_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -145,7 +145,7 @@ def three_layer_keras_model(): """ Load a simple, three-layer, originally keras, unquantized model """ - dl_file = str(example_model_path / "onnx/three_layer_keras.onnx") + dl_file = str(example_model_path / 'onnx/three_layer_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -158,7 +158,7 @@ def two_layer_pytorch_model(): """ Load a simple, two-layer, originally pytorch, unquantized model """ - dl_file = str(example_model_path / "onnx/two_layer_keras.onnx") + dl_file = str(example_model_path / 'onnx/two_layer_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -173,7 +173,7 @@ def three_layer_pytorch_model(): """ Load a simple, three-layer, originally pytorch, unquantized model """ - dl_file = str(example_model_path / "onnx/three_layer_pytorch.onnx") + dl_file = str(example_model_path / 'onnx/three_layer_pytorch.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -188,7 +188,7 @@ def conv1d_small_keras_model(): """ Load a simple conv1d, originally keras, unquantized model """ - dl_file = str(example_model_path / "onnx/conv1d_small_keras.onnx") + dl_file = str(example_model_path / 'onnx/conv1d_small_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -204,7 +204,7 @@ def conv2d_small_keras_model(): """ Load a simple conv2d, originally keras, unquantized model """ - dl_file = str(example_model_path / "onnx/conv2d_small_keras.onnx") + dl_file = str(example_model_path / 'onnx/conv2d_small_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -220,7 +220,7 @@ def conv2d_small_mp_keras_model(): """ Load a conv2d model with max pooling, originally keras, unquantized model """ - dl_file = str(example_model_path / "onnx/conv2d_small_mp_keras.onnx") + dl_file = str(example_model_path / 'onnx/conv2d_small_mp_keras.onnx') assert os.path.isfile(dl_file) model = ModelWrapper(dl_file) @@ -362,7 +362,6 @@ def test_branched_model(branched_model, backend): @pytest.mark.parametrize('backend', ['Vitis']) def test_tiny_unet_model(tiny_unet_model, backend): - model = tiny_unet_model ishape = tuple(model.get_tensor_shape(model.graph.input[0].name)) X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape) diff --git a/test/pytest/test_recurrent_pytorch.py b/test/pytest/test_recurrent_pytorch.py index 
index 6d2abf1be6..fb89d01ae3 100644
--- a/test/pytest/test_recurrent_pytorch.py
+++ b/test/pytest/test_recurrent_pytorch.py
@@ -47,7 +47,7 @@ def test_gru(backend, io_type):
     config = config_from_pytorch_model(
         model,
         [(None, 1, 10), (None, 1, 20)],
-        channels_last_conversion="off",
+        channels_last_conversion='off',
         transpose_outputs=False,
         default_precision='fixed<32,16>',
     )
@@ -74,7 +74,7 @@ def test_gru_stream(backend, io_type):
     pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy()

     config = config_from_pytorch_model(
-        model, (None, 1, 10), channels_last_conversion="off", transpose_outputs=False, default_precision='fixed<32,16>'
+        model, (None, 1, 10), channels_last_conversion='off', transpose_outputs=False, default_precision='fixed<32,16>'
     )

     output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_gru_{backend}_{io_type}')
@@ -125,7 +125,7 @@ def test_lstm(backend, io_type):
     config = config_from_pytorch_model(
         model,
         [(None, 1, 10), (None, 1, 20), (None, 1, 20)],
-        channels_last_conversion="off",
+        channels_last_conversion='off',
         transpose_outputs=False,
         default_precision='fixed<32,16>',
     )
@@ -151,7 +151,7 @@ def test_lstm(backend, io_type):
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI'])
 @pytest.mark.parametrize('io_type', ['io_stream'])
 def test_lstm_stream(backend, io_type):
-    if not (backend in ('Quartus', 'oneAPI') and io_type == "io_stream"):
+    if not (backend in ('Quartus', 'oneAPI') and io_type == 'io_stream'):
         model = LSTMStream()
         model.eval()

@@ -161,7 +161,7 @@ def test_lstm_stream(backend, io_type):
         pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy()

         config = config_from_pytorch_model(
-            model, [(None, 1, 10)], channels_last_conversion="off", transpose_outputs=False, default_precision='fixed<32,16>'
+            model, [(None, 1, 10)], channels_last_conversion='off', transpose_outputs=False, default_precision='fixed<32,16>'
         )

         output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_lstm_{backend}_{io_type}')
@@ -193,7 +193,7 @@ def forward(self, x, h0):
 @pytest.mark.parametrize('backend', ['Quartus', 'oneAPI'])
 @pytest.mark.parametrize('io_type', ['io_parallel'])
 def test_rnn(backend, io_type):
-    if not (backend in ('Quartus', 'oneAPI') and io_type == "io_stream"):
+    if not (backend in ('Quartus', 'oneAPI') and io_type == 'io_stream'):
         model = RNN()
         model.eval()

@@ -206,7 +206,7 @@ def test_rnn(backend, io_type):
         config = config_from_pytorch_model(
             model,
             [(1, 10), (1, 20)],
-            channels_last_conversion="off",
+            channels_last_conversion='off',
             transpose_outputs=False,
             default_precision='fixed<32,16>',
         )
diff --git a/test/pytest/test_report.py b/test/pytest/test_report.py
index 2410c3a88b..469cf466e1 100644
--- a/test/pytest/test_report.py
+++ b/test/pytest/test_report.py
@@ -131,12 +131,12 @@ def hls_model_setup(request, backend_configs, tmp_path):
     # to actually generate the reports (using Vivado 2020.1 or oneAPI 2025.0)
     # hls_model.build(**(backend_config['build']))

-    backend_config["copy_func"](output_dir, test_report_dir)
+    backend_config['copy_func'](output_dir, test_report_dir)

     yield output_dir, backend_config


-@pytest.mark.parametrize("hls_model_setup", ['Vivado', 'oneAPI'], indirect=True)
+@pytest.mark.parametrize('hls_model_setup', ['Vivado', 'oneAPI'], indirect=True)
 def test_report(hls_model_setup, capsys):
     """Tests that the report parsing and printing functions work for different backends."""
     output_dir, backend_config = hls_model_setup
diff --git a/test/pytest/test_sepconv2d.py b/test/pytest/test_sepconv2d.py
index 76005d5063..678447749a 100644
--- a/test/pytest/test_sepconv2d.py
+++ b/test/pytest/test_sepconv2d.py
@@ -56,7 +56,7 @@ def test_sepconv2d(chans, padds, strides, kernels, bias, io_type, backend, strat
     X_input = np.random.rand(100, *input_shape)
     keras_prediction = model.predict(X_input)
     config = hls4ml.utils.config_from_keras_model(
-        model, default_precision='ap_fixed<32,8>', granularity="name", backend=backend
+        model, default_precision='ap_fixed<32,8>', granularity='name', backend=backend
     )
     config['Model']['Strategy'] = strategy
     config['Model']['ReuseFactor'] = rf
diff --git a/test/pytest/test_sr.py b/test/pytest/test_sr.py
index 272450b658..881e91ed44 100644
--- a/test/pytest/test_sr.py
+++ b/test/pytest/test_sr.py
@@ -76,7 +76,6 @@ def test_pysr_luts(data):
 @pytest.mark.parametrize('clock_unc', ['15%', None])
 @pytest.mark.parametrize('compiler', ['vivado_hls', 'vitis_hls'])
 def test_sr_backend_config(part, clock_period, clock_unc, compiler):
-
     expr = 'x0**2 + 2.5382*cos_lut(x3) - 0.5'

     if clock_unc is not None:
diff --git a/test/pytest/test_trace.py b/test/pytest/test_trace.py
index b01cfcd010..152d3ec2f4 100644
--- a/test/pytest/test_trace.py
+++ b/test/pytest/test_trace.py
@@ -14,7 +14,7 @@
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
 @pytest.mark.parametrize('activation', ['relu', None])
 def test_trace(backend, activation):
-    '''Test the tracing feature with a simple Keras model.'''
+    """Test the tracing feature with a simple Keras model."""
     model = tf.keras.models.Sequential()
     model.add(
         Dense(
diff --git a/test/pytest/test_types.py b/test/pytest/test_types.py
index 8f4857fec9..92d9532a08 100644
--- a/test/pytest/test_types.py
+++ b/test/pytest/test_types.py
@@ -79,7 +79,7 @@ def test_precision_type_creation(capsys):
     ],
 )
 def test_sign_parsing(prec_pair):
-    '''Test that convert_precisions_string determines the signedness correctly'''
+    """Test that convert_precisions_string determines the signedness correctly"""
     strprec = prec_pair[0]
     signed = prec_pair[1]
diff --git a/test/pytest/test_upsampling_pytorch.py b/test/pytest/test_upsampling_pytorch.py
index 165f44e259..07689ba80b 100644
--- a/test/pytest/test_upsampling_pytorch.py
+++ b/test/pytest/test_upsampling_pytorch.py
@@ -59,7 +59,7 @@ def test_pytorch_upsampling1d(data_1d, io_type, backend):
         model,
         (None, in_feat, in_width),
         default_precision='ap_fixed<16,6>',
-        channels_last_conversion="internal",
+        channels_last_conversion='internal',
         transpose_outputs=False,
     )
     odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_1d_{backend}_{io_type}')
@@ -89,7 +89,7 @@ def test_pytorch_upsampling2d(data_2d, io_type, backend):
         model,
         (in_feat, in_height, in_width),
         default_precision='ap_fixed<16,6>',
-        channels_last_conversion="full",  # With conversion to channels_last
+        channels_last_conversion='full',  # With conversion to channels_last
         transpose_outputs=True,
     )
     odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_2d_{backend}_{io_type}')
diff --git a/test/pytest/test_writer_config.py b/test/pytest/test_writer_config.py
index 61c9c03a0d..ba956c2e15 100644
--- a/test/pytest/test_writer_config.py
+++ b/test/pytest/test_writer_config.py
@@ -23,7 +23,6 @@ def keras_model():
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])  # No Quartus for now
 @pytest.mark.parametrize('namespace', [None, 'test_namespace'])
 def test_namespace(keras_model, namespace, io_type, backend):
-
     use_namespace = namespace is None
     config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name')
     odir = str(test_root_path / f'hls4mlprj_namespace_{use_namespace}_{backend}_{io_type}')
@@ -36,7 +35,6 @@ def test_namespace(keras_model, namespace, io_type, backend):
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])  # No Quartus for now
 @pytest.mark.parametrize('write_tar', [True, False])
 def test_write_tar(keras_model, write_tar, backend):
-
     config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name')
     odir = str(test_root_path / f'hls4mlprj_write_tar_{write_tar}_{backend}')
@@ -55,7 +53,6 @@ def test_write_tar(keras_model, write_tar, backend):
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])  # No Quartus for now
 @pytest.mark.parametrize('write_weights_txt', [True, False])
 def test_write_weights_txt(keras_model, write_weights_txt, backend):
-
     config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name')
     odir = str(test_root_path / f'hls4mlprj_write_weights_txt_{write_weights_txt}_{backend}')
@@ -75,7 +72,6 @@ def test_write_weights_txt(keras_model, write_weights_txt, backend):
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
 @pytest.mark.parametrize('tb_output_stream', ['stdout', 'file', 'both'])
 def test_tb_output_stream(capfd, keras_model, tb_output_stream, backend):
-
     config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name')
     odir = str(test_root_path / f'hls4mlprj_tb_output_stream_{tb_output_stream}_{backend}')
     if os.path.exists(odir):