Skip to content

Commit b515c1b

Browse files
resolve redundant merge code
Signed-off-by: Brian Dellabetta <[email protected]>
1 parent 712a731 commit b515c1b

File tree

1 file changed

+1
-6
lines changed
  • src/compressed_tensors/quantization/lifecycle/apply.py

1 file changed

+1
-6
lines changed

src/compressed_tensors/quantization/lifecycle/apply.py

Lines changed: 1 addition & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -125,6 +125,7 @@ def apply_quantization_config(
125125
:param run_compressed: Whether the model will be run in compressed mode or
126126
decompressed fully on load
127127
"""
128+
from compressed_tensors.linear.compressed_linear import CompressedLinear
128129

129130
config = deepcopy(config)
130131
if config is None: # see PR #180
@@ -148,7 +149,6 @@ def apply_quantization_config(
148149
# quant scheme to the matching layers
149150
matched_targets = match_targets(name, submodule, target_to_scheme)
150151
scheme = _scheme_from_targets(target_to_scheme, matched_targets, name)
151-
152152
# target matched - add layer and scheme to target list
153153
submodule.quantization_scheme = scheme
154154

@@ -159,8 +159,6 @@ def apply_quantization_config(
159159
and isinstance(submodule, torch.nn.Linear)
160160
and config.format != CompressionFormat.dense.value
161161
):
162-
from compressed_tensors.linear.compressed_linear import CompressedLinear
163-
164162
# TODO: expand to more module types
165163
compressed_linear = CompressedLinear.from_linear(
166164
submodule,
@@ -169,9 +167,6 @@ def apply_quantization_config(
169167
)
170168
replace_module(model, name, compressed_linear)
171169

172-
# target matched - add layer and scheme to target list
173-
submodule.quantization_scheme = scheme
174-
175170
# apply current quantization status to each targeted submodule
176171
apply_quantization_status(submodule, config.quantization_status)
177172

0 commit comments

Comments (0)