from huggingface_hub.utils import validate_hf_hub_args
from typing_extensions import Self

+from .. import __version__
from ..quantizers import DiffusersAutoQuantizer
from ..utils import deprecate, is_accelerate_available, logging
from .single_file_utils import (
@@ -260,6 +261,11 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
        device = kwargs.pop("device", None)
        disable_mmap = kwargs.pop("disable_mmap", False)

+        user_agent = {"diffusers": __version__, "file_type": "single_file", "framework": "pytorch"}
+        # Report the quantization method so popular methods can be better supported; can be disabled with `disable_telemetry`.
+        if quantization_config is not None:
+            user_agent["quant"] = quantization_config.quant_method.value
+
        if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
            torch_dtype = torch.float32
            logger.warning(
@@ -278,6 +284,7 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
                local_files_only=local_files_only,
                revision=revision,
                disable_mmap=disable_mmap,
+                user_agent=user_agent,
            )
        if quantization_config is not None:
            hf_quantizer = DiffusersAutoQuantizer.from_config(quantization_config)
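
For context, a minimal usage sketch (not part of the diff) of how the new telemetry fields get populated when a quantized single-file load is performed. The model class, checkpoint URL, and dtype below are illustrative assumptions, not taken from this change:

import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

# Any supported quantization config works; BitsAndBytesConfig is just an example.
quant_config = BitsAndBytesConfig(load_in_4bit=True)

model = FluxTransformer2DModel.from_single_file(
    "https://huggingface.co/<repo>/<checkpoint>.safetensors",  # placeholder path
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
# With quantization_config set, the hub request's user agent would include:
# {"diffusers": <version>, "file_type": "single_file", "framework": "pytorch", "quant": "bitsandbytes"}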