RuntimeError: Failed to import transformers.modeling_utils because of the following error (look up to see its traceback): cannot import name 'DiagnosticOptions' from 'torch.onnx._internal.exporter' #6888
Labels: solved (this problem has already been solved)
System Info
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/my_qwen2vl_full_sft.yaml
/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/onnxscript/converter.py:823: FutureWarning: 'onnxscript.values.Op.param_schemas' is deprecated in version 0.1 and will be removed in the future. Please use '.op_signature' instead.
param_schemas = callee.param_schemas()
/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/onnxscript/converter.py:823: FutureWarning: 'onnxscript.values.OnnxFunction.param_schemas' is deprecated in version 0.1 and will be removed in the future. Please use '.op_signature' instead.
param_schemas = callee.param_schemas()
Traceback (most recent call last):
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1852, in _get_module
return importlib.import_module("." + module_name, self.name)
File "/opt/miniconda3/envs/pytorch/lib/python3.10/importlib/init.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "", line 1050, in _gcd_import
File "", line 1027, in _find_and_load
File "", line 1006, in _find_and_load_unlocked
File "", line 688, in _load_unlocked
File "", line 883, in exec_module
File "", line 241, in _call_with_frames_removed
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/modeling_utils.py", line 52, in
from .loss.loss_utils import LOSS_MAPPING
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/loss/loss_utils.py", line 19, in
from .loss_deformable_detr import DeformableDetrForObjectDetectionLoss, DeformableDetrForSegmentationLoss
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/loss/loss_deformable_detr.py", line 4, in
from ..image_transforms import center_to_corners_format
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/image_transforms.py", line 22, in
from .image_utils import (
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/image_utils.py", line 65, in
from torchvision import io as torchvision_io
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torchvision/init.py", line 10, in
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils # usort:skip
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torchvision/models/init.py", line 2, in
from .convnext import *
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torchvision/models/convnext.py", line 8, in
from ..ops.misc import Conv2dNormActivation, Permute
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torchvision/ops/init.py", line 1, in
from ._register_onnx_ops import _register_custom_op
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torchvision/ops/_register_onnx_ops.py", line 5, in
from torch.onnx import symbolic_opset11 as opset11
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/onnx/init.py", line 49, in
from ._internal.exporter import ( # usort:skip. needs to be last to avoid circular import
ImportError: cannot import name 'DiagnosticOptions' from 'torch.onnx._internal.exporter' (/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/onnx/_internal/exporter/init.py)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/miniconda3/envs/pytorch/bin/llamafactory-cli", line 5, in
from llamafactory.cli import main
File "/mnt/si003189dcm8/pretrain/LLaMA-Factory/src/llamafactory/init.py", line 44, in
from .extras.env import VERSION
File "/mnt/si003189dcm8/pretrain/LLaMA-Factory/src/llamafactory/extras/env.py", line 22, in
import peft
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/init.py", line 22, in
from .auto import (
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/auto.py", line 32, in
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/mapping.py", line 22, in
from peft.tuners.xlora.model import XLoraModel
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/tuners/init.py", line 21, in
from .lora import LoraConfig, LoraModel, LoftQConfig, LoraRuntimeConfig
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/tuners/lora/init.py", line 18, in
from .gptq import QuantLinear
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/tuners/lora/gptq.py", line 19, in
from peft.tuners.lora.layer import LoraLayer
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/tuners/lora/layer.py", line 26, in
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/peft/tuners/tuners_utils.py", line 29, in
from transformers import PreTrainedModel
File "", line 1075, in _handle_fromlist
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1840, in getattr
module = self._get_module(self._class_to_module[name])
File "/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/transformers/utils/import_utils.py", line 1854, in _get_module
raise RuntimeError(
RuntimeError: Failed to import transformers.modeling_utils because of the following error (look up to see its traceback):
cannot import name 'DiagnosticOptions' from 'torch.onnx._internal.exporter' (/opt/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/onnx/_internal/exporter/init.py)
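For what it's worth, the traceback suggests the failure is not specific to LLaMA-Factory: the error is raised as soon as peft pulls `PreTrainedModel` out of transformers. A minimal reproduction sketch, assuming the same environment as the log above:

```python
# Minimal reproduction sketch, assuming the same environment as the log above.
# Per the traceback, the RuntimeError fires the moment transformers resolves
# PreTrainedModel, so none of the LLaMA-Factory code is needed to hit it.
try:
    from transformers import PreTrainedModel  # noqa: F401
    print("transformers imported fine")
except RuntimeError as err:
    print(f"import failed: {err}")
```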
【script】
### model
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
image_resolution: 262144
video_resolution: 16384
trust_remote_code: true
### method
stage: sft
do_train: true
finetuning_type: full
freeze_vision_tower: true # choices: [true, false]
freeze_multi_modal_projector: true # choices: [true, false]
train_mm_proj_only: false # choices: [true, false]
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
### dataset
dataset: mllm_demo
template: qwen2_vl
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/qwen2.5_vl-7b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
【/script】
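For completeness, the config above parses as plain YAML; a quick check sketch (run from the LLaMA-Factory root, path taken from the command at the top):

```python
# Optional check: confirm the YAML above loads and echo a few key fields.
# Assumes the working directory is the LLaMA-Factory repo root.
import yaml

with open("examples/train_full/my_qwen2vl_full_sft.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["model_name_or_path"], cfg["finetuning_type"], cfg["deepspeed"])
```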
【env】
accelerate 1.2.1
aiofiles 23.2.1
aiohappyeyeballs 2.4.6
aiohttp 3.11.12
aiosignal 1.3.2
annotated-types 0.7.0
anyio 4.8.0
async-timeout 5.0.1
attrs 25.1.0
audioread 3.0.1
av 14.1.0
Brotli 1.0.9
certifi 2025.1.31
cffi 1.17.1
charset-normalizer 3.3.2
click 8.1.8
contourpy 1.3.1
cycler 0.12.1
datasets 3.2.0
decorator 5.1.1
deepspeed 0.16.3
dill 0.3.8
docstring_parser 0.16
einops 0.8.1
et_xmlfile 2.0.0
exceptiongroup 1.2.2
fastapi 0.115.8
ffmpy 0.5.0
filelock 3.13.1
fire 0.7.0
flash_attn 2.7.4.post1
fonttools 4.56.0
frozenlist 1.5.0
fsspec 2024.2.0
gmpy2 2.2.1
gradio 5.12.0
gradio_client 1.5.4
h11 0.14.0
hjson 3.1.0
httpcore 1.0.7
httpx 0.28.1
huggingface-hub 0.28.1
idna 3.10
jieba 0.42.1
Jinja2 3.1.3
joblib 1.4.2
kiwisolver 1.4.8
lazy_loader 0.4
librosa 0.10.2.post1
llamafactory 0.9.2.dev0 /mnt/si003189dcm8/pretrain/LLaMA-Factory
llvmlite 0.44.0
markdown-it-py 3.0.0
MarkupSafe 2.1.5
matplotlib 3.10.0
mdurl 0.1.2
mkl_fft 1.3.11
mkl_random 1.2.8
mkl-service 2.4.0
ml_dtypes 0.5.1
mpmath 1.3.0
msgpack 1.1.0
multidict 6.1.0
multiprocess 0.70.16
networkx 3.2.1
ninja 1.11.1.3
nltk 3.9.1
numba 0.61.0
numpy 1.26.3
nvidia-cublas-cu12 12.1.3.1
nvidia-cuda-cupti-cu12 12.1.105
nvidia-cuda-nvrtc-cu12 12.1.105
nvidia-cuda-runtime-cu12 12.1.105
nvidia-cudnn-cu12 9.1.0.70
nvidia-cufft-cu12 11.0.2.54
nvidia-curand-cu12 10.3.2.106
nvidia-cusolver-cu12 11.4.5.107
nvidia-cusparse-cu12 12.1.0.106
nvidia-ml-py 12.570.86
nvidia-nccl-cu12 2.21.5
nvidia-nvjitlink-cu12 12.1.105
nvidia-nvtx-cu12 12.1.105
onnx 1.17.0
onnxscript 0.1.0
openpyxl 3.1.5
orjson 3.10.15
packaging 24.2
pandas 2.2.3
peft 0.12.0
pillow 10.2.0
pip 25.0.1
platformdirs 4.3.6
pooch 1.8.2
propcache 0.2.1
protobuf 3.20.3
psutil 6.1.1
py-cpuinfo 9.0.0
pyarrow 19.0.0
pycparser 2.22
pydantic 2.10.6
pydantic_core 2.27.2
pydub 0.25.1
Pygments 2.19.1
pyparsing 3.2.1
PySocks 1.7.1
python-dateutil 2.9.0.post0
python-multipart 0.0.20
pytz 2025.1
PyYAML 6.0.2
regex 2024.11.6
requests 2.32.3
rich 13.9.4
rouge-chinese 1.0.3
ruff 0.9.5
safehttpx 0.1.6
safetensors 0.5.2
scikit-learn 1.6.1
scipy 1.15.1
semantic-version 2.10.0
sentencepiece 0.2.0
setuptools 75.1.0
shellingham 1.5.4
shtab 1.7.1
six 1.17.0
sniffio 1.3.1
soundfile 0.13.1
soxr 0.5.0.post1
sse-starlette 2.2.1
starlette 0.45.3
sympy 1.13.1
termcolor 2.5.0
threadpoolctl 3.5.0
tiktoken 0.8.0
tokenizers 0.21.0
tomlkit 0.13.2
torch 2.4.0
torchaudio 2.4.0
torchvision 0.19.0
tqdm 4.67.1
transformers 4.49.0.dev0
triton 3.0.0
trl 0.9.6
typer 0.15.1
typing_extensions 4.12.2
tyro 0.8.14
tzdata 2025.1
urllib3 2.3.0
uvicorn 0.34.0
websockets 14.2
wheel 0.44.0
xxhash 3.5.0
yarl 1.18.3
【/env】
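In case it helps triage, here is a small check run in the same environment that prints the relevant versions from the list above and inspects what actually sits at torch/onnx/_internal/exporter on disk. The expectation that torch 2.4.0 ships exporter.py as a single module (so an exporter/ package directory would be a leftover from a different torch build) is my assumption, not confirmed here:

```python
# Sanity-check sketch for the environment above (assumptions noted in comments).
import os
from importlib.metadata import version

import torch  # plain "import torch" still works per the traceback; only torch.onnx fails

for pkg in ("torch", "torchvision", "transformers", "peft", "onnxscript"):
    print(pkg, version(pkg))

# Assumption: torch 2.4.0 provides torch/onnx/_internal/exporter.py as a module,
# so an exporter/ directory here would shadow it (e.g. leftovers from another
# torch install) and would explain the missing DiagnosticOptions import.
exporter = os.path.join(os.path.dirname(torch.__file__), "onnx", "_internal", "exporter")
print("exporter.py file exists:", os.path.isfile(exporter + ".py"))
print("exporter/ package exists:", os.path.isdir(exporter))
```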
Reproduction
Others
No response