from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer
model_checkpoint = "Qwen/Qwen2-7B"
save_directory = "Qwen2-7B"
# Load a model from transformers and export it to ONNX
ort_model = ORTModelForCausalLM.from_pretrained(model_checkpoint, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# Save the onnx model and tokenizer
ort_model.save_pretrained(save_directory)
tokenizer.save_pretrained(save_directory)
System Info
Who can help?
No response
Information
Tasks
An officially supported task in the examples folder (such as GLUE/SQuAD, ...)
Reproduction (minimal, reproducible, runnable)
I am getting an error while trying to convert Qwen2-7B to ONNX using the steps provided in https://huggingface.co/docs/transformers/en/serialization. The export script is shown at the top of this issue; running it fails with the following traceback:
Traceback (most recent call last):
File "/home/sr//workspace/scripts/qwen_scripts/export_onnx.py", line 8, in
ort_model = ORTModelForCausalLM.from_pretrained(model_checkpoint, export=True)
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/onnxruntime/modeling_ort.py", line 737, in from_pretrained
return super().from_pretrained(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/modeling_base.py", line 438, in from_pretrained
return from_pretrained_method(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/onnxruntime/modeling_decoder.py", line 654, in _from_transformers
main_export(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/exporters/onnx/main.py", line 373, in main_export
onnx_export_from_model(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/exporters/onnx/convert.py", line 1196, in onnx_export_from_model
_, onnx_outputs = export_models(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/exporters/onnx/convert.py", line 786, in export_models
export(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/exporters/onnx/convert.py", line 891, in export
export_output = export_pytorch(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/optimum/exporters/onnx/convert.py", line 586, in export_pytorch
onnx_export(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/torch/onnx/init.py", line 375, in export
export(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/torch/onnx/utils.py", line 502, in export
_export(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/torch/onnx/utils.py", line 1564, in _export
graph, params_dict, torch_out = _model_to_graph(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/torch/onnx/utils.py", line 1117, in _model_to_graph
graph = _optimize_graph(
File "/home/sr//conda_env/anaconda3/envs/vaiq_onnx/lib/python3.9/site-packages/torch/onnx/utils.py", line 663, in _optimize_graph
_C._jit_pass_onnx_graph_shape_type_inference(
RuntimeError: The serialized model is larger than the 2GiB limit imposed by the protobuf library. Therefore the output file must be a file path, so that the ONNX external data can be written to the same directory. Please specify the output file name.
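For context, protobuf cannot serialize a single message larger than 2 GiB, and Qwen2-7B has on the order of 7.6B parameters, so the graph plus weights can never fit into one self-contained .onnx protobuf; ONNX handles this by keeping only references in the protobuf and writing the tensor payloads to external files next to the model, which is why the exporter needs a real output path. A back-of-the-envelope check (the parameter count below is an approximation I am assuming, not an exact figure):

# Rough illustration of why the 2 GiB protobuf cap from the error above is unavoidable here.
PROTOBUF_LIMIT_BYTES = 2 * 1024 ** 3   # 2 GiB cap on a single serialized protobuf message
approx_params = 7.6e9                  # assumed rough Qwen2-7B parameter count
bytes_fp32 = approx_params * 4         # ~30 GB of tensor data when exported in float32
print(bytes_fp32 / PROTOBUF_LIMIT_BYTES)  # ~14x over the limit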
Expected behavior
An ONNX model larger than 2 GiB should be exported successfully, with its weights written to external data files alongside the .onnx file.
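Concretely, the external-data mechanism the error message refers to keeps the graph in a small .onnx protobuf and moves the tensor payloads into one or more sidecar files in the same directory, which is what I would expect the exporter to do automatically for a model of this size. Below is a minimal sketch of that mechanism using the standard onnx API, just to illustrate the expected on-disk layout; the file names are placeholders, and it assumes a ModelProto that onnx.load can already read:

import onnx

# Placeholder path; assumes a ModelProto that fits in memory or already uses external data.
model = onnx.load("qwen2_7b_tmp.onnx")

onnx.save_model(
    model,
    "qwen2_7b.onnx",
    save_as_external_data=True,     # move tensor payloads out of the protobuf
    all_tensors_to_one_file=True,   # one sidecar file instead of one file per tensor
    location="qwen2_7b.onnx.data",  # written next to qwen2_7b.onnx
    size_threshold=1024,            # only externalize tensors larger than 1 KiB
)

# onnx.load("qwen2_7b.onnx") later resolves the external data automatically
# as long as the .data file stays in the same directory as the .onnx file.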