diff --git a/test_benchmark/PyTorch/AlexNet/pd_infer.py b/test_benchmark/PyTorch/AlexNet/pd_infer.py
index b041af0cf..d9aec6812 100644
--- a/test_benchmark/PyTorch/AlexNet/pd_infer.py
+++ b/test_benchmark/PyTorch/AlexNet/pd_infer.py
@@ -1,5 +1,4 @@
 from __future__ import print_function
-import paddle.fluid as fluid
 import paddle
 import sys
 import os
@@ -16,11 +15,8 @@
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -31,11 +27,8 @@
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result
     if numpy.max(numpy.fabs(df)) > 1e-05:
diff --git a/test_benchmark/PyTorch/BASNet/deploy_infer.py b/test_benchmark/PyTorch/BASNet/deploy_infer.py
index d86690068..76d951a12 100644
--- a/test_benchmark/PyTorch/BASNet/deploy_infer.py
+++ b/test_benchmark/PyTorch/BASNet/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/BASNet/pd_infer.py b/test_benchmark/PyTorch/BASNet/pd_infer.py
index bf018c407..d46564325 100644
--- a/test_benchmark/PyTorch/BASNet/pd_infer.py
+++ b/test_benchmark/PyTorch/BASNet/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -16,11 +16,8 @@
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     result_string = "Trace Successed"
     for i in range(len(result)):
diff --git a/test_benchmark/PyTorch/DBFace/deploy_infer.py b/test_benchmark/PyTorch/DBFace/deploy_infer.py
index fb2f0bde2..d53b6fc9f 100644
--- a/test_benchmark/PyTorch/DBFace/deploy_infer.py
+++ b/test_benchmark/PyTorch/DBFace/deploy_infer.py
@@ -5,7 +5,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/DBFace/pd_infer.py b/test_benchmark/PyTorch/DBFace/pd_infer.py
index f6aeb5499..60f1671fb 100644
--- a/test_benchmark/PyTorch/DBFace/pd_infer.py
+++ b/test_benchmark/PyTorch/DBFace/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import common
 import numpy as np
@@ -23,11 +23,8 @@
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: image}, fetch_list=outputs)
 
     df0 = pytorch_output["hm"] - result[0]
diff --git a/test_benchmark/PyTorch/EDSR/deploy_infer.py b/test_benchmark/PyTorch/EDSR/deploy_infer.py
index 1b583962f..1975bd8c2 100644
--- a/test_benchmark/PyTorch/EDSR/deploy_infer.py
+++ b/test_benchmark/PyTorch/EDSR/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/EDSR/pd_infer.py b/test_benchmark/PyTorch/EDSR/pd_infer.py
index d15f86a11..249b5fd96 100644
--- a/test_benchmark/PyTorch/EDSR/pd_infer.py
+++ b/test_benchmark/PyTorch/EDSR/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -13,11 +13,8 @@
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: img}, fetch_list=outputs)
     df = pytorch_output - result
     if np.max(np.fabs(df)) > 1e-03:
@@ -28,11 +25,8 @@
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: img}, fetch_list=outputs)
     df = pytorch_output - result
     if np.max(np.fabs(df)) > 1e-03:
diff --git a/test_benchmark/PyTorch/ElectraModel/deploy_infer.py b/test_benchmark/PyTorch/ElectraModel/deploy_infer.py
index 4f04524d7..dc8d247a5 100644
--- a/test_benchmark/PyTorch/ElectraModel/deploy_infer.py
+++ b/test_benchmark/PyTorch/ElectraModel/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/ElectraModel/pd_infer.py b/test_benchmark/PyTorch/ElectraModel/pd_infer.py
index f00efff16..a624664a6 100644
--- a/test_benchmark/PyTorch/ElectraModel/pd_infer.py
+++ b/test_benchmark/PyTorch/ElectraModel/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,9 @@ def rel_err(x, y):
 try:
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs
-     ] = fluid.io.load_inference_model(dirname="pd_model/inference_model/",
-                                       executor=exe,
-                                       model_filename="model.pdmodel",
-                                       params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model/inference_model/model", executor=exe)
+
     result = exe.run(prog,
                      feed={
                          inputs[0]: input_data["input_ids"],
diff --git a/test_benchmark/PyTorch/FlaubertModel/deploy_infer.py b/test_benchmark/PyTorch/FlaubertModel/deploy_infer.py
index 0897bff48..0bfdf69b6 100644
--- a/test_benchmark/PyTorch/FlaubertModel/deploy_infer.py
+++ b/test_benchmark/PyTorch/FlaubertModel/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/FlaubertModel/pd_infer.py b/test_benchmark/PyTorch/FlaubertModel/pd_infer.py
index 34364fc1e..3c53bca42 100644
--- a/test_benchmark/PyTorch/FlaubertModel/pd_infer.py
+++ b/test_benchmark/PyTorch/FlaubertModel/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,9 @@ def rel_err(x, y):
 try:
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs
-     ] = fluid.io.load_inference_model(dirname="pd_model/inference_model/",
-                                       executor=exe,
-                                       model_filename="model.pdmodel",
-                                       params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model/inference_model/model", executor=exe)
+
     result = exe.run(prog,
                      feed={
                          inputs[0]: input_data["input_ids"],
diff --git a/test_benchmark/PyTorch/InceptionV3/deploy_infer.py b/test_benchmark/PyTorch/InceptionV3/deploy_infer.py
index ef3b88a7c..1eb859880 100644
--- a/test_benchmark/PyTorch/InceptionV3/deploy_infer.py
+++ b/test_benchmark/PyTorch/InceptionV3/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/InceptionV3/pd_infer.py b/test_benchmark/PyTorch/InceptionV3/pd_infer.py
index a1cd035aa..f48f30f10 100644
--- a/test_benchmark/PyTorch/InceptionV3/pd_infer.py
+++ b/test_benchmark/PyTorch/InceptionV3/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/MNasNet/deploy_infer.py b/test_benchmark/PyTorch/MNasNet/deploy_infer.py
index 2113a59b7..47220a1f4 100644
--- a/test_benchmark/PyTorch/MNasNet/deploy_infer.py
+++ b/test_benchmark/PyTorch/MNasNet/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/MNasNet/pd_infer.py b/test_benchmark/PyTorch/MNasNet/pd_infer.py
index 294c440dc..f8fac93ef 100644
--- a/test_benchmark/PyTorch/MNasNet/pd_infer.py
+++ b/test_benchmark/PyTorch/MNasNet/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/MobileNetV2/deploy_infer.py b/test_benchmark/PyTorch/MobileNetV2/deploy_infer.py
index c098217ae..569fa7e04 100644
--- a/test_benchmark/PyTorch/MobileNetV2/deploy_infer.py
+++ b/test_benchmark/PyTorch/MobileNetV2/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/MobileNetV2/pd_infer.py b/test_benchmark/PyTorch/MobileNetV2/pd_infer.py
index 18758f965..7606b6047 100644
--- a/test_benchmark/PyTorch/MobileNetV2/pd_infer.py
+++ b/test_benchmark/PyTorch/MobileNetV2/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/ResNet18/deploy_infer.py b/test_benchmark/PyTorch/ResNet18/deploy_infer.py
index fe49099ac..e86853f4e 100644
--- a/test_benchmark/PyTorch/ResNet18/deploy_infer.py
+++ b/test_benchmark/PyTorch/ResNet18/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/ResNet18/pd_infer.py b/test_benchmark/PyTorch/ResNet18/pd_infer.py
index fa20652f1..718ffe5b7 100644
--- a/test_benchmark/PyTorch/ResNet18/pd_infer.py
+++ b/test_benchmark/PyTorch/ResNet18/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/ResNet18_2/deploy_infer.py b/test_benchmark/PyTorch/ResNet18_2/deploy_infer.py
index 9ccfb9ade..cd66b803d 100644
--- a/test_benchmark/PyTorch/ResNet18_2/deploy_infer.py
+++ b/test_benchmark/PyTorch/ResNet18_2/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/ResNet18_2/pd_infer.py b/test_benchmark/PyTorch/ResNet18_2/pd_infer.py
index 0e0be1741..dfc2ba640 100644
--- a/test_benchmark/PyTorch/ResNet18_2/pd_infer.py
+++ b/test_benchmark/PyTorch/ResNet18_2/pd_infer.py
@@ -1,5 +1,5 @@
 import paddle
-import paddle.fluid as fluid
+
 import numpy as np
 import pickle
 import sys
@@ -11,11 +11,8 @@
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
     # test trace
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     data = np.load('../dataset/ResNet18_2/input.npy')
     result = exe.run(prog, feed={inputs[0]: data}, fetch_list=outputs)
 
diff --git a/test_benchmark/PyTorch/ShuffleNetV2/deploy_infer.py b/test_benchmark/PyTorch/ShuffleNetV2/deploy_infer.py
index 35752a997..b011be2b3 100644
--- a/test_benchmark/PyTorch/ShuffleNetV2/deploy_infer.py
+++ b/test_benchmark/PyTorch/ShuffleNetV2/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/ShuffleNetV2/pd_infer.py b/test_benchmark/PyTorch/ShuffleNetV2/pd_infer.py
index 63ec1e58f..7572384e9 100644
--- a/test_benchmark/PyTorch/ShuffleNetV2/pd_infer.py
+++ b/test_benchmark/PyTorch/ShuffleNetV2/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/SqueezeNet/deploy_infer.py b/test_benchmark/PyTorch/SqueezeNet/deploy_infer.py
index 85fa1adad..b7d01981b 100644
--- a/test_benchmark/PyTorch/SqueezeNet/deploy_infer.py
+++ b/test_benchmark/PyTorch/SqueezeNet/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/SqueezeNet/pd_infer.py b/test_benchmark/PyTorch/SqueezeNet/pd_infer.py
index 06f50a893..942f43186 100644
--- a/test_benchmark/PyTorch/SqueezeNet/pd_infer.py
+++ b/test_benchmark/PyTorch/SqueezeNet/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/VGG16/deploy_infer.py b/test_benchmark/PyTorch/VGG16/deploy_infer.py
index ff61a9611..3c8b8d611 100644
--- a/test_benchmark/PyTorch/VGG16/deploy_infer.py
+++ b/test_benchmark/PyTorch/VGG16/deploy_infer.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/VGG16/pd_infer.py b/test_benchmark/PyTorch/VGG16/pd_infer.py
index d452fe283..30da19671 100644
--- a/test_benchmark/PyTorch/VGG16/pd_infer.py
+++ b/test_benchmark/PyTorch/VGG16/pd_infer.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import paddle.fluid as fluid
+
 import paddle
 import sys
 import os
@@ -22,11 +22,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
@@ -37,11 +34,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df = pytorch_output - result[0]
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/test_benchmark/PyTorch/black.list b/test_benchmark/PyTorch/black.list
index fae6e32d4..28f2493d5 100644
--- a/test_benchmark/PyTorch/black.list
+++ b/test_benchmark/PyTorch/black.list
@@ -1,35 +1,20 @@
 ACG_UnitTest
-AlexNet
-BASNet
 BertForMaskedLM_dccuchile
 BertModel_SpanBert
 CamembertForQuestionAnswering
-DBFace
 DPRContextEncoder
 DeepLabv3_ResNet50
-EDSR
 EasyOCR_detector
 EasyOCR_recognizer
-ElectraModel
 FCN_ResNet50
-FlaubertModel
 GRU
-InceptionV3
-MNasNet
 MiniFasNet
-MobileNetV2
 Mobilestereonet
 MockingBird
-ResNet18
-ResNet18_2
 Roberta
 Saicinpainting_LaMa
-ShuffleNetV2
-SqueezeNet
 SwinTransformer
-VGG16
 XLMRobertaForTokenClassification
-mobilenetv3
 opadd
 dataset
 tools
diff --git a/test_benchmark/PyTorch/convert.sh b/test_benchmark/PyTorch/convert.sh
index 4c80dfed0..0c62c7138 100644
--- a/test_benchmark/PyTorch/convert.sh
+++ b/test_benchmark/PyTorch/convert.sh
@@ -32,7 +32,9 @@ do
     touch result.txt
    echo $model ">>>Failed"> result.txt
 
-    sh run_convert.sh 1>run.log 2>run.err &
+    # TODO(megemini): debug
+    # sh run_convert.sh 1>run.log 2>run.err &
+    sh run_convert.sh
     cd ..
     counter=$(($counter+1))
     step=$(( $counter % 1 ))
diff --git a/test_benchmark/PyTorch/mobilenetv3/convert.py b/test_benchmark/PyTorch/mobilenetv3/convert.py
index 23990d22c..ba5277f7d 100644
--- a/test_benchmark/PyTorch/mobilenetv3/convert.py
+++ b/test_benchmark/PyTorch/mobilenetv3/convert.py
@@ -8,7 +8,7 @@
 modelFile = '../dataset/mobilenetv3/MobileNetV3_large.pth'
 torch_model = mobilenet_v3_large(num_classes=n_classes)  #模型结构
 #加载模型
-checkpoint = torch.load(modelFile, map_location='cuda:0')
+checkpoint = torch.load(modelFile, map_location='cpu')
 torch_model.load_state_dict(checkpoint)  #加载预训练参数
 # 设置为eval模式
 torch_model.eval()
diff --git a/test_benchmark/PyTorch/mobilenetv3/pd_infer.py b/test_benchmark/PyTorch/mobilenetv3/pd_infer.py
index 036f60171..1ed0663e8 100644
--- a/test_benchmark/PyTorch/mobilenetv3/pd_infer.py
+++ b/test_benchmark/PyTorch/mobilenetv3/pd_infer.py
@@ -1,9 +1,10 @@
-import paddle.fluid as fluid
 import paddle
 import numpy as np
 import sys
 import pickle
+import traceback
+
 
 f = open('result.txt', 'w')
 f.write("======Mobilenetv3: \n")
 try:
@@ -11,11 +12,8 @@
     exe = paddle.static.Executor(paddle.CPUPlace())
 
     # test dygraph
-    [prog, inputs, outputs
-     ] = fluid.io.load_inference_model(dirname="pd_model/inference_model/",
-                                       executor=exe,
-                                       model_filename="model.pdmodel",
-                                       params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model/inference_model/model", executor=exe)
     data = np.load('../dataset/mobilenetv3/input.npy')
     result = exe.run(prog, feed={inputs[0]: data}, fetch_list=outputs)
 
@@ -34,5 +32,7 @@
         f.write("Dygraph Successed\n")
     else:
         f.write("!!!!!Dygraph Failed\n")
-except:
+except Exception as e:
     f.write("!!!!!Failed\n")
+
+    f.write(traceback.format_exc())
diff --git a/test_benchmark/PyTorch/tools/predict.py b/test_benchmark/PyTorch/tools/predict.py
index 8b2e99ce4..5104a2882 100644
--- a/test_benchmark/PyTorch/tools/predict.py
+++ b/test_benchmark/PyTorch/tools/predict.py
@@ -10,7 +10,6 @@
 import yaml
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import create_predictor, PrecisionType
 from paddle.inference import Config
 from benchmark_utils import PaddleInferBenchmark
@@ -194,11 +193,9 @@ def analysis_operators(self, model_dir):
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
     # test Dygraph
-    [prog, inputs, outputs
-     ] = fluid.io.load_inference_model(dirname=model_dir,
-                                       executor=exe,
-                                       model_filename="model.pdmodel",
-                                       params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix=model_dir + "/model", executor=exe)
+
     #test op nums
     op_dict = dict()
     op_nums = 0
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 56ff967c5..b9cd52350 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -379,14 +379,14 @@ def pytorch2paddle(module,
             v2 = v2.split('+')[0]
         version_sum = int(v0) * 100 + int(v1) * 10 + int(v2)
         if version_sum < 150:
-            logging.info("[ERROR] PyTorch>=1.5.0 is required")
+            logger.info("[ERROR] PyTorch>=1.5.0 is required")
             return
     except:
-        logging.info(
+        logger.info(
            "[ERROR] PyTorch is not installed, use \"pip install torch torchvision\"."
         )
         return
-    logging.info("Now translating model from PyTorch to Paddle.")
+    logger.info("Now translating model from PyTorch to Paddle.")
 
     from x2paddle.decoder.pytorch_decoder import ScriptDecoder, TraceDecoder
     from x2paddle.op_mapper.pytorch2paddle.pytorch_op_mapper import PyTorchOpMapper
@@ -397,39 +397,39 @@ def pytorch2paddle(module,
         model = ScriptDecoder(module, input_examples)
     mapper = PyTorchOpMapper(model)
     mapper.paddle_graph.build()
-    logging.info("Model optimizing ...")
+    logger.info("Model optimizing ...")
     from x2paddle.optimizer.optimizer import GraphOptimizer
     graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
     graph_opt.optimize(mapper.paddle_graph)
-    logging.info("Model optimized!")
+    logger.info("Model optimized!")
     mapper.paddle_graph.gen_model(save_dir,
                                   jit_type=jit_type,
                                   enable_code_optim=enable_code_optim)
-    logging.info("Successfully exported Paddle static graph model!")
+    logger.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
         ConverterCheck(task="PyTorch",
                        time_info=time_info,
                        convert_state="Success").start()
     if convert_to_lite:
-        logging.info("Now translating model from Paddle to Paddle Lite ...")
+        logger.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
             ConverterCheck(task="PyTorch",
                            time_info=time_info,
                            lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
-        logging.info("Successfully exported Paddle Lite support model!")
+        logger.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
             ConverterCheck(task="PyTorch",
                            time_info=time_info,
                            lite_state="Success").start()
     # for convert survey
-    logging.info("================================================")
-    logging.info("")
-    logging.info(
+    logger.info("================================================")
+    logger.info("")
+    logger.info(
         "Model Converted! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
     )
-    logging.info("")
-    logging.info("================================================")
+    logger.info("")
+    logger.info("================================================")
 
 
 def main():
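
Every `pd_infer.py` hunk above applies the same migration: the removed Paddle 1.x call `fluid.io.load_inference_model(dirname=..., model_filename=..., params_filename=...)` becomes the Paddle 2.x call `paddle.static.load_inference_model(path_prefix=..., executor=...)`, where `path_prefix` names the `model.pdmodel`/`model.pdiparams` pair without the extension. A minimal standalone sketch of the new pattern, assuming the `pd_model_trace/inference_model/model` prefix from the scripts above and an illustrative input shape:

```python
import numpy as np
import paddle

# The inference-model API operates on static graphs.
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

# Loads <path_prefix>.pdmodel and <path_prefix>.pdiparams; returns the
# program, the feed target names, and the fetch targets.
[prog, inputs, outputs] = paddle.static.load_inference_model(
    path_prefix="pd_model_trace/inference_model/model", executor=exe)

# Feed a dummy batch; the 1x3x224x224 shape is an assumption for illustration.
input_data = np.random.rand(1, 3, 224, 224).astype("float32")
result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
print(result[0].shape)
```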