From 78385592d0411094fb758a1a800dae6a5136b6fc Mon Sep 17 00:00:00 2001
From: megemini
Date: Thu, 17 Oct 2024 15:48:47 +0800
Subject: [PATCH 1/3] [Add] pytorch aten::list and fix models

---
 .../DeepLabv3_ResNet50/deploy_infer.py        |  1 -
 .../PyTorch/DeepLabv3_ResNet50/pd_infer.py    | 15 +++-------
 .../PyTorch/FCN_ResNet50/deploy_infer.py      |  1 -
 .../PyTorch/FCN_ResNet50/pd_infer.py          | 15 +++-------
 x2paddle/convert.py                           | 19 +++++-------
 x2paddle/op_mapper/pytorch2paddle/aten.py     | 27 +++++++++++++++++
 x2paddle/utils.py                             | 29 ++++++++++++++-----
 7 files changed, 64 insertions(+), 43 deletions(-)

diff --git a/test_benchmark/PyTorch/DeepLabv3_ResNet50/deploy_infer.py b/test_benchmark/PyTorch/DeepLabv3_ResNet50/deploy_infer.py
index f01e03373..06302a39c 100644
--- a/test_benchmark/PyTorch/DeepLabv3_ResNet50/deploy_infer.py
+++ b/test_benchmark/PyTorch/DeepLabv3_ResNet50/deploy_infer.py
@@ -4,7 +4,6 @@
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/DeepLabv3_ResNet50/pd_infer.py b/test_benchmark/PyTorch/DeepLabv3_ResNet50/pd_infer.py
index 9663ecbf4..ff14bcc95 100644
--- a/test_benchmark/PyTorch/DeepLabv3_ResNet50/pd_infer.py
+++ b/test_benchmark/PyTorch/DeepLabv3_ResNet50/pd_infer.py
@@ -1,5 +1,4 @@
 from __future__ import print_function
-import paddle.fluid as fluid
 import paddle
 import sys
 import os
@@ -22,11 +21,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df0 = pytorch_output["aux"] - result[0]
     df1 = pytorch_output["out"] - result[1]
@@ -40,11 +36,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
     df0 = pytorch_output["aux"] - result[0]
     df1 = pytorch_output["out"] - result[1]
diff --git a/test_benchmark/PyTorch/FCN_ResNet50/deploy_infer.py b/test_benchmark/PyTorch/FCN_ResNet50/deploy_infer.py
index f1449f61f..e7e593499 100644
--- a/test_benchmark/PyTorch/FCN_ResNet50/deploy_infer.py
+++ b/test_benchmark/PyTorch/FCN_ResNet50/deploy_infer.py
@@ -4,7 +4,6 @@
 import numpy as np
 
 import paddle
-import paddle.fluid as fluid
 from paddle.inference import Config
 from paddle.inference import create_predictor
 
diff --git a/test_benchmark/PyTorch/FCN_ResNet50/pd_infer.py b/test_benchmark/PyTorch/FCN_ResNet50/pd_infer.py
index 2373392e4..df873b6af 100644
--- a/test_benchmark/PyTorch/FCN_ResNet50/pd_infer.py
+++ b/test_benchmark/PyTorch/FCN_ResNet50/pd_infer.py
@@ -1,5 +1,4 @@
 from __future__ import print_function
-import paddle.fluid as fluid
 import paddle
 import sys
 import os
@@ -22,11 +21,8 @@ def rel_err(x, y):
     # trace
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_trace/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_trace/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
 
     df0 = pytorch_output["aux"] - result[0]
@@ -41,11 +37,8 @@ def rel_err(x, y):
     # script
     paddle.enable_static()
     exe = paddle.static.Executor(paddle.CPUPlace())
-    [prog, inputs, outputs] = fluid.io.load_inference_model(
-        dirname="pd_model_script/inference_model/",
-        executor=exe,
-        model_filename="model.pdmodel",
-        params_filename="model.pdiparams")
+    [prog, inputs, outputs] = paddle.static.load_inference_model(
+        path_prefix="pd_model_script/inference_model/model", executor=exe)
     result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
 
     df0 = pytorch_output["aux"] - result[0]
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 3372c5bc1..0a7e83a29 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -14,7 +14,7 @@
 
 from six import text_type as _text_type
 from x2paddle import program
-from x2paddle.utils import ConverterCheck
+from x2paddle.utils import ConverterCheck, check_version
 import argparse
 import sys
 import logging
@@ -449,20 +449,15 @@ def main():
     assert args.save_dir is not None, "--save_dir is not defined"
 
     try:
-        import platform
-        v0, v1, v2 = platform.python_version().split('.')
-        if not (int(v0) >= 3 and int(v1) >= 5):
-            logging.info("[ERROR] python>=3.5 is required")
+        if not sys.version_info >= (3, 8):
+            logging.error("[ERROR] python>=3.8 is required")
             return
+
         import paddle
-        v0, v1, v2 = paddle.__version__.split('.')
         logging.info("paddle.__version__ = {}".format(paddle.__version__))
-        if v0 == '0' and v1 == '0' and v2 == '0':
-            logging.info(
-                "[WARNING] You are use develop version of paddlepaddle")
-        elif int(v0) != 2 or int(v1) < 0:
-            logging.info("[ERROR] paddlepaddle>=2.0.0 is required")
+        if not check_version('2.0.0'):
+            logging.error("[ERROR] paddlepaddle>=2.0.0 is required")
             return
+
     except:
         logging.info(
             "[ERROR] paddlepaddle not installed, use \"pip install paddlepaddle\""
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index bd4dada4a..44e4e573f 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -6550,3 +6550,30 @@ def aten_topk(mapper, graph, node):
                     **layer_attrs)
 
     return current_inputs, current_outputs
+
+
+def aten_list(mapper, graph, node):
+    """ Convert Python's `list`, e.g. `list((1,2,3))`.
+    TorchScript example:
+        %1926 : int[] = aten::list(%1925)
+    Parameter meaning:
+        %1926 (list): the output, converted to a list.
+        %1925 (-): the input, anything convertible to a list.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # get the list of outputs of the current node
+    current_outputs = [output_name]
+
+    # get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.list",
+                    inputs=layer_inputs,
+                    outputs=layer_outputs,
+                    scope_name=scope_name)
+    return current_inputs, current_outputs
diff --git a/x2paddle/utils.py b/x2paddle/utils.py
index 2117a40c7..b51dda7f4 100644
--- a/x2paddle/utils.py
+++ b/x2paddle/utils.py
@@ -13,6 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
+from packaging.version import Version
+
 import paddle
 import x2paddle
 import hashlib
@@ -30,15 +34,26 @@ def string(param):
     return "\'{}\'".format(param)
 
 
-def check_version():
-    version = paddle.__version__
-    v0, v1, v2 = version.split('.')
-    if not ((v0 == '0' and v1 == '0' and v2 == '0') or
-            (int(v0) >= 2 and int(v1) >= 1)):
-        return False
-    else:
+def check_version(base_version: str = '2.1.0') -> bool:
+    """
+    Return `True` if the current version is equal to or greater than `base_version`.
+    The default version `2.1.0` is used for checking `is_new_version`.
+    """
+    is_new = False
+
+    dev_version = Version('0.0.0')
+    cur_version = Version(paddle.__version__)
+
+    if cur_version == dev_version:
+        logging.info("[WARNING] You are using a develop version of paddlepaddle")
         return True
+    if cur_version >= Version(base_version):
+        return True
+
+    return False
+
 
 def _md5(text: str):
     '''Calculate the md5 value of the input text.'''

From 41601eef4fd37275244b32cc9df39ff24321cebf Mon Sep 17 00:00:00 2001
From: megemini
Date: Thu, 17 Oct 2024 18:41:29 +0800
Subject: [PATCH 2/3] [Add] prim_list_ as a python function

---
 x2paddle/op_mapper/pytorch2paddle/aten.py      |  5 ++++-
 x2paddle/op_mapper/pytorch2paddle/prim2code.py | 12 ++++++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 44e4e573f..b0c8693e9 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -6569,10 +6569,13 @@ def aten_list(mapper, graph, node):
     # get the list of outputs of the current node
     current_outputs = [output_name]
 
+    # process input
+    layer_inputs["x"] = inputs_name[0]
+
     # get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
 
-    graph.add_layer("prim.list",
+    graph.add_layer("prim.list_",
                     inputs=layer_inputs,
                     outputs=layer_outputs,
                     scope_name=scope_name)
diff --git a/x2paddle/op_mapper/pytorch2paddle/prim2code.py b/x2paddle/op_mapper/pytorch2paddle/prim2code.py
index f3662dc11..7dd7f773c 100755
--- a/x2paddle/op_mapper/pytorch2paddle/prim2code.py
+++ b/x2paddle/op_mapper/pytorch2paddle/prim2code.py
@@ -530,6 +530,18 @@ def prim_list(layer,
         forward_func.extend(gen_codes([line], indent=indent))
 
 
+def prim_list_(layer,
+               indent=1,
+               init_func=[],
+               forward_func=[],
+               layer_id=None,
+               different_attrs=None):
+    """ Just a plain Python `list(x)` call.
+    """
+    line = "{} = list({})".format(layer.outputs[0], layer.inputs["x"])
+    forward_func.extend(gen_codes([line], indent=indent))
+
+
 def prim_list_unpack(layer,
                      indent=1,
                      init_func=[],

From b6c90a713c530f4d1e098a1493f939907b6623a4 Mon Sep 17 00:00:00 2001
From: megemini
Date: Thu, 31 Oct 2024 12:06:49 +0800
Subject: [PATCH 3/3] [Update] black.list

---
 test_benchmark/PyTorch/black.list | 2 --
 1 file changed, 2 deletions(-)

diff --git a/test_benchmark/PyTorch/black.list b/test_benchmark/PyTorch/black.list
index 53f23063b..df67cb92b 100644
--- a/test_benchmark/PyTorch/black.list
+++ b/test_benchmark/PyTorch/black.list
@@ -2,10 +2,8 @@ BertForMaskedLM_dccuchile
 BertModel_SpanBert
 CamembertForQuestionAnswering
 DPRContextEncoder
-DeepLabv3_ResNet50
 EasyOCR_detector
 EasyOCR_recognizer
-FCN_ResNet50
 GRU
 MiniFasNet
 MockingBird
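
Usage sketch (not part of the patches above; the module, input shape, and save directory
are made up for illustration): assuming x2paddle's usual `pytorch2paddle` entry point, a
scripted module that calls `list(...)` should now be handled by the new `aten::list` ->
`prim.list_` mapping, which emits a plain `list(x)` line in the generated Paddle code.

    import torch
    from x2paddle.convert import pytorch2paddle

    class ListModule(torch.nn.Module):
        def forward(self, x):
            # In TorchScript, `list(...)` on an int list lowers to `aten::list`;
            # aten_list maps that node to `prim.list_`, which in turn becomes a
            # plain `list(...)` call in the generated Paddle forward code.
            shape = list(x.shape)
            return x.reshape(shape)

    torch_module = ListModule()
    torch_module.eval()
    # "script" mode keeps the aten::list node in the exported graph.
    pytorch2paddle(torch_module,
                   save_dir="pd_model_list",
                   jit_type="script",
                   input_examples=[torch.randn(1, 3, 224, 224)])

This mapping is presumably also what lets DeepLabv3_ResNet50 and FCN_ResNet50 be dropped
from test_benchmark/PyTorch/black.list in PATCH 3/3.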