diff --git a/requirements.txt b/requirements.txt
index ded0ee752..7127bf47d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,2 @@
 sympy
+packaging
diff --git a/test_autoscan/onnx/auto_scan_test.py b/test_autoscan/onnx/auto_scan_test.py
index 50e1ee2e5..a2c157385 100644
--- a/test_autoscan/onnx/auto_scan_test.py
+++ b/test_autoscan/onnx/auto_scan_test.py
@@ -30,6 +30,8 @@
 paddle.set_device("cpu")
 
 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
 
 settings.register_profile("ci",
                           max_examples=100,
@@ -97,37 +99,37 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Starting to run tests of {}".format(type(self)))
         paddle.disable_static()
         loop_func()
-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False
 
     def run_test(self, configs):
         config, attrs = configs
-        logging.info("Run configs: {}".format(config))
-        logging.info("Run attrs: {}".format(attrs))
+        logger.info("Run configs: {}".format(config))
+        logger.info("Run attrs: {}".format(attrs))
 
         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -220,9 +222,9 @@ def run_test(self, configs):
                         randtool("float", -2, 2, shape).astype(input_type[j]))
                 obj.set_input_data("input_data", tuple(input_data))
-                logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+                logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                     input_type, op_names[i]))
                 obj.run()
             if len(input_type_list) == 0:
                 obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Ran successfully!")
diff --git a/test_autoscan/onnx/onnxbase.py b/test_autoscan/onnx/onnxbase.py
index 240512d19..40d656bf8 100644
--- a/test_autoscan/onnx/onnxbase.py
+++ b/test_autoscan/onnx/onnxbase.py
@@ -26,6 +26,10 @@
 from onnx import TensorProto
 from onnxruntime import InferenceSession
 
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 DTYPE_ONNX_STR_MAP = {
     'float32': TensorProto.FLOAT,
     'float64': TensorProto.DOUBLE,
@@ -44,6 +48,9 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
         delta: absolute error
         rtol: relative error
     """
+
+    logger.info(">>> compare ...")
+
     if type(result) == np.ndarray:
         if type(expect) == list:
             expect = expect[0]
@@ -167,6 +174,9 @@ def _mkdir(self):
         """
         make dir to save all
         """
+
+        logger.info(">>> _mkdir ...")
+
         save_path = os.path.join(self.pwd, self.name)
         if not os.path.exists(save_path):
             os.mkdir(save_path)
@@ -175,21 +185,33 @@ def _onnx_to_paddle(self, ver):
         """
         convert onnx to paddle
         """
+        logger.info(">>> _onnx_to_paddle ...")
+
         from x2paddle.convert import onnx2paddle
+
+        logger.info(">>> from x2paddle.convert import onnx2paddle ...")
+
         onnx_path = os.path.join(self.pwd, self.name,
                                  self.name + '_' + str(ver) + '.onnx')
         paddle_path = os.path.join(self.pwd, self.name,
                                    self.name + '_' + str(ver) + '_paddle')
+
+        logger.info(">>> onnx2paddle ...")
+
         onnx2paddle(onnx_path,
                     paddle_path,
                     convert_to_lite=False,
                     enable_onnx_checker=self.enable_onnx_checker,
                     disable_feedback=True)
 
+        logger.info(">>> onnx2paddle finished ...")
+
     def _mk_paddle_res(self, ver):
         """
         make paddle res
         """
+        logger.info(">>> _mk_paddle_res ...")
+
         # input data
         paddle_tensor_feed = list()
         result = list()
@@ -201,7 +223,12 @@ def _mk_paddle_res(self, ver):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True
 
+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
+            logger.info(">>> self.run_dynamic...")
+
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_' + str(ver) + '_paddle/')
             restore = paddle.load(os.path.join(paddle_path, "model.pdparams"))
@@ -213,7 +240,12 @@ def _mk_paddle_res(self, ver):
             model.set_dict(restore)
             model.eval()
             result = model(*paddle_tensor_feed)
+
+            logger.info(">>> self.run_dynamic finished...")
+
         else:
+            logger.info(">>> NOT self.run_dynamic...")
+
             paddle_model_path = os.path.join(
                 self.pwd, self.name,
                 self.name + '_' + str(ver) + '_paddle/inference_model/model.pdmodel')
@@ -224,22 +256,52 @@ def _mk_paddle_res(self, ver):
             config.set_prog_file(paddle_model_path)
             if os.path.exists(paddle_param_path):
                 config.set_params_file(paddle_param_path)
+
+            logger.info(">>> config.enable_use_gpu...")
+
             # initial GPU memory(M), device ID
             config.enable_use_gpu(200, 0)
+
+            logger.info(">>> config.enable_use_gpu finished...")
+
             # optimize graph and fuse op
             config.switch_ir_optim(False)
             config.enable_memory_optim()
+
+            logger.info(">>> enable_memory_optim finished...")
+
             # disable feed, fetch OP, needed by zero_copy_run
             config.switch_use_feed_fetch_ops(False)
+
+            logger.info(">>> config.disable_glog_info...")
+
             config.disable_glog_info()
+
+            logger.info(">>> config.pass_builder...")
+
             pass_builder = config.pass_builder()
+
+            logger.info(">>> create_predictor(config)...")
+
             predictor = create_predictor(config)
+
+            logger.info(">>> predictor.get_input_names...")
+
             input_names = predictor.get_input_names()
             output_names = predictor.get_output_names()
+
+            logger.info(">>> copy_from_cpu...")
+
             for i in range(len(input_names)):
                 input_tensor = predictor.get_input_handle(input_names[i])
                 input_tensor.copy_from_cpu(self.input_feed[self.inputs_name[i]])
+
+            logger.info(">>> predictor.run...")
+
             predictor.run()
+
+            logger.info(">>> predictor.run finished...")
+
             for output_name in output_names:
                 output_tensor = predictor.get_output_handle(output_name)
                 result.append(output_tensor.copy_to_cpu())
@@ -257,15 +319,24 @@ def _mk_paddle_res(self, ver):
                 result = (result, )
             else:
                 result = (result.numpy(), )
+
+        logger.info(">>> _mk_paddle_res finished ...")
+
         return result
 
     def _mk_onnx_res(self, ver):
         """
         make onnx res
         """
+
+        logger.info('>>> _mk_onnx_res InferenceSession...')
+
         sess = InferenceSession(
             os.path.join(self.pwd, self.name,
                          self.name + '_' + str(ver) + '.onnx'))
+
+        logger.info('>>> sess.run ...')
+
         ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
         return ort_outs
@@ -291,6 +362,8 @@ def _mk_onnx_graph(self, ver):
         """
         make onnx graph
         """
+        logger.info(">>> _mk_onnx_graph ... make_node")
+
         node = onnx.helper.make_node(
             self.op_type,
             inputs=self.inputs_name,
@@ -324,8 +397,14 @@ def run(self):
         3. use onnx to make res
         4. compare diff
         """
+
+        logger.info(">>> run ...")
+
         self._mkdir()
         for place in self.places:
+
+            logger.info(">>> place ..." + str(place))
+
             paddle.set_device(place)
             onnx_res = {}
             paddle_res = {}
diff --git a/test_autoscan/onnx/test_auto_scan_abs.py b/test_autoscan/onnx/test_auto_scan_abs.py
index fe750ca0e..3773bfac2 100644
--- a/test_autoscan/onnx/test_auto_scan_abs.py
+++ b/test_autoscan/onnx/test_auto_scan_abs.py
@@ -19,6 +19,12 @@
 import numpy as np
 import unittest
+
+import logging
+
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
 
 
 class TestAbsConvert(OPConvertAutoScanTest):
     """
@@ -51,6 +57,8 @@ def sample_convert_config(self, draw):
         return (config, attrs)
 
     def test(self):
+        logger.info('>>> test_auto_scan_abs.py::test')
+
         self.run_and_statis(max_examples=30)
diff --git a/test_autoscan/run_autoscan_onnx.sh b/test_autoscan/run_autoscan_onnx.sh
index df8d1aec9..0e711e3f2 100644
--- a/test_autoscan/run_autoscan_onnx.sh
+++ b/test_autoscan/run_autoscan_onnx.sh
@@ -24,7 +24,11 @@
 for var in ${file_arr[@]}
 do
     log_name=${logs_path}/${var}.log
     echo " Now start test: ${var}"
+
+    echo " >>> begin test ..."
+
     python ${var} > ${log_name} 2>&1
+    # python ${var}
 done
diff --git a/test_autoscan/torch/auto_scan_test.py b/test_autoscan/torch/auto_scan_test.py
index 8b91ed052..6701d3d74 100644
--- a/test_autoscan/torch/auto_scan_test.py
+++ b/test_autoscan/torch/auto_scan_test.py
@@ -32,6 +32,8 @@
 paddle.set_device("cpu")
 
 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
 
 settings.register_profile("ci",
                           max_examples=100,
@@ -112,36 +114,36 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Starting to run tests of {}".format(type(self)))
         paddle.disable_static()
         loop_func()
-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False
 
     def run_test(self, configs):
         config, models = configs
-        logging.info("Run configs: {}".format(config))
+        logger.info("Run configs: {}".format(config))
 
         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -218,9 +220,9 @@ def run_test(self, configs):
                         randtool("float", -2, 2, shape).astype(input_type[j]))
                 obj.set_input_data("input_data", tuple(input_data))
-                logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+                logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                     input_type, op_names[i]))
                 obj.run()
             if len(input_type_list) == 0:
                 obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Ran successfully!")
diff --git a/test_autoscan/torch/torchbase.py b/test_autoscan/torch/torchbase.py
index 435313271..0f66f513c 100644
--- a/test_autoscan/torch/torchbase.py
+++ b/test_autoscan/torch/torchbase.py
@@ -180,6 +180,9 @@ def _mk_paddle_res(self, ):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True
 
+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_paddle/')
diff --git a/test_benchmark/Caffe/convert.sh b/test_benchmark/Caffe/convert.sh
index 190166823..a60db59e7 100644
--- a/test_benchmark/Caffe/convert.sh
+++ b/test_benchmark/Caffe/convert.sh
@@ -10,7 +10,7 @@
 find . -name "run.log" | xargs rm -rf
 
 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)
 counter=1
@@ -18,6 +18,11 @@
 for model in $models
 do
     echo "[X2Paddle-Caffe] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # default the result to `Failed` in case `result.txt` is not generated
+    touch result.txt
+    echo $model ">>>Failed" > result.txt
+
     sh run_convert.sh $model 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))
diff --git a/test_benchmark/Caffe/tools/log_summary.sh b/test_benchmark/Caffe/tools/log_summary.sh
index 7c0228980..4dc318a61 100644
--- a/test_benchmark/Caffe/tools/log_summary.sh
+++ b/test_benchmark/Caffe/tools/log_summary.sh
@@ -7,7 +7,12 @@
 fi
 mkdir ../output
 cd ..
-for model in `ls -d */ | grep -v 'tools' | grep -v 'output' | awk -F '/' '{print $1}'`
+
+# use black.list to control CI tests
+filename="black.list"
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
+
+for model in $models
 do
     cp ${model}/run.log output/${model}_run.log
     cp ${model}/run.err output/${model}_run.err
diff --git a/test_benchmark/ONNX/convert.sh b/test_benchmark/ONNX/convert.sh
index 8fc61701a..7b25275b9 100644
--- a/test_benchmark/ONNX/convert.sh
+++ b/test_benchmark/ONNX/convert.sh
@@ -10,7 +10,7 @@
 find . -name "run.err" | xargs rm -rf
 
 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)
 counter=1
@@ -18,6 +18,11 @@
 for model in $models
 do
     echo "[X2Paddle-ONNX] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # default the result to `Failed` in case `result.txt` is not generated
+    touch result.txt
+    echo $model ">>>Failed" > result.txt
+
     sh run_convert.sh $model 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))
diff --git a/test_benchmark/ONNX/tools/log_summary.sh b/test_benchmark/ONNX/tools/log_summary.sh
index 7c0228980..4dc318a61 100644
--- a/test_benchmark/ONNX/tools/log_summary.sh
+++ b/test_benchmark/ONNX/tools/log_summary.sh
@@ -7,7 +7,12 @@
 fi
 mkdir ../output
 cd ..
-for model in `ls -d */ | grep -v 'tools' | grep -v 'output' | awk -F '/' '{print $1}'`
+
+# use black.list to control CI tests
+filename="black.list"
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
+
+for model in $models
 do
     cp ${model}/run.log output/${model}_run.log
     cp ${model}/run.err output/${model}_run.err
diff --git a/test_benchmark/PyTorch/convert.sh b/test_benchmark/PyTorch/convert.sh
index 0f8f67873..4c80dfed0 100644
--- a/test_benchmark/PyTorch/convert.sh
+++ b/test_benchmark/PyTorch/convert.sh
@@ -19,7 +19,7 @@
 find . -name "run.err" | xargs rm -rf
 
 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)
 counter=1
@@ -27,6 +27,11 @@
 for model in $models
 do
     echo "[X2Paddle-PyTorch] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # default the result to `Failed` in case `result.txt` is not generated
+    touch result.txt
+    echo $model ">>>Failed" > result.txt
+
     sh run_convert.sh 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))
diff --git a/test_benchmark/PyTorch/tools/log_summary.sh b/test_benchmark/PyTorch/tools/log_summary.sh
index 7c0228980..4dc318a61 100644
--- a/test_benchmark/PyTorch/tools/log_summary.sh
+++ b/test_benchmark/PyTorch/tools/log_summary.sh
@@ -7,7 +7,12 @@
 fi
 mkdir ../output
 cd ..
-for model in `ls -d */ | grep -v 'tools' | grep -v 'output' | awk -F '/' '{print $1}'`
+
+# use black.list to control CI tests
+filename="black.list"
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
+
+for model in $models
 do
     cp ${model}/run.log output/${model}_run.log
     cp ${model}/run.err output/${model}_run.err
diff --git a/test_benchmark/TensorFlow/convert.sh b/test_benchmark/TensorFlow/convert.sh
index 2259fce14..25e476bd8 100644
--- a/test_benchmark/TensorFlow/convert.sh
+++ b/test_benchmark/TensorFlow/convert.sh
@@ -10,7 +10,7 @@
 find . -name "run.err" | xargs rm -rf
 
 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)
 counter=1
@@ -18,6 +18,11 @@
 for model in $models
 do
     echo "[X2Paddle-TensorFlow] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # default the result to `Failed` in case `result.txt` is not generated
+    touch result.txt
+    echo $model ">>>Failed" > result.txt
+
     sh run_convert.sh $model 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))
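Aside: each convert.sh now pre-writes a `>>>Failed` sentinel into `result.txt` before launching `run_convert.sh` in the background, so a conversion that crashes or hangs without writing its own verdict still registers as a failure. A sketch of what a downstream checker could do with those sentinels (hypothetical; the actual CI checker is not part of this diff):

import pathlib


def collect_failures(root="."):
    # Scan every model directory for result.txt; convert.sh seeded each one
    # with "<model> >>>Failed", and a successful run_convert.sh is expected
    # to overwrite it, so a surviving sentinel means the conversion failed.
    failed = []
    for result in sorted(pathlib.Path(root).glob("*/result.txt")):
        if ">>>Failed" in result.read_text():
            failed.append(result.parent.name)
    return failed


if __name__ == "__main__":
    failures = collect_failures()
    print("failed models:", ", ".join(failures) or "none")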
diff --git a/test_benchmark/TensorFlow/tools/log_summary.sh b/test_benchmark/TensorFlow/tools/log_summary.sh
index 7c0228980..4dc318a61 100644
--- a/test_benchmark/TensorFlow/tools/log_summary.sh
+++ b/test_benchmark/TensorFlow/tools/log_summary.sh
@@ -7,7 +7,12 @@
 fi
 mkdir ../output
 cd ..
-for model in `ls -d */ | grep -v 'tools' | grep -v 'output' | awk -F '/' '{print $1}'`
+
+# use black.list to control CI tests
+filename="black.list"
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
+
+for model in $models
 do
     cp ${model}/run.log output/${model}_run.log
     cp ${model}/run.err output/${model}_run.err
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 3372c5bc1..56ff967c5 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -14,12 +14,16 @@
 from six import text_type as _text_type
 from x2paddle import program
-from x2paddle.utils import ConverterCheck
+from x2paddle.utils import ConverterCheck, check_version
 import argparse
 import sys
 import logging
 import time
 
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 
 def arg_parser():
     parser = argparse.ArgumentParser()
@@ -275,6 +279,9 @@ def onnx2paddle(model_path,
                 lite_model_type="naive_buffer",
                 disable_feedback=False,
                 enable_onnx_checker=True):
+
+    logger.info(">>> onnx2paddle ...")
+
     # for convert_id
     time_info = int(time.time())
     if not disable_feedback:
@@ -287,27 +294,27 @@ def onnx2paddle(model_path,
         v0, v1, v2 = version.split('.')
         version_sum = int(v0) * 100 + int(v1) * 10 + int(v2)
         if version_sum < 160:
-            logging.info("[ERROR] onnx>=1.6.0 is required")
+            logger.error("[ERROR] onnx>=1.6.0 is required")
             return
     except:
-        logging.info(
+        logger.error(
             "[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".")
         return
-    logging.info("Now translating model from onnx to paddle.")
+    logger.info("Now translating model from onnx to paddle.")
 
     # Do optimizer
     if enable_optim:
         from onnxsim import simplify
         onnx_net_opt_path = model_path[:-5] + '_opt.onnx'
-        logging.info("ONNX Model optimizing ...")
+        logger.info("ONNX Model optimizing ...")
         # load your predefined ONNX model
         model = onnx.load(model_path)
         # convert model
         model_simp, check = simplify(model)
         assert check, "Simplified ONNX model could not be validated"
-        logging.info("Export optimized onnx model:{}".format(onnx_net_opt_path))
+        logger.info("Export optimized onnx model: {}".format(onnx_net_opt_path))
         onnx.save(model_simp, onnx_net_opt_path)
-        logging.info("ONNX Model optimized!")
+        logger.info("ONNX Model optimized!")
         model_path = onnx_net_opt_path
 
     from x2paddle.decoder.onnx_decoder import ONNXDecoder
@@ -315,36 +322,36 @@ def onnx2paddle(model_path,
     model = ONNXDecoder(model_path, enable_onnx_checker, input_shape_dict)
     mapper = ONNXOpMapper(model)
     mapper.paddle_graph.build()
-    logging.info("Model optimizing ...")
+    logger.info("Model optimizing ...")
     from x2paddle.optimizer.optimizer import GraphOptimizer
     graph_opt = GraphOptimizer(source_frame="onnx")
     graph_opt.optimize(mapper.paddle_graph)
-    logging.info("Model optimized.")
+    logger.info("Model optimized.")
     mapper.paddle_graph.gen_model(save_dir)
-    logging.info("Successfully exported Paddle static graph model!")
+    logger.info("Successfully exported Paddle static graph model!")
     if not disable_feedback:
         ConverterCheck(task="ONNX", time_info=time_info,
                        convert_state="Success").start()
     if convert_to_lite:
-        logging.info("Now translating model from Paddle to Paddle Lite ...")
+        logger.info("Now translating model from Paddle to Paddle Lite ...")
         if not disable_feedback:
             ConverterCheck(task="ONNX", time_info=time_info,
                            lite_state="Start").start()
         convert2lite(save_dir, lite_valid_places, lite_model_type)
-        logging.info("Successfully exported Paddle Lite support model!")
+        logger.info("Successfully exported Paddle Lite support model!")
         if not disable_feedback:
             ConverterCheck(task="ONNX", time_info=time_info,
                            lite_state="Success").start()
     # for convert survey
-    logging.info("================================================")
-    logging.info("")
-    logging.info(
+    logger.info("================================================")
+    logger.info("")
+    logger.info(
         "Model Converted! Fill this survey to help X2Paddle better, https://iwenjuan.baidu.com/?code=npyd51 "
     )
-    logging.info("")
-    logging.info("================================================")
+    logger.info("")
+    logger.info("================================================")
 
 
 def pytorch2paddle(module,
@@ -449,20 +456,15 @@ def main():
     assert args.save_dir is not None, "--save_dir is not defined"
 
     try:
-        import platform
-        v0, v1, v2 = platform.python_version().split('.')
-        if not (int(v0) >= 3 and int(v1) >= 5):
-            logging.info("[ERROR] python>=3.5 is required")
+        if sys.version_info < (3, 8):
+            logger.error("[ERROR] python>=3.8 is required")
             return
+
         import paddle
-        v0, v1, v2 = paddle.__version__.split('.')
-        logging.info("paddle.__version__ = {}".format(paddle.__version__))
-        if v0 == '0' and v1 == '0' and v2 == '0':
-            logging.info(
-                "[WARNING] You are use develop version of paddlepaddle")
-        elif int(v0) != 2 or int(v1) < 0:
-            logging.info("[ERROR] paddlepaddle>=2.0.0 is required")
+        if not check_version('2.0.0'):
+            logger.error("[ERROR] paddlepaddle>=2.0.0 is required")
             return
+
     except:
         logging.info(
             "[ERROR] paddlepaddle not installed, use \"pip install paddlepaddle\""
diff --git a/x2paddle/utils.py b/x2paddle/utils.py
index 2117a40c7..b51dda7f4 100644
--- a/x2paddle/utils.py
+++ b/x2paddle/utils.py
@@ -13,6 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
+from packaging.version import Version
+
 import paddle
 import x2paddle
 import hashlib
@@ -30,15 +34,26 @@ def string(param):
     return "\'{}\'".format(param)
 
 
-def check_version():
-    version = paddle.__version__
-    v0, v1, v2 = version.split('.')
-    if not ((v0 == '0' and v1 == '0' and v2 == '0') or
-            (int(v0) >= 2 and int(v1) >= 1)):
-        return False
-    else:
+def check_version(base_version: str = '2.1.0') -> bool:
+    """
+    Return `True` if the current version is equal to or greater than `base_version`.
+    The default of `2.1.0` preserves the old `is_new_version` check.
+    """
+    dev_version = Version('0.0.0')
+    cur_version = Version(paddle.__version__)
+
+    if cur_version == dev_version:
+        logging.warning("You are using a develop version of paddlepaddle")
+        return True
+
+    if cur_version >= Version(base_version):
         return True
 
+    return False
+
 
 def _md5(text: str):
     '''Calculate the md5 value of the input text.'''
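Aside: the rewritten `check_version` is the heart of this change — `packaging.version.Version` compares release segments numerically and understands pre-release suffixes, which the old per-component `int()` parsing did not. A short standalone illustration of the semantics the new helper relies on (the `current` parameter below is for demonstration only; the repo's helper reads `paddle.__version__`):

from packaging.version import Version

# Version compares release segments numerically, so "2.10.0" > "2.9.1" —
# exactly the case that naive string comparison gets wrong.
assert Version("2.10.0") > Version("2.9.1")
assert "2.10.0" < "2.9.1"  # lexicographic comparison misorders it

# Pre-release suffixes are ordered before the final release.
assert Version("2.1.0rc0") < Version("2.1.0")


def check_version(base_version: str = "2.1.0", current: str = "2.1.0") -> bool:
    # Mirror of the utils.py logic, parameterized on `current` for testing;
    # "0.0.0" is treated as a develop build and always passes.
    cur = Version(current)
    if cur == Version("0.0.0"):
        return True
    return cur >= Version(base_version)


assert check_version("2.0.0", "2.10.0")
assert not check_version("2.1.0", "2.0.2")
assert check_version("2.1.0", "0.0.0")  # develop build passes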