1 change: 1 addition & 0 deletions requirements.txt
@@ -1 +1,2 @@
 sympy
+packaging
26 changes: 14 additions & 12 deletions test_autoscan/onnx/auto_scan_test.py
@@ -30,6 +30,8 @@
 paddle.set_device("cpu")

 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)

 settings.register_profile("ci",
                           max_examples=100,
@@ -97,37 +99,37 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Start to running test of {}".format(type(self)))

         paddle.disable_static()
         loop_func()

-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False

     def run_test(self, configs):
         config, attrs = configs
-        logging.info("Run configs: {}".format(config))
-        logging.info("Run attrs: {}".format(attrs))
+        logger.info("Run configs: {}".format(config))
+        logger.info("Run attrs: {}".format(attrs))

         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -220,9 +222,9 @@ def run_test(self, configs):
                     randtool("float", -2, 2,
                              shape).astype(input_type[j]))
             obj.set_input_data("input_data", tuple(input_data))
-            logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+            logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                 input_type, op_names[i]))
             obj.run()
         if len(input_type_list) == 0:
             obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Run Successfully!")
79 changes: 79 additions & 0 deletions test_autoscan/onnx/onnxbase.py
@@ -26,6 +26,10 @@
 from onnx import TensorProto
 from onnxruntime import InferenceSession

+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 DTYPE_ONNX_STR_MAP = {
     'float32': TensorProto.FLOAT,
     'float64': TensorProto.DOUBLE,
@@ -44,6 +48,9 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
     delta: absolute error
     rtol: relative error
     """
+
+    logger.info(">>> compare ...")
+
     if type(result) == np.ndarray:
         if type(expect) == list:
             expect = expect[0]
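For context, `compare` enforces the usual mixed-tolerance check, roughly `|result - expect| <= delta + rtol * |expect|`. A minimal sketch of the core comparison using NumPy (a simplification; the full function also unwraps lists and recurses into nested results):

```python
import numpy as np

def compare_sketch(result, expect, delta=1e-10, rtol=1e-10):
    # delta is the absolute tolerance, rtol the relative tolerance.
    result = np.asarray(result)
    expect = np.asarray(expect)
    assert result.shape == expect.shape, "shape mismatch"
    assert result.dtype == expect.dtype, "dtype mismatch"
    # allclose checks |result - expect| <= atol + rtol * |expect|.
    assert np.allclose(result, expect, atol=delta, rtol=rtol), "value mismatch"
```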
Expand Down Expand Up @@ -167,6 +174,9 @@ def _mkdir(self):
"""
make dir to save all
"""

logger.info(">>> _mkdir ...")

save_path = os.path.join(self.pwd, self.name)
if not os.path.exists(save_path):
os.mkdir(save_path)
@@ -175,21 +185,33 @@ def _onnx_to_paddle(self, ver):
         """
         convert onnx to paddle
         """
+        logger.info(">>> _onnx_to_paddle ...")
+
         from x2paddle.convert import onnx2paddle
+
+        logger.info(">>> from x2paddle.convert import onnx2paddle ...")
+
         onnx_path = os.path.join(self.pwd, self.name,
                                  self.name + '_' + str(ver) + '.onnx')
         paddle_path = os.path.join(self.pwd, self.name,
                                    self.name + '_' + str(ver) + '_paddle')
+
+        logger.info(">>> onnx2paddle ...")
+
         onnx2paddle(onnx_path,
                     paddle_path,
                     convert_to_lite=False,
                     enable_onnx_checker=self.enable_onnx_checker,
                     disable_feedback=True)

+        logger.info(">>> onnx2paddle finished ...")
+
     def _mk_paddle_res(self, ver):
         """
         make paddle res
         """
+        logger.info(">>> _mk_paddle_res ...")
+
         # input data
         paddle_tensor_feed = list()
         result = list()
@@ -201,7 +223,12 @@ def _mk_paddle_res(self, ver):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True

+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
+            logger.info(">>> self.run_dynamic...")
+
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_' + str(ver) + '_paddle/')
             restore = paddle.load(os.path.join(paddle_path, "model.pdparams"))
@@ -213,7 +240,12 @@ def _mk_paddle_res(self, ver):
             model.set_dict(restore)
             model.eval()
             result = model(*paddle_tensor_feed)
+
+            logger.info(">>> self.run_dynamic finished...")
+
         else:
+            logger.info(">>> NOT self.run_dynamic...")
+
             paddle_model_path = os.path.join(
                 self.pwd, self.name, self.name + '_' + str(ver) +
                 '_paddle/inference_model/model.pdmodel')
@@ -224,22 +256,52 @@ def _mk_paddle_res(self, ver):
             config.set_prog_file(paddle_model_path)
             if os.path.exists(paddle_param_path):
                 config.set_params_file(paddle_param_path)
+
+            logger.info(">>> config.enable_use_gpu...")
+
             # initial GPU memory(M), device ID
             config.enable_use_gpu(200, 0)
+
+            logger.info(">>> config.enable_use_gpu finished...")
+
             # optimize graph and fuse op
             config.switch_ir_optim(False)
             config.enable_memory_optim()
+
+            logger.info(">>> enable_memory_optim finished...")
+
             # disable feed, fetch OP, needed by zero_copy_run
             config.switch_use_feed_fetch_ops(False)
+
+            logger.info(">>> config.disable_glog_info...")
+
             config.disable_glog_info()
+
+            logger.info(">>> config.pass_builder...")
+
             pass_builder = config.pass_builder()
+
+            logger.info(">>> create_predictor(config)...")
+
             predictor = create_predictor(config)
+
+            logger.info(">>> predictor.get_input_names...")
+
             input_names = predictor.get_input_names()
             output_names = predictor.get_output_names()
+
+            logger.info(">>> copy_from_cpu...")
+
             for i in range(len(input_names)):
                 input_tensor = predictor.get_input_handle(input_names[i])
                 input_tensor.copy_from_cpu(self.input_feed[self.inputs_name[i]])
+
+            logger.info(">>> predictor.run...")
+
             predictor.run()
+
+            logger.info(">>> predictor.run finished...")
+
             for output_name in output_names:
                 output_tensor = predictor.get_output_handle(output_name)
                 result.append(output_tensor.copy_to_cpu())
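The instrumented block above is the standard Paddle Inference flow, with log lines bracketing each step so a hang can be localized. A condensed sketch of the same sequence (paths and the input shape are placeholders):

```python
import numpy as np
import paddle.inference as paddle_infer

# Configure the predictor from a saved inference model (placeholder paths).
config = paddle_infer.Config("model.pdmodel", "model.pdiparams")
config.enable_use_gpu(200, 0)            # initial GPU memory (MB), device id
config.switch_ir_optim(False)            # skip graph optimization passes
config.enable_memory_optim()
config.switch_use_feed_fetch_ops(False)  # needed for zero-copy I/O
config.disable_glog_info()

# The TODO in this PR reports that this call can get stuck.
predictor = paddle_infer.create_predictor(config)

# Zero-copy feed, run, fetch.
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.ones([1, 3, 224, 224], dtype="float32"))
predictor.run()
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
result = output_handle.copy_to_cpu()
```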
@@ -257,15 +319,24 @@ def _mk_paddle_res(self, ver):
             result = (result, )
         else:
             result = (result.numpy(), )
+
+        logger.info(">>> _mk_paddle_res finished ...")
+
         return result

     def _mk_onnx_res(self, ver):
         """
         make onnx res
         """
+
+        logger.info('>>> _mk_onnx_res InferenceSession...')
+
         sess = InferenceSession(
             os.path.join(self.pwd, self.name,
                          self.name + '_' + str(ver) + '.onnx'))
+
+        logger.info('>>> sess.run ...')
+
         ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
         return ort_outs
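`_mk_onnx_res` produces the reference outputs with ONNX Runtime. A standalone equivalent, with a placeholder model path and input feed:

```python
import numpy as np
from onnxruntime import InferenceSession

# Placeholder file and input name; the real values come from
# self.pwd/self.name and self.input_feed.
sess = InferenceSession("abs_9.onnx")
input_feed = {"x": np.random.rand(1, 3).astype("float32")}

# output_names=None fetches every declared model output.
ort_outs = sess.run(output_names=None, input_feed=input_feed)
```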

@@ -291,6 +362,8 @@ def _mk_onnx_graph(self, ver):
         """
         make onnx graph
         """
+        logger.info(">>> _mk_onnx_graph ... make_node")
+
         node = onnx.helper.make_node(
             self.op_type,
             inputs=self.inputs_name,
@@ -324,8 +397,14 @@ def run(self):
         3. use onnx to make res
         4. compare diff
         """
+
+        logger.info(">>> run ...")
+
         self._mkdir()
         for place in self.places:
+
+            logger.info(">>> place ..." + str(place))
+
             paddle.set_device(place)
             onnx_res = {}
             paddle_res = {}
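The forced `self.run_dynamic = True` above (see the `TODO(megemini): create_predictor stuck` comment) routes every case through the dygraph branch of `_mk_paddle_res` instead of the static predictor. A condensed sketch of that branch, assuming the x2paddle output layout shown in the diff; the generated class name here is hypothetical:

```python
import os
import sys
import paddle

# The converted model directory holds x2paddle_code.py and model.pdparams.
paddle_path = "abs_9_paddle"  # placeholder directory name
restore = paddle.load(os.path.join(paddle_path, "model.pdparams"))

# Import the generated network and restore its weights.
sys.path.insert(0, paddle_path)
from x2paddle_code import ONNXModel  # hypothetical generated class name

model = ONNXModel()
model.set_dict(restore)
model.eval()
result = model(paddle.rand([1, 3]))  # placeholder input tensor
```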
8 changes: 8 additions & 0 deletions test_autoscan/onnx/test_auto_scan_abs.py
@@ -19,6 +19,12 @@
 import numpy as np
 import unittest

+import logging
+
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+

 class TestAbsConvert(OPConvertAutoScanTest):
     """
@@ -51,6 +57,8 @@ def sample_convert_config(self, draw):
         return (config, attrs)

     def test(self):
+        logger.info('>>> test_auto_scan_abs.py::test')
+
         self.run_and_statis(max_examples=30)

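For reference, this is the shape of an autoscan case: `sample_convert_config` draws one random configuration per Hypothesis example, and `test` hands control to `run_and_statis`. A sketch mirroring the visible structure of `TestAbsConvert`; the import path and every config key except `op_names` (which `run_test` asserts on) are illustrative:

```python
import hypothesis.strategies as st

from auto_scan_test import OPConvertAutoScanTest  # import path assumed

class TestAbsConvertSketch(OPConvertAutoScanTest):

    def sample_convert_config(self, draw):
        # Draw a random input shape; each drawn value defines one case.
        input_shape = draw(
            st.lists(st.integers(min_value=2, max_value=8),
                     min_size=2, max_size=4))
        config = {
            "op_names": ["abs"],                # required by run_test's assert
            "test_data_shapes": [input_shape],  # illustrative key
            "test_data_types": [["float32"]],   # illustrative key
        }
        attrs = {}
        return (config, attrs)

    def test(self):
        self.run_and_statis(max_examples=30)
```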
4 changes: 4 additions & 0 deletions test_autoscan/run_autoscan_onnx.sh
@@ -24,7 +24,11 @@ for var in ${file_arr[@]}
 do
     log_name=${logs_path}/${var}.log
     echo " Now start test: ${var}"
+
+    echo " >>> begin test ..."
+
     python ${var} > ${log_name} 2>&1
+    # python ${var}

 done
24 changes: 13 additions & 11 deletions test_autoscan/torch/auto_scan_test.py
@@ -32,6 +32,8 @@
 paddle.set_device("cpu")

 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)

 settings.register_profile("ci",
                           max_examples=100,
@@ -112,36 +114,36 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Start to running test of {}".format(type(self)))

         paddle.disable_static()
         loop_func()

-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False

     def run_test(self, configs):
         config, models = configs
-        logging.info("Run configs: {}".format(config))
+        logger.info("Run configs: {}".format(config))

         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -218,9 +220,9 @@ def run_test(self, configs):
                     randtool("float", -2, 2,
                              shape).astype(input_type[j]))
             obj.set_input_data("input_data", tuple(input_data))
-            logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+            logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                 input_type, op_names[i]))
             obj.run()
         if len(input_type_list) == 0:
             obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Run Successfully!")
3 changes: 3 additions & 0 deletions test_autoscan/torch/torchbase.py
@@ -180,6 +180,9 @@ def _mk_paddle_res(self, ):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True

+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_paddle/')
7 changes: 6 additions & 1 deletion test_benchmark/Caffe/convert.sh
@@ -10,14 +10,19 @@ find . -name "run.log" | xargs rm -rf

 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)

 counter=1
 for model in $models
 do
     echo "[X2Paddle-Caffe] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # make default result is `Failed` in case of `result.txt` not generated
+    touch result.txt
+    echo $model ">>>Failed"> result.txt
+
     sh run_convert.sh $model 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))