
Commit 4236c2a

[PaddleV3] Fix the Paddle version check (#1064)
1 parent 7859d67 commit 4236c2a

File tree: 17 files changed, +222 −66 lines

requirements.txt

Lines changed: 1 addition & 0 deletions

@@ -1 +1,2 @@
 sympy
+packaging
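
Aside: the commit title says it fixes the Paddle version check, and `packaging` is the standard tool for that job, since comparing version strings lexicographically misorders releases ("2.10.0" < "2.9.0" as plain strings). A minimal sketch of the kind of guard this dependency enables; the threshold and message here are hypothetical, the real check lives in x2paddle's own code:

    import paddle
    from packaging.version import Version

    # Hypothetical minimum version, for illustration only.
    MIN_PADDLE = Version("2.6.0")

    if Version(paddle.__version__) < MIN_PADDLE:
        raise RuntimeError("paddlepaddle >= {} required, found {}".format(
            MIN_PADDLE, paddle.__version__))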

test_autoscan/onnx/auto_scan_test.py

Lines changed: 14 additions & 12 deletions

@@ -30,6 +30,8 @@
 paddle.set_device("cpu")

 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)

 settings.register_profile("ci",
                           max_examples=100,
@@ -97,37 +99,37 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Start to running test of {}".format(type(self)))

         paddle.disable_static()
         loop_func()

-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False

     def run_test(self, configs):
         config, attrs = configs
-        logging.info("Run configs: {}".format(config))
-        logging.info("Run attrs: {}".format(attrs))
+        logger.info("Run configs: {}".format(config))
+        logger.info("Run attrs: {}".format(attrs))

         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -220,9 +222,9 @@ def run_test(self, configs):
                         randtool("float", -2, 2,
                                  shape).astype(input_type[j]))
                 obj.set_input_data("input_data", tuple(input_data))
-                logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+                logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                     input_type, op_names[i]))
                 obj.run()
             if len(input_type_list) == 0:
                 obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Run Successfully!")

test_autoscan/onnx/onnxbase.py

Lines changed: 79 additions & 0 deletions

@@ -26,6 +26,10 @@
 from onnx import TensorProto
 from onnxruntime import InferenceSession

+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 DTYPE_ONNX_STR_MAP = {
     'float32': TensorProto.FLOAT,
     'float64': TensorProto.DOUBLE,
@@ -44,6 +48,9 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
     delta: absolute error
     rtol: relative error
     """
+
+    logger.info(">>> compare ...")
+
     if type(result) == np.ndarray:
         if type(expect) == list:
             expect = expect[0]
@@ -167,6 +174,9 @@ def _mkdir(self):
         """
         make dir to save all
         """
+
+        logger.info(">>> _mkdir ...")
+
         save_path = os.path.join(self.pwd, self.name)
         if not os.path.exists(save_path):
             os.mkdir(save_path)
@@ -175,21 +185,33 @@ def _onnx_to_paddle(self, ver):
         """
         convert onnx to paddle
         """
+        logger.info(">>> _onnx_to_paddle ...")
+
         from x2paddle.convert import onnx2paddle
+
+        logger.info(">>> from x2paddle.convert import onnx2paddle ...")
+
         onnx_path = os.path.join(self.pwd, self.name,
                                  self.name + '_' + str(ver) + '.onnx')
         paddle_path = os.path.join(self.pwd, self.name,
                                    self.name + '_' + str(ver) + '_paddle')
+
+        logger.info(">>> onnx2paddle ...")
+
         onnx2paddle(onnx_path,
                     paddle_path,
                     convert_to_lite=False,
                     enable_onnx_checker=self.enable_onnx_checker,
                     disable_feedback=True)

+        logger.info(">>> onnx2paddle finished ...")
+
     def _mk_paddle_res(self, ver):
         """
         make paddle res
         """
+        logger.info(">>> _mk_paddle_res ...")
+
         # input data
         paddle_tensor_feed = list()
         result = list()
@@ -201,7 +223,12 @@ def _mk_paddle_res(self, ver):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True

+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
+            logger.info(">>> self.run_dynamic...")
+
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_' + str(ver) + '_paddle/')
             restore = paddle.load(os.path.join(paddle_path, "model.pdparams"))
@@ -213,7 +240,12 @@ def _mk_paddle_res(self, ver):
             model.set_dict(restore)
             model.eval()
             result = model(*paddle_tensor_feed)
+
+            logger.info(">>> self.run_dynamic finished...")
+
         else:
+            logger.info(">>> NOT self.run_dynamic...")
+
             paddle_model_path = os.path.join(
                 self.pwd, self.name, self.name + '_' + str(ver) +
                 '_paddle/inference_model/model.pdmodel')
@@ -224,22 +256,52 @@ def _mk_paddle_res(self, ver):
             config.set_prog_file(paddle_model_path)
             if os.path.exists(paddle_param_path):
                 config.set_params_file(paddle_param_path)
+
+            logger.info(">>> config.enable_use_gpu...")
+
             # initial GPU memory(M), device ID
             config.enable_use_gpu(200, 0)
+
+            logger.info(">>> config.enable_use_gpu finished...")
+
             # optimize graph and fuse op
             config.switch_ir_optim(False)
             config.enable_memory_optim()
+
+            logger.info(">>> enable_memory_optim finished...")
+
             # disable feed, fetch OP, needed by zero_copy_run
             config.switch_use_feed_fetch_ops(False)
+
+            logger.info(">>> config.disable_glog_info...")
+
             config.disable_glog_info()
+
+            logger.info(">>> config.pass_builder...")
+
             pass_builder = config.pass_builder()
+
+            logger.info(">>> create_predictor(config)...")
+
             predictor = create_predictor(config)
+
+            logger.info(">>> predictor.get_input_names...")
+
             input_names = predictor.get_input_names()
             output_names = predictor.get_output_names()
+
+            logger.info(">>> copy_from_cpu...")
+
             for i in range(len(input_names)):
                 input_tensor = predictor.get_input_handle(input_names[i])
                 input_tensor.copy_from_cpu(self.input_feed[self.inputs_name[i]])
+
+            logger.info(">>> predictor.run...")
+
             predictor.run()
+
+            logger.info(">>> predictor.run finished...")
+
             for output_name in output_names:
                 output_tensor = predictor.get_output_handle(output_name)
                 result.append(output_tensor.copy_to_cpu())
@@ -257,15 +319,24 @@ def _mk_paddle_res(self, ver):
             result = (result, )
         else:
             result = (result.numpy(), )
+
+        logger.info(">>> _mk_paddle_res finished ...")
+
         return result

     def _mk_onnx_res(self, ver):
         """
         make onnx res
         """
+
+        logger.info('>>> _mk_onnx_res InferenceSession...')
+
         sess = InferenceSession(
             os.path.join(self.pwd, self.name,
                          self.name + '_' + str(ver) + '.onnx'))
+
+        logger.info('>>> sess.run ...')
+
         ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
         return ort_outs

@@ -291,6 +362,8 @@ def _mk_onnx_graph(self, ver):
         """
         make onnx graph
         """
+        logger.info(">>> _mk_onnx_graph ... make_node")
+
         node = onnx.helper.make_node(
             self.op_type,
             inputs=self.inputs_name,
@@ -324,8 +397,14 @@ def run(self):
         3. use onnx to make res
         4. compare diff
         """
+
+        logger.info(">>> run ...")
+
         self._mkdir()
         for place in self.places:
+
+            logger.info(">>> place ..." + str(place))
+
             paddle.set_device(place)
             onnx_res = {}
             paddle_res = {}
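
Aside: the `>>> ...` messages added here bracket each call on the inference path, so a hang, such as the `create_predictor` one flagged in the TODO, is localized to whichever marker printed last. Note also that with `self.run_dynamic = True` forced unconditionally as a workaround, the entire `else:` branch that builds a predictor is effectively dead until the TODO is resolved. A stripped-down, runnable sketch of the bracketing idea, with a stand-in for the call that can hang:

    import logging
    import time

    logging.basicConfig(level=logging.INFO, format="%(message)s")
    logger = logging.getLogger(__name__)

    def suspect_call():
        # Stand-in for a call that may block forever, e.g. create_predictor(config).
        time.sleep(1)

    logger.info(">>> suspect_call ...")  # if the process hangs, this is the last line logged
    suspect_call()
    logger.info(">>> suspect_call finished ...")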

test_autoscan/onnx/test_auto_scan_abs.py

Lines changed: 8 additions & 0 deletions

@@ -19,6 +19,12 @@
 import numpy as np
 import unittest

+import logging
+
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+

 class TestAbsConvert(OPConvertAutoScanTest):
     """
@@ -51,6 +57,8 @@ def sample_convert_config(self, draw):
         return (config, attrs)

     def test(self):
+        logger.info('>>> test_auto_scan_abs.py::test')
+
         self.run_and_statis(max_examples=30)

test_autoscan/run_autoscan_onnx.sh

Lines changed: 4 additions & 0 deletions

@@ -24,7 +24,11 @@ for var in ${file_arr[@]}
 do
     log_name=${logs_path}/${var}.log
     echo " Now start test: ${var}"
+
+    echo " >>> begin test ..."
+
     python ${var} > ${log_name} 2>&1
+    # python ${var}

 done
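
Aside: the commented-out `# python ${var}` is presumably kept as a convenience for rerunning a single test with its output on the console instead of redirected into `${log_name}` when chasing a hang.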

test_autoscan/torch/auto_scan_test.py

Lines changed: 13 additions & 11 deletions

@@ -32,6 +32,8 @@
 paddle.set_device("cpu")

 logging.basicConfig(level=logging.INFO, format="%(message)s")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)

 settings.register_profile("ci",
                           max_examples=100,
@@ -112,36 +114,36 @@ def run_test(configs):
         loop_func = given(generator())(run_test)
         if reproduce is not None:
             loop_func = reproduce(loop_func)
-        logging.info("Start to running test of {}".format(type(self)))
+        logger.info("Start to running test of {}".format(type(self)))

         paddle.disable_static()
         loop_func()

-        logging.info(
+        logger.info(
             "===================Statistical Information===================")
-        logging.info("Number of Generated Programs: {}".format(
+        logger.info("Number of Generated Programs: {}".format(
             self.num_ran_tests))
-        logging.info("Number of Ignore Programs: {}".format(
+        logger.info("Number of Ignore Programs: {}".format(
             self.num_ignore_tests))
         successful_ran_programs = int(self.num_ran_tests -
                                       self.num_ignore_tests)
         if successful_ran_programs < min_success_num:
-            logging.warning("satisfied_programs = ran_programs")
-            logging.error(
+            logger.warning("satisfied_programs = ran_programs")
+            logger.error(
                 "At least {} programs need to ran successfully, but now only about {} programs satisfied."
                 .format(min_success_num, successful_ran_programs))
             assert False
         used_time = time.time() - start_time
-        logging.info("Used time: {} s".format(round(used_time, 2)))
+        logger.info("Used time: {} s".format(round(used_time, 2)))
         if max_duration > 0 and used_time > max_duration:
-            logging.error(
+            logger.error(
                 "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`."
                 .format(max_duration))
             assert False

     def run_test(self, configs):
         config, models = configs
-        logging.info("Run configs: {}".format(config))
+        logger.info("Run configs: {}".format(config))

         assert "op_names" in config.keys(
         ), "config must include op_names in dict keys"
@@ -218,9 +220,9 @@ def run_test(self, configs):
                         randtool("float", -2, 2,
                                  shape).astype(input_type[j]))
                 obj.set_input_data("input_data", tuple(input_data))
-                logging.info("Now Run >>> dtype: {}, op_name: {}".format(
+                logger.info("Now Run >>> dtype: {}, op_name: {}".format(
                     input_type, op_names[i]))
                 obj.run()
             if len(input_type_list) == 0:
                 obj.run()
-        logging.info("Run Successfully!")
+        logger.info("Run Successfully!")

test_autoscan/torch/torchbase.py

Lines changed: 3 additions & 0 deletions

@@ -180,6 +180,9 @@ def _mk_paddle_res(self, ):
         if "float64" in self.inputs_dtype:
             self.run_dynamic = True

+        # TODO(megemini): create_predictor stuck
+        self.run_dynamic = True
+
         if self.run_dynamic:
             paddle_path = os.path.join(self.pwd, self.name,
                                        self.name + '_paddle/')

test_benchmark/Caffe/convert.sh

Lines changed: 6 additions & 1 deletion

@@ -10,14 +10,19 @@ find . -name "run.log" | xargs rm -rf

 # use black.list to control CI tests
 filename="black.list"
-models=$(ls -d */ | grep -v -F -f "$filename")
+models=$(ls -d */ | grep -v -F -f "$filename" | awk -F '/' '{print $1}')
 num_of_models=$(ls -d */ | grep -v -F -f "$filename" | wc -l)

 counter=1
 for model in $models
 do
     echo "[X2Paddle-Caffe] ${counter}/${num_of_models} $model ..."
     cd $model
+
+    # make default result is `Failed` in case of `result.txt` not generated
+    touch result.txt
+    echo $model ">>>Failed"> result.txt
+
     sh run_convert.sh $model 1>run.log 2>run.err &
     cd ..
     counter=$(($counter+1))
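
Aside: `ls -d */` emits directory names with a trailing slash, so the added `awk -F '/' '{print $1}'` strips it and `$model` becomes a bare directory name. Pre-writing `result.txt` with a `>>>Failed` marker ensures that a model whose backgrounded `run_convert.sh` crashes before producing a result is still counted as a failure.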
