2626from onnx import TensorProto
2727from onnxruntime import InferenceSession
2828
29+ logging .basicConfig (level = logging .INFO , format = "%(message)s" )
30+ logger = logging .getLogger (__name__ )
31+ logger .setLevel (logging .INFO )
32+
2933DTYPE_ONNX_STR_MAP = {
3034 'float32' : TensorProto .FLOAT ,
3135 'float64' : TensorProto .DOUBLE ,
@@ -44,6 +48,9 @@ def compare(result, expect, delta=1e-10, rtol=1e-10):
4448 delta: absolute error
4549 rtol: relative error
4650 """
51+
52+ logger .info (">>> compare ..." )
53+
4754 if type (result ) == np .ndarray :
4855 if type (expect ) == list :
4956 expect = expect [0 ]
@@ -167,6 +174,9 @@ def _mkdir(self):
167174 """
168175 make dir to save all
169176 """
177+
178+ logger .info (">>> _mkdir ..." )
179+
170180 save_path = os .path .join (self .pwd , self .name )
171181 if not os .path .exists (save_path ):
172182 os .mkdir (save_path )
@@ -175,21 +185,33 @@ def _onnx_to_paddle(self, ver):
175185 """
176186 convert onnx to paddle
177187 """
188+ logger .info (">>> _onnx_to_paddle ..." )
189+
178190 from x2paddle .convert import onnx2paddle
191+
192+ logger .info (">>> from x2paddle.convert import onnx2paddle ..." )
193+
179194 onnx_path = os .path .join (self .pwd , self .name ,
180195 self .name + '_' + str (ver ) + '.onnx' )
181196 paddle_path = os .path .join (self .pwd , self .name ,
182197 self .name + '_' + str (ver ) + '_paddle' )
198+
199+ logger .info (">>> onnx2paddle ..." )
200+
183201 onnx2paddle (onnx_path ,
184202 paddle_path ,
185203 convert_to_lite = False ,
186204 enable_onnx_checker = self .enable_onnx_checker ,
187205 disable_feedback = True )
188206
207+ logger .info (">>> onnx2paddle finished ..." )
208+
def _mk_paddle_res(self, ver):
    """
    make paddle res

    Run the Paddle model converted for opset version ``ver`` and return
    its outputs as a tuple (see the normalization at the end).

    NOTE(review): this view is a diff paste; three hunks of the original
    body are not visible here and are marked with "omitted" comments —
    do not treat this transcription as the complete method.
    """
    logger.info(">>> _mk_paddle_res ...")

    # input data
    paddle_tensor_feed = list()
    result = list()

    # -- diff hunk omitted here (population of paddle_tensor_feed from
    #    self.input_feed is not visible in this view) --

    # float64 inputs apparently require the dygraph path — TODO confirm
    if "float64" in self.inputs_dtype:
        self.run_dynamic = True

    # TODO(megemini): create_predictor stuck
    # NOTE(review): this unconditionally forces the dygraph path, which
    # makes the static-graph `else` branch below dead code — confirm
    # this workaround is intentional before merging.
    self.run_dynamic = True

    if self.run_dynamic:
        logger.info(">>> self.run_dynamic...")

        # Dygraph path: load saved parameters and call the model directly.
        paddle_path = os.path.join(self.pwd, self.name,
                                   self.name + '_' + str(ver) + '_paddle/')
        restore = paddle.load(os.path.join(paddle_path, "model.pdparams"))

        # -- diff hunk omitted here (construction of `model` is not
        #    visible in this view) --

        model.set_dict(restore)
        model.eval()
        result = model(*paddle_tensor_feed)

        logger.info(">>> self.run_dynamic finished...")

    else:
        logger.info(">>> NOT self.run_dynamic...")

        # Static-graph path: run the exported inference model through the
        # Paddle inference predictor.
        paddle_model_path = os.path.join(
            self.pwd, self.name, self.name + '_' + str(ver) +
            '_paddle/inference_model/model.pdmodel')

        # -- diff hunk omitted here (definition of `paddle_param_path`
        #    and creation of `config` are not visible in this view) --

        config.set_prog_file(paddle_model_path)
        # Params file is optional (parameter-free models have none).
        if os.path.exists(paddle_param_path):
            config.set_params_file(paddle_param_path)

        logger.info(">>> config.enable_use_gpu...")

        # initial GPU memory(M), device ID
        config.enable_use_gpu(200, 0)

        logger.info(">>> config.enable_use_gpu finished...")

        # optimize graph and fuse op
        config.switch_ir_optim(False)
        config.enable_memory_optim()

        logger.info(">>> enable_memory_optim finished...")

        # disable feed, fetch OP, needed by zero_copy_run
        config.switch_use_feed_fetch_ops(False)

        logger.info(">>> config.disable_glog_info...")

        config.disable_glog_info()

        logger.info(">>> config.pass_builder...")

        # NOTE(review): `pass_builder` is never used afterwards in the
        # visible lines — presumably kept for a side effect; verify.
        pass_builder = config.pass_builder()

        logger.info(">>> create_predictor(config)...")

        predictor = create_predictor(config)

        logger.info(">>> predictor.get_input_names...")

        input_names = predictor.get_input_names()
        output_names = predictor.get_output_names()

        logger.info(">>> copy_from_cpu...")

        # Feed each input tensor by name order; assumes self.inputs_name
        # is ordered consistently with the predictor's input_names —
        # TODO confirm.
        for i in range(len(input_names)):
            input_tensor = predictor.get_input_handle(input_names[i])
            input_tensor.copy_from_cpu(self.input_feed[self.inputs_name[i]])

        logger.info(">>> predictor.run...")

        predictor.run()

        logger.info(">>> predictor.run finished...")

        # Collect every output back to host memory.
        for output_name in output_names:
            output_tensor = predictor.get_output_handle(output_name)
            result.append(output_tensor.copy_to_cpu())

    # -- diff hunk omitted here (the `if` condition paired with the
    #    `else` below, and any surrounding normalization, are not
    #    visible in this view) --

            result = (result, )
        else:
            result = (result.numpy(), )

    logger.info(">>> _mk_paddle_res finished ...")

    return result
261326
def _mk_onnx_res(self, ver):
    """
    make onnx res

    Run the exported ONNX model for opset version ``ver`` through
    onnxruntime and return its raw outputs.

    Args:
        ver (int): opset version embedded in the model file name.

    Returns:
        list: outputs produced by ``InferenceSession.run`` for all
        model outputs (``output_names=None``), fed with
        ``self.input_feed``.
    """
    logger.info('>>> _mk_onnx_res InferenceSession...')

    model_path = os.path.join(self.pwd, self.name,
                              self.name + '_' + str(ver) + '.onnx')
    # onnxruntime >= 1.9 raises ValueError when no `providers` list is
    # given and more than one execution provider is available (e.g. the
    # GPU wheel is installed); pin the CPU provider explicitly so the
    # reference result is always produced the same way.
    sess = InferenceSession(model_path,
                            providers=['CPUExecutionProvider'])

    logger.info('>>> sess.run ...')

    ort_outs = sess.run(output_names=None, input_feed=self.input_feed)
    return ort_outs
271342
@@ -291,6 +362,8 @@ def _mk_onnx_graph(self, ver):
291362 """
292363 make onnx graph
293364 """
365+ logger .info (">>> _mk_onnx_graph ... make_node" )
366+
294367 node = onnx .helper .make_node (
295368 self .op_type ,
296369 inputs = self .inputs_name ,
@@ -324,8 +397,14 @@ def run(self):
324397 3. use onnx to make res
325398 4. compare diff
326399 """
400+
401+ logger .info (">>> run ..." )
402+
327403 self ._mkdir ()
328404 for place in self .places :
405+
406+ logger .info (">>> place ..." + str (place ))
407+
329408 paddle .set_device (place )
330409 onnx_res = {}
331410 paddle_res = {}
# NOTE(review): "0 commit comments" is GitHub commit-page residue from the
# copy-paste, not part of the source file — remove when cleaning up.