@@ -276,7 +276,7 @@ def run_epoch(self, session, data, num_epoch=0, train_writer=None, train_op=None
         config = self.config
         dp = config.dropout
         if train_op is None:
-            train_op = tf.no_op()
+            # train_op = tf.no_op()
             dp = 1
         total_steps = len(data[0]) // config.batch_size
         total_loss = []
@@ -295,8 +295,13 @@ def run_epoch(self, session, data, num_epoch=0, train_writer=None, train_op=None
                     self.input_len_placeholder: il[index],
                     self.answer_placeholder: a[index],
                     self.dropout_placeholder: dp}
-            loss, pred, summary, _ = session.run(
-                [self.calculate_loss, self.pred, self.merged, train_op], feed_dict=feed)
+
+            if train_op is None:
+                loss, pred, summary = session.run(
+                    [self.calculate_loss, self.pred, self.merged], feed_dict=feed)
+            else:
+                loss, pred, summary, _ = session.run(
+                    [self.calculate_loss, self.pred, self.merged, train_op], feed_dict=feed)
 
             if train_writer is not None:
                 train_writer.add_summary(summary, num_epoch * total_steps + step)
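
With this change, an evaluation pass (run_epoch called without a train_op) no longer feeds a tf.no_op() through session.run; only the loss, prediction, and summary tensors are fetched. A minimal, hypothetical calling sketch under assumed names (model, model.train_step, train_data, valid_data, and model.config.max_epochs are illustrations, not part of this commit):

import tensorflow as tf

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('summaries/train', session.graph)
    for epoch in range(model.config.max_epochs):
        # Training pass: a real train_op is supplied, so the new else-branch
        # runs it alongside calculate_loss, pred, and merged.
        model.run_epoch(session, train_data, num_epoch=epoch,
                        train_writer=writer, train_op=model.train_step)
        # Evaluation pass: train_op stays None, so only
        # [calculate_loss, pred, merged] are fetched and dropout is set to 1.
        model.run_epoch(session, valid_data, num_epoch=epoch)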