# train_lv.py — 94 lines (70 loc), 3.01 KB
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import project.hourglass.params as hgparams
import project.input.augmentation as augmentation
from project.flags import FLAGS, dump_flags
from project.hourglass import estimator
from project.input import data
from project.utils.tfrecords import tfrecords_count
# Emit verbose TF logs so training progress is visible on the console.
tf.logging.set_verbosity(tf.logging.DEBUG)
def _input_fn(filename, n_landmarks):
    """Build the (features, targets) input pipeline for the estimator.

    Reads examples from `filename` via a DataProvider (batching, optional
    augmentation, cat-face flip function), rescales images to [0, 1], and
    packs the tensors into the feature/target dicts the model expects.

    Args:
        filename: Path to the TFRecords file to read from.
        n_landmarks: Number of landmarks per example.

    Returns:
        A `(features, targets)` pair of dicts of tensors.
    """
    with tf.name_scope("input"):
        reader = data.DataProvider(
            filename=filename,
            n_landmarks=n_landmarks,
            batch_size=FLAGS.batch_size,
            num_threads=FLAGS.num_preprocess_threads,
            augmentation=FLAGS.augmentation,
            flip_fn=augmentation.catface_flip_fn
        )
        images, gt_heatmaps, gt_lms, scale, marked = reader.get()
        # Scale raw pixel values from [0, 255] down to [0, 1].
        images = images / 255.
        features = {
            'image': images,
            'marked_idx': marked,
            'scale': scale,
        }
        targets = {
            'heatmap': gt_heatmaps,
            'coordinates': gt_lms,
        }
        return features, targets
def _configure_validation(eval_data, train_epoch_steps, n_landmarks):
    """Build the validation / early-stopping monitors for training.

    Args:
        eval_data: Path to the evaluation TFRecords file, or None to
            disable validation.
        train_epoch_steps: Number of training steps in one epoch (int).
        n_landmarks: Number of landmarks fed to the input pipeline.

    Returns:
        A single-element list holding a ValidationMonitor, or None when
        validation is disabled (no eval data, or eval_every_n_epochs <= 0).
    """
    if eval_data is None or FLAGS.eval_every_n_epochs <= 0:
        return None
    # BUG FIX: previously counted records in FLAGS.eval_data, silently
    # ignoring the eval_data argument used everywhere else in this function.
    eval_epoch_size = tfrecords_count(eval_data)
    # BUG FIX: '/' is true division here (division is imported from
    # __future__), which yields a float; eval_steps must be an integer.
    # max(1, ...) keeps at least one eval step for eval sets smaller than
    # one batch.
    eval_epoch_steps = max(1, eval_epoch_size // FLAGS.batch_size)
    # Evaluate every N training epochs; stop after `patience` epochs
    # without improvement in the loss.
    every_n_steps = FLAGS.eval_every_n_epochs * train_epoch_steps
    early_stopping_rounds = FLAGS.patience * train_epoch_steps
    validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=lambda: _input_fn(eval_data, n_landmarks),
        eval_steps=eval_epoch_steps,
        every_n_steps=every_n_steps,
        early_stopping_metric="loss",
        early_stopping_metric_minimize=True,
        early_stopping_rounds=early_stopping_rounds)
    return [validation_monitor]
def train(model_dir, train_data, eval_data=None, params=None):
    """Train the hourglass estimator, optionally with periodic validation.

    Args:
        model_dir: Directory for checkpoints and summaries.
        train_data: Path to the training TFRecords file.
        eval_data: Optional path to evaluation TFRecords; enables the
            validation / early-stopping monitor when set.
        params: Optional dict of model hyper-parameters; a few keys are
            filled in here before the estimator is built.
    """
    # BUG FIX: the default params=None crashed on the params[...] writes
    # below; start from an empty dict instead.
    if params is None:
        params = {}
    train_epoch_size = tfrecords_count(train_data)
    # BUG FIX: '/' is true division here (division is imported from
    # __future__), producing a float, but save_checkpoints_steps and
    # max_steps must be integers. max(1, ...) guards tiny datasets.
    train_epoch_steps = max(1, train_epoch_size // FLAGS.batch_size)
    max_steps = None
    if FLAGS.max_epochs is not None:
        max_steps = FLAGS.max_epochs * train_epoch_steps
    # Instantiate Estimator: checkpoint once per epoch, keep enough
    # checkpoints to cover the early-stopping patience window.
    run_config = tf.contrib.learn.RunConfig(
        save_summary_steps=50,
        save_checkpoints_secs=None,
        save_checkpoints_steps=train_epoch_steps,
        keep_checkpoint_max=max(FLAGS.patience, 5)
    )
    # 34 landmarks: the cat-face annotation scheme used by this script.
    params[hgparams.N_LANDMARKS] = 34
    params['eval_fn'] = estimator.catface_evaluation
    nn = estimator.get_estimator(model_dir=model_dir, params=params, run_config=run_config)
    # Fit
    nn.fit(input_fn=lambda: _input_fn(train_data, params[hgparams.N_LANDMARKS]),
           max_steps=max_steps,
           monitors=_configure_validation(eval_data, train_epoch_steps, params[hgparams.N_LANDMARKS]))
def main(unused_argv):
    """Entry point for tf.app.run: build params from flags and train."""
    hyperparams = hgparams.train_params_from_flags()
    # Log the effective flag values before training starts.
    dump_flags()
    train(FLAGS.model_dir, FLAGS.train_data, FLAGS.eval_data, hyperparams)


if __name__ == '__main__':
    tf.app.run(main=main)