forked from tatz1101/Edge-AI-Platform-Tutorials
-
Notifications
You must be signed in to change notification settings - Fork 0
/
5_plot_learning_curve.py
90 lines (71 loc) · 2.68 KB
/
5_plot_learning_curve.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
'''
Title :plot_learning_curve.py
Description :This script generates learning curves for caffe models
Author :Adil Moujahid
Date Created :20160619
Date Modified :20160619
version :0.1
usage :python /home/ML/cifar10/caffe/code/code/5_plot_learning_curve.py /home/ML/cifar10/caffe/code/models/miniVggNet/m3/logfile_3_miniVggNet.log /home/ML/cifar10/caffe/code/models/miniVggNet/m3/plt_train_val_3_miniVggNet.png
python_version :2.7.11
history :modified by [email protected]
Date Modified :2018-Sep-19
2018 July 07 :added config file
'''
import os
import sys
import subprocess
import pandas as pd
from config import cifar10_config as config
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
plt.style.use('ggplot')
# Caffe installation root, taken from the project config module
caffe_path = config.CAFFE_ROOT  # i.e. "/caffe/Caffe-SSD-Ristretto"
# Command-line arguments: <raw caffe log file> <output PNG path>
model_log_path = sys.argv[1]
learning_curve_path = sys.argv[2]
# Work from the directory holding the log, so the .train/.test side files
# produced by parse_log.sh land next to it
os.chdir(os.path.dirname(model_log_path))
'''
Generating training and test logs
'''
# Run Caffe's parse_log.sh to split the raw log into <log>.train / <log>.test
# CSV files. Using an argument list (no shell=True string) avoids quoting and
# injection problems, and check_call raises CalledProcessError on a non-zero
# exit instead of silently continuing with missing files. The original
# Popen(..., stdout=PIPE) also never read the pipe, which can deadlock on
# large output; check_call has no such pipe.
command = [os.path.join(caffe_path, 'tools', 'extra', 'parse_log.sh'), model_log_path]
subprocess.check_call(command)
# Read the parsed training and test logs (whitespace-delimited tables).
# sep=r'\s+' is the supported equivalent of the deprecated delim_whitespace.
train_log_path = model_log_path + '.train'
test_log_path = model_log_path + '.test'
train_log = pd.read_csv(train_log_path, sep=r'\s+')
test_log = pd.read_csv(test_log_path, sep=r'\s+')
'''
Making learning curve
'''
fig, ax1 = plt.subplots()
# Left axis: loss curves — training loss (red, translucent) and test loss (green).
train_loss, = ax1.plot(train_log['#Iters'], train_log['TrainingLoss'], color='red', alpha=.5)
test_loss, = ax1.plot(test_log['#Iters'], test_log['TestLoss'], linewidth=2, color='green')
# Positional limits: the ymin/ymax keywords were removed in matplotlib 3.0,
# so set_ylim(0, 1) keeps this working on both old and current releases.
ax1.set_ylim(0, 1)
ax1.set_xlabel('Iterations', fontsize=15)
ax1.set_ylabel('Loss', fontsize=15)
ax1.tick_params(labelsize=15)
# Right axis: test accuracy (blue), sharing the iteration x-axis.
ax2 = ax1.twinx()
test_accuracy, = ax2.plot(test_log['#Iters'], test_log['TestAccuracy'], linewidth=2, color='blue')
ax2.set_ylim(0, 1)
ax2.set_ylabel('Accuracy', fontsize=15)
ax2.tick_params(labelsize=15)
# Single legend covering both axes' artists.
plt.legend([train_loss, test_loss, test_accuracy], ['Training Loss', 'Test Loss', 'Test Accuracy'], bbox_to_anchor=(1, 0.8))
plt.title('Training Curve', fontsize=18)
# Save to the path given on the command line (Agg backend: file output only).
plt.savefig(learning_curve_path)
'''
Deleting training and test logs
'''
# Remove the intermediate .train/.test files produced by parse_log.sh.
# os.remove replaces the original shell-out to `rm` (no subshell, no string
# concatenation, and an OSError if deletion fails instead of a silently
# ignored exit code). This also drops the stray double assignment
# `command = command = ...` the original had for the test log.
os.remove(train_log_path)
os.remove(test_log_path)