q1_classifier.py
import time
import math
import numpy as np
import tensorflow as tf
from q1_softmax import softmax
from q1_softmax import cross_entropy_loss
from model import Model
from utils import data_iterator
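
# q1_softmax, model, and utils are the assignment's local helper modules:
# softmax and cross_entropy_loss are the hand-written ops from q1_softmax.py,
# Model is the base class this classifier fills in, and data_iterator yields
# minibatches of (inputs, labels).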


class Config(object):
    """Holds model hyperparams and data information.

    The Config class is used to store various hyperparameters and dataset
    information. Model objects are passed a Config() object at instantiation.
    """
    batch_size = 64
    n_samples = 1024
    n_features = 100
    n_classes = 5
    # You may adjust max_epochs to ensure convergence.
    max_epochs = 50
    # You may adjust this learning rate to ensure convergence.
    lr = 1e-4


class SoftmaxModel(Model):
    """Implements a Softmax classifier with cross-entropy loss."""

    def load_data(self):
        """Creates a synthetic dataset and stores it in memory."""
        np.random.seed(1234)
        self.input_data = np.random.rand(
            self.config.n_samples, self.config.n_features)
        self.input_labels = np.ones((self.config.n_samples,), dtype=np.int32)
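        # Note: every label is class 1, so the model only has to learn a
        # constant mapping; this is why the training loss in
        # test_SoftmaxModel below is expected to fall close to zero rapidly.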

    def add_placeholders(self):
        """Generate placeholder variables to represent the input tensors.

        These placeholders are used as inputs by the rest of the model
        building code and will be fed data during training.

        Adds the following nodes to the computational graph:

            input_placeholder: Input placeholder tensor of shape
                (batch_size, n_features), type tf.float32
            labels_placeholder: Labels placeholder tensor of shape
                (batch_size, n_classes), type tf.int32

        Add these placeholders to self as the instance variables
            self.input_placeholder
            self.labels_placeholder
        (Don't change the variable names)
        """
        ### YOUR CODE HERE
        self.input_placeholder = tf.placeholder(
            tf.float32,
            shape=(self.config.batch_size, self.config.n_features))
        self.labels_placeholder = tf.placeholder(
            tf.int32,
            shape=(self.config.batch_size, self.config.n_classes))
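        # Note: these shapes pin the batch dimension, so every minibatch must
        # contain exactly batch_size examples; that holds here because
        # n_samples (1024) is a multiple of batch_size (64). Using
        # shape=(None, n_features) would permit variable batch sizes.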
        ### END YOUR CODE

    def create_feed_dict(self, input_batch, label_batch):
        """Creates the feed_dict for the softmax classifier.

        A feed_dict takes the form of:

        feed_dict = {
            <placeholder>: <tensor of values to be passed for placeholder>,
            ....
        }

        If label_batch is None, then no labels are added to the feed_dict.

        Hint: The keys for the feed_dict should match the placeholder tensors
        created in add_placeholders.

        Args:
            input_batch: A batch of input data.
            label_batch: A batch of label data.
        Returns:
            feed_dict: The feed dictionary mapping from placeholders to values.
        """
        ### YOUR CODE HERE
        assert input_batch is not None
        if label_batch is None:
            feed_dict = {self.input_placeholder: input_batch}
        else:
            feed_dict = {self.input_placeholder: input_batch,
                         self.labels_placeholder: label_batch}
        ### END YOUR CODE
        return feed_dict

    def add_training_op(self, loss):
        """Sets up the training Ops.

        Creates an optimizer and applies the gradients to all trainable
        variables. The Op returned by this function is what must be passed
        to the `sess.run()` call to cause the model to train. See
        https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
        for more information.

        Hint: Use tf.train.GradientDescentOptimizer to get an optimizer
        object. Calling optimizer.minimize() will return a train_op object.

        Args:
            loss: Loss tensor, from cross_entropy_loss.
        Returns:
            train_op: The Op for training.
        """
        ### YOUR CODE HERE
        optimizer = tf.train.GradientDescentOptimizer(self.config.lr)
        train_op = optimizer.minimize(loss)
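        # Note: minimize() is shorthand for compute_gradients() followed by
        # apply_gradients(); the single fused call is all that is needed here.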
        ### END YOUR CODE
        return train_op

    def add_model(self, input_data):
        """Adds a linear layer plus a softmax transformation.

        The core transformation for this model, which transforms a batch of
        input data into a batch of predictions. In this case, the
        mathematical transformation effected is

            y = softmax(xW + b)

        Hint: Make sure to create tf.Variables as needed. Also, make sure to
        use tf.name_scope to ensure that your name spaces are clean.
        Hint: For this simple use case, it's sufficient to initialize both
        the weights W and the biases b with zeros.

        Args:
            input_data: A tensor of shape (batch_size, n_features).
        Returns:
            out: A tensor of shape (batch_size, n_classes).
        """
        ### YOUR CODE HERE
        n_features, n_classes = self.config.n_features, self.config.n_classes
        with tf.name_scope('softmax_linear'):
            weights = tf.Variable(tf.zeros([n_features, n_classes]),
                                  name='weights')
            biases = tf.Variable(tf.zeros([n_classes]), name='biases')
            prod = tf.matmul(input_data, weights)
            z = tf.add(prod, biases)
            out = softmax(z)
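            # Note: softmax here is the hand-written op from q1_softmax; in
            # ordinary TensorFlow code, tf.nn.softmax (or folding the softmax
            # into the loss with tf.nn.softmax_cross_entropy_with_logits)
            # would be the more numerically stable choice.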
        ### END YOUR CODE
        return out

    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be
        a very short function.

        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        ### END YOUR CODE
        return loss

    def run_epoch(self, sess, input_data, input_labels):
        """Runs one epoch of training.

        Args:
            sess: tf.Session() object
            input_data: np.ndarray of shape (n_samples, n_features)
            input_labels: np.ndarray of shape (n_samples, n_classes)
        Returns:
            average_loss: scalar. Average minibatch loss of model on epoch.
        """
        # After everything is built, run the training loop.
        average_loss = 0
        for step, (input_batch, label_batch) in enumerate(
                data_iterator(input_data, input_labels,
                              batch_size=self.config.batch_size,
                              label_size=self.config.n_classes)):
            # Fill a feed dictionary with the actual set of inputs and labels
            # for this particular training step.
            feed_dict = self.create_feed_dict(input_batch, label_batch)
            # Run one step of the model. The return values are the activations
            # from the `self.train_op` (which is discarded) and the `loss` Op.
            # To inspect the values of your Ops or variables, you may include
            # them in the list passed to sess.run() and the value tensors will
            # be returned in the tuple from the call.
            _, loss_value = sess.run([self.train_op, self.loss],
                                     feed_dict=feed_dict)
            average_loss += loss_value
        # step is zero-indexed, so the number of minibatches is step + 1;
        # dividing by step would overestimate the average loss.
        average_loss = average_loss / (step + 1)
        return average_loss

    def fit(self, sess, input_data, input_labels):
        """Fit the model on the provided data.

        Args:
            sess: tf.Session()
            input_data: np.ndarray of shape (n_samples, n_features)
            input_labels: np.ndarray of shape (n_samples, n_classes)
        Returns:
            losses: list of loss per epoch
        """
        losses = []
        for epoch in range(self.config.max_epochs):
            start_time = time.time()
            average_loss = self.run_epoch(sess, input_data, input_labels)
            duration = time.time() - start_time
            # Print status to stdout.
            print('Epoch %d: loss = %.2f (%.3f sec)'
                  % (epoch, average_loss, duration))
            losses.append(average_loss)
        return losses

    def __init__(self, config):
        """Initializes the model.

        Args:
            config: A model configuration object of type Config
        """
        self.config = config
        # Load the data, then build the graph: placeholders, predictions,
        # loss, and the training op.
        self.load_data()
        self.add_placeholders()
        self.pred = self.add_model(self.input_placeholder)
        self.loss = self.add_loss_op(self.pred)
        self.train_op = self.add_training_op(self.loss)


def test_SoftmaxModel():
    """Train the softmax model for a number of steps."""
    print("Running test_SoftmaxModel")
    config = Config()
    with tf.Graph().as_default():
        model = SoftmaxModel(config)
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Run the Op to initialize the variables.
        init = tf.global_variables_initializer()
        sess.run(init)
        losses = model.fit(sess, model.input_data, model.input_labels)
        # If the ops are implemented correctly, the average loss should fall
        # close to zero rapidly.
        assert losses[-1] < .5
        print("Basic (non-exhaustive) classifier tests pass\n")
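
# Note: this file targets the TensorFlow 1.x API (tf.placeholder, tf.Session,
# tf.train.GradientDescentOptimizer). Under TensorFlow 2 it would need the
# tf.compat.v1 shims plus tf.compat.v1.disable_eager_execution(), or a
# rewrite in idiomatic TF2.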


if __name__ == "__main__":
    test_SoftmaxModel()