Skip to content

Commit

Permalink
Add NASNet Mobile models
Browse files Browse the repository at this point in the history
  • Loading branch information
titu1994 committed Jan 7, 2018
1 parent 3861957 commit b149400
Show file tree
Hide file tree
Showing 6 changed files with 1,144 additions and 6 deletions.
52 changes: 46 additions & 6 deletions data_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def parse_data(filename, scores):
image = (tf.cast(image, tf.float32) - 127.5) / 127.5
return image, scores

def parse_data_validation(filename, scores):
def parse_data_without_augmentation(filename, scores):
image = tf.read_file(filename)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize_images(image, (IMAGE_SIZE, IMAGE_SIZE))
Expand All @@ -68,14 +68,15 @@ def parse_data_validation(filename, scores):

print('Train and validation datasets ready !')

def train_generator(batchsize):
def train_generator(batchsize, shuffle=True):
with tf.Session() as sess:
train_dataset = tfdata.Dataset().from_tensor_slices((train_image_paths, train_scores))
train_dataset = train_dataset.map(parse_data)
train_dataset = train_dataset.map(parse_data, num_parallel_calls=2)

train_dataset = train_dataset.batch(batchsize)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.shuffle(buffer_size=4)
if shuffle:
train_dataset = train_dataset.shuffle(buffer_size=4)
train_iterator = train_dataset.make_initializable_iterator()

train_batch = train_iterator.get_next()
Expand All @@ -97,11 +98,10 @@ def train_generator(batchsize):
def val_generator(batchsize):
with tf.Session() as sess:
val_dataset = tfdata.Dataset().from_tensor_slices((val_image_paths, val_scores))
val_dataset = val_dataset.map(parse_data_validation)
val_dataset = val_dataset.map(parse_data_without_augmentation)

val_dataset = val_dataset.batch(batchsize)
val_dataset = val_dataset.repeat()
val_dataset = val_dataset.shuffle(buffer_size=4)
val_iterator = val_dataset.make_initializable_iterator()

val_batch = val_iterator.get_next()
Expand All @@ -119,3 +119,43 @@ def val_generator(batchsize):

X_batch, y_batch = sess.run(val_batch)
yield (X_batch, y_batch)

def features_generator(record_path, batchsize, shuffle=True):
    """Yield (features, scores) batches parsed from a TFRecord file.

    Args:
        record_path: path to a TFRecord file of pre-extracted NASNet features
            (each record holds a 1056-float 'features' vector and a 10-float
            'scores' distribution).
        batchsize: number of records per yielded batch.
        shuffle: if True, apply dataset shuffling. NOTE(review): shuffling is
            applied *after* batch()/repeat(), so it shuffles whole batches
            rather than individual records -- confirm this is intended.

    Yields:
        (X_batch, y_batch) numpy arrays of shapes (batchsize, 1056) and
        (batchsize, 10).
    """
    with tf.Session() as sess:
        def parse_single_record(serialized_example):
            # Parse one serialized tf.train.Example into (features, scores).
            example = tf.parse_single_example(
                serialized_example,
                features={
                    'features': tf.FixedLenFeature([1056], tf.float32),
                    'scores': tf.FixedLenFeature([10], tf.float32),
                })
            return example['features'], example['scores']

        train_dataset = tfdata.TFRecordDataset([record_path])
        train_dataset = train_dataset.map(parse_single_record, num_parallel_calls=4)

        train_dataset = train_dataset.batch(batchsize)
        train_dataset = train_dataset.repeat()
        if shuffle:
            train_dataset = train_dataset.shuffle(buffer_size=5)
        train_iterator = train_dataset.make_initializable_iterator()

        train_batch = train_iterator.get_next()

        sess.run(train_iterator.initializer)

        while True:
            try:
                X_batch, y_batch = sess.run(train_batch)
                yield (X_batch, y_batch)
            except tf.errors.OutOfRangeError:
                # Dataset exhausted: rebuild and re-initialize the iterator so
                # the generator never terminates. Narrowed from a bare
                # `except:`, which also swallowed KeyboardInterrupt/SystemExit
                # and masked real errors.
                train_iterator = train_dataset.make_initializable_iterator()
                sess.run(train_iterator.initializer)
                train_batch = train_iterator.get_next()

                X_batch, y_batch = sess.run(train_batch)
                yield (X_batch, y_batch)
34 changes: 34 additions & 0 deletions evaluate_nasnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import numpy as np

from keras.models import Model
from keras.layers import Dense, Dropout
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf

from nasnet import NASNetMobile, preprocess_input
from utils import mean_score, std_score

# Score a single image with the NIMA NASNet-Mobile model on the CPU.
# (Assumes the whole pipeline runs under the CPU device scope -- the diff
# view stripped indentation, so verify against the original file.)
with tf.device('/CPU:0'):
    # Build the NIMA head: dropout + 10-way softmax over quality bins,
    # on top of a NASNet Mobile feature extractor.
    base_model = NASNetMobile((224, 224, 3), include_top=False, pooling='avg',
                              weights=None)
    hidden = Dropout(0.75)(base_model.output)
    outputs = Dense(10, activation='softmax')(hidden)

    model = Model(base_model.input, outputs)
    model.load_weights('weights/nasnet_weights.h5', by_name=True)

    img_path = 'images/art1.jpg'
    # Load, resize, add a batch dimension, and apply NASNet preprocessing.
    pixels = img_to_array(load_img(img_path, target_size=(224, 224)))
    batch = preprocess_input(np.expand_dims(pixels, axis=0))

    scores = model.predict(batch, batch_size=1, verbose=1)[0]

    mean = mean_score(scores)
    std = std_score(scores)

    print("Evaluating : ", img_path)
    print("NIMA Score : %0.3f +- (%0.3f)" % (mean, std))


78 changes: 78 additions & 0 deletions extract_nasnet_features.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import numpy as np

import tensorflow as tf
from keras import backend as K
from nasnet import NASNetMobile

from data_loader import train_generator, val_generator

# Shared session so Keras and the data generators use the same TF runtime.
sess = tf.Session()
K.set_session(sess)

image_size = 224


def _float32_feature_list(floats):
    """Wrap a sequence of floats in a tf.train.Feature (float_list)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=floats))


def _write_feature_records(generator, nb_samples, batchsize, record_path):
    """Extract NASNet features batch by batch and store them in a TFRecord.

    Deduplicates the previously copy-pasted train/val loops. Each written
    record holds the flattened NASNet 'features' vector of one image and its
    10-bin 'scores' distribution.

    Args:
        generator: yields (image_batch, score_batch) pairs.
        nb_samples: total number of samples to process.
        batchsize: samples per generator batch / predict call.
        record_path: output TFRecord file path.
    """
    writer = tf.python_io.TFRecordWriter(record_path)

    count = 0
    for _ in range(nb_samples // batchsize):
        x_batch, y_batch = next(generator)

        with sess.as_default():
            # Replace raw images with their pooled NASNet feature vectors.
            x_batch = model.predict(x_batch, batchsize, verbose=1)

        for x, y in zip(x_batch, y_batch):
            features = tf.train.Features(feature={
                'features': _float32_feature_list(x.flatten()),
                'scores': _float32_feature_list(y.flatten()),
            })
            example = tf.train.Example(features=features)
            writer.write(example.SerializeToString())

        count += batchsize
        print("Finished %0.2f percentage storing dataset" % (count * 100 / float(nb_samples)))

    writer.close()


model = NASNetMobile((image_size, image_size, 3), include_top=False, pooling='avg')
model.summary()

''' TRAIN SET '''
with sess.as_default():
    _write_feature_records(train_generator(200, shuffle=False),
                           nb_samples=250000 * 2, batchsize=200,
                           record_path='weights/nasnet_train.tfrecord')

''' VALIDATION SET '''  # was mislabeled "TRAIN SET" in the original
with sess.as_default():
    _write_feature_records(val_generator(200),
                           nb_samples=5000, batchsize=200,
                           record_path='weights/nasnet_val.tfrecord')
Loading

0 comments on commit b149400

Please sign in to comment.