# word_embeddings_2.py
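# Trains a small IMDB sentiment classifier on top of pretrained
# 100-dimensional GloVe word embeddings, then plots the training curves.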
import os
import tensorflow as tf

# TensorFlow 1.x-style session setup: allocate GPU memory on demand
# instead of reserving it all at startup.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
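
# --- Load the raw IMDB reviews: one text file per review, labelled by folder ---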
imdb_dir = '/Users/jayde/Downloads/aclImdb/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')

labels = []
texts = []
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
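
# --- Tokenize the texts, pad to a fixed length, and split into train/validation ---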
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 100                # cut reviews off after 100 words
training_samples = 200      # train on a very small sample
validation_samples = 10000  # validate on 10,000 samples
max_words = 10000           # consider only the 10,000 most common words

tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

# Shuffle before splitting, since the reviews are ordered negative-then-positive.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
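
# --- Parse the GloVe embeddings file into a word -> vector index ---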
glove_dir = '/Users/jayde/Downloads/glove.6B'

embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
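
# --- Build the embedding matrix: row i holds the GloVe vector for word index i;
#     words missing from GloVe are left as all-zeros ---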
embedding_dim = 100

embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
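
# --- Define the model, load the pretrained embeddings, and freeze them ---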
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

# Load the GloVe matrix into the Embedding layer and freeze it so the
# pretrained vectors are not updated during training.
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=64,
                    validation_data=(x_val, y_val))
model.save_weights('pre_trained_glove_model.h5')
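
# --- Plot training vs. validation accuracy and loss ---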
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()