
Commit 26f415b

Authored Aug 12, 2019
Add files via upload
1 parent 4ce177f commit 26f415b

18 files changed: +2796 -0 lines changed
 

‎binary_test.py

+73
@@ -0,0 +1,73 @@
'''
@Author : lance
@Email : wangyl306@163.com
'''


from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np


def mytest(path, steps, input_shape):
    # Load the test data
    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,
                                                                         target_size=input_shape,
                                                                         classes=["C2F", "X2F"],
                                                                         class_mode="binary",
                                                                         batch_size=10, shuffle=False)

    model = load_model(path)
    # Collect the ground-truth labels batch by batch
    test_class = np.array([])
    for i in range(steps):
        test_imgs, test_labels = next(test_batches)
        test_class = np.hstack((test_class, test_labels))
    print("True labels:", test_class)

    # Predict and threshold the sigmoid outputs at 0.5
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    pred = pred.ravel()
    pred = list(pred)
    for i in range(len(pred)):
        if pred[i] < 0.5:
            pred[i] = 0
        else:
            pred[i] = 1
    print("Predictions:", pred)

    # Print the confusion matrix and the accuracy (trace / total)
    cm = confusion_matrix(test_class, pred)
    print(cm)

    tmp = 0
    for i in range(len(cm[0, :])):
        tmp += cm[i][i]
    accuracy = tmp / np.sum(cm)
    print("acc:", accuracy)

    return path, accuracy


if __name__ == "__main__":
    mytest("weights/bcnn_0033.h5", 25, (224, 224))           # 0.77
    # mytest("weights/densenet_0023.h5", 25, (224, 224))     # 0.87
    # mytest("weights/ince_res_0021.h5", 25, (299, 299))     # 0.85
    # mytest("weights/inceptionv3_0033.h5", 25, (299, 299))  # 0.80
    # mytest("weights/merge_0022.h5", 25, (224, 224))        # 0.81
    # mytest("weights/mobilenetv2_0032.h5", 25, (224, 224))  # 0.87
    # mytest("weights/nasnet_0017.h5", 25, (224, 224))       # 0.87
    # mytest("weights/resnet_0018.h5", 25, (224, 224))       # 0.79
    # mytest("weights/vgg19two_0022.h5", 25, (224, 224))     # 0.82
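
For reference, the thresholding and accuracy computation in mytest() can be written more compactly. The following is a minimal sketch that reuses the model, test_batches, steps and test_class variables defined above; it is not code shipped in this commit:

    import numpy as np
    from sklearn.metrics import accuracy_score, confusion_matrix

    probs = model.predict_generator(test_batches, steps=steps, verbose=1).ravel()
    pred = (probs >= 0.5).astype(int)                  # same 0.5 threshold as above
    print(confusion_matrix(test_class, pred))
    print("acc:", accuracy_score(test_class, pred))    # equals trace(cm) / cm.sum()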

‎binary_vote_pred.ipynb

+247
@@ -0,0 +1,247 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 1,
6+
"metadata": {
7+
"collapsed": true
8+
},
9+
"outputs": [
10+
{
11+
"name": "stderr",
12+
"output_type": "stream",
13+
"text": [
14+
"Using TensorFlow backend.\n"
15+
]
16+
}
17+
],
18+
"source": [
19+
"from keras.models import load_model\n",
20+
"from keras_preprocessing.image import ImageDataGenerator\n",
21+
"import numpy as np\n",
22+
"from sklearn.metrics import confusion_matrix\n",
23+
"import cv2 as cv"
24+
]
25+
},
26+
{
27+
"cell_type": "code",
28+
"execution_count": 2,
29+
"metadata": {
30+
"collapsed": false
31+
},
32+
"outputs": [
33+
{
34+
"name": "stdout",
35+
"output_type": "stream",
36+
"text": [
37+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n"
38+
]
39+
},
40+
{
41+
"name": "stdout",
42+
"output_type": "stream",
43+
"text": [
44+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
45+
]
46+
},
47+
{
48+
"name": "stdout",
49+
"output_type": "stream",
50+
"text": [
51+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n"
52+
]
53+
},
54+
{
55+
"name": "stdout",
56+
"output_type": "stream",
57+
"text": [
58+
"模型加载成功\n"
59+
]
60+
}
61+
],
62+
"source": [
63+
"#加载模型\n",
64+
"model1 = load_model(\"weights/vgg16_0032.h5\")\n",
65+
"model2 = load_model(\"weights/vgg16_0029.h5\")\n",
66+
"model3 = load_model(\"weights/vgg16_0030.h5\")\n",
67+
"\n",
68+
"print(\"模型加载成功\")"
69+
]
70+
},
71+
{
72+
"cell_type": "code",
73+
"execution_count": 9,
74+
"metadata": {},
75+
"outputs": [
76+
{
77+
"name": "stdout",
78+
"output_type": "stream",
79+
"text": [
80+
"Found 10 images belonging to 2 classes.\n"
81+
]
82+
},
83+
{
84+
"name": "stdout",
85+
"output_type": "stream",
86+
"text": [
87+
"\r1/1 [==============================] - 0s 130ms/step\n"
88+
]
89+
},
90+
{
91+
"name": "stdout",
92+
"output_type": "stream",
93+
"text": [
94+
"Found 10 images belonging to 2 classes.\n"
95+
]
96+
},
97+
{
98+
"name": "stdout",
99+
"output_type": "stream",
100+
"text": [
101+
"\r1/1 [==============================] - 0s 140ms/step\n"
102+
]
103+
},
104+
{
105+
"name": "stdout",
106+
"output_type": "stream",
107+
"text": [
108+
"Found 10 images belonging to 2 classes.\n"
109+
]
110+
},
111+
{
112+
"name": "stdout",
113+
"output_type": "stream",
114+
"text": [
115+
"\r1/1 [==============================] - 0s 130ms/step\n"
116+
]
117+
},
118+
{
119+
"name": "stdout",
120+
"output_type": "stream",
121+
"text": [
122+
"Found 10 images belonging to 2 classes.\n真实类别: [0. 0. 0. 0. 0. 1. 1. 1. 1. 1.]\n预测类别: [0, 0, 0, 0, 0, 1, 0, 1, 0, 1]\n[[5 0]\n [2 3]]\nacc: 0.8\n"
123+
]
124+
}
125+
],
126+
"source": [
127+
"#导入数据\n",
128+
"def load_data(shape):\n",
129+
" test_path = '测试/t'\n",
130+
" test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,\n",
131+
" target_size=shape,\n",
132+
" classes=[\"C2F\",\"X2F\"],\n",
133+
" class_mode=\"binary\",batch_size=10,shuffle=False)\n",
134+
" return test_batches\n",
135+
"\n",
136+
"# 预测\n",
137+
"def pred(model,steps,shape):\n",
138+
" steps = steps\n",
139+
" test_batches=load_data(shape)\n",
140+
" pred = model.predict_generator(test_batches, steps=steps, verbose=1)\n",
141+
" pred = pred.ravel()\n",
142+
" pred = list(pred)\n",
143+
" for i in range(len(pred)):\n",
144+
" if pred[i] < 0.5:\n",
145+
" pred[i] = 0\n",
146+
" else:\n",
147+
" pred[i] = 1\n",
148+
" return pred\n",
149+
"#投票选出最多的\n",
150+
"def vote(lt):\n",
151+
"\tindex1 = 0\n",
152+
"\tmax = 0\n",
153+
"\tfor i in range(len(lt)):\n",
154+
"\t\tflag = 0\n",
155+
"\t\tfor j in range(i+1,len(lt)):\n",
156+
"\t\t\tif lt[j] == lt[i]:\n",
157+
"\t\t\t\tflag += 1\n",
158+
"\t\tif flag > max:\n",
159+
"\t\t\tmax = flag\n",
160+
"\t\t\tindex1 = i\n",
161+
"\treturn index1\n",
162+
"def Ensemble():\n",
163+
" ans = []\n",
164+
" pred1=list(pred(model1,1,(224,224)))\n",
165+
" pred2=list(pred(model2,1,(224,224)))\n",
166+
" pred3=list(pred(model3,1,(224,224)))\n",
167+
" for i in range(len(pred1)):\n",
168+
" ls = []\n",
169+
" ls.append(pred1[i])\n",
170+
" ls.append(pred2[i])\n",
171+
" ls.append(pred3[i])\n",
172+
" ans.append(ls[vote(ls)])\n",
173+
" return ans\n",
174+
"\n",
175+
"#投票得出最终结果\n",
176+
"predicts=Ensemble()\n",
177+
"# for i in enumerate(predicts):\n",
178+
"# print(i)\n",
179+
"\n",
180+
"\n",
181+
"\n",
182+
"test_batches = load_data((224,224))\n",
183+
"test_class = np.array([])\n",
184+
"\n",
185+
"files=[]\n",
186+
"for i in range(1):\n",
187+
" test_imgs, test_lables = next(test_batches)\n",
188+
" test_class = np.hstack((test_class, test_lables))\n",
189+
" files.append(test_imgs)\n",
190+
"print(\"真实类别:\", test_class)\n",
191+
"print(\"预测类别:\", predicts)\n",
192+
"\n",
193+
"\n",
194+
"# 打印混淆矩阵\n",
195+
"cm = confusion_matrix(test_class, predicts)\n",
196+
"\n",
197+
"print(cm)\n",
198+
"\n",
199+
"tmp = 0\n",
200+
"for i in range(len(cm[0, :])):\n",
201+
" tmp += cm[i][i]\n",
202+
"accuracy = tmp / np.sum(cm)\n",
203+
"print(\"acc:\", accuracy)\n",
204+
"\n",
205+
"\n",
206+
"i=0\n",
207+
"for images in files:\n",
208+
" for label, pred_label, image in zip(test_class, predicts, images):\n",
209+
" i += 1\n",
210+
" cv.imshow('{} - {} {} {}'.format(\n",
211+
" i, label,\n",
212+
" '==' if label == pred_label\n",
213+
" else '!=', pred_label), image)\n",
214+
"cv.waitKey(0)\n",
215+
"cv.destroyAllWindows()"
216+
]
217+
},
218+
{
219+
"cell_type": "code",
220+
"execution_count": null,
221+
"metadata": {},
222+
"outputs": [],
223+
"source": []
224+
}
225+
],
226+
"metadata": {
227+
"kernelspec": {
228+
"display_name": "Python 2",
229+
"language": "python",
230+
"name": "python2"
231+
},
232+
"language_info": {
233+
"codemirror_mode": {
234+
"name": "ipython",
235+
"version": 2
236+
},
237+
"file_extension": ".py",
238+
"mimetype": "text/x-python",
239+
"name": "python",
240+
"nbconvert_exporter": "python",
241+
"pygments_lexer": "ipython2",
242+
"version": "2.7.6"
243+
}
244+
},
245+
"nbformat": 4,
246+
"nbformat_minor": 0
247+
}

‎binary_vote_pred.py

+101
@@ -0,0 +1,101 @@
'''
@Author : lance
@Email : wangyl306@163.com
Multi-model voting to counter overfitting
'''
from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
import numpy as np
from sklearn.metrics import confusion_matrix


def pred(path, steps, input_shape):
    # Load the test data
    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,
                                                                         target_size=input_shape,
                                                                         classes=["C2F", "X2F"],
                                                                         class_mode="binary",
                                                                         batch_size=10, shuffle=False)
    model = load_model(path)
    # Predict and threshold the sigmoid outputs at 0.5
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    pred = pred.ravel()
    pred = list(pred)
    for i in range(len(pred)):
        if pred[i] < 0.5:
            pred[i] = 0
        else:
            pred[i] = 1
    # print("Predictions:", pred)
    return pred


# Majority vote: return the index of the value that occurs most often
def vote(lt):
    index1 = 0
    best = 0
    for i in range(len(lt)):
        flag = 0
        for j in range(i + 1, len(lt)):
            if lt[j] == lt[i]:
                flag += 1
        if flag > best:
            best = flag
            index1 = i
    return index1


def Ensemble():
    ans = []
    pred1 = list(pred("weights/nasnet_0039.h5", 25, (224, 224)))
    pred2 = list(pred("weights/vgg19two_0027.h5", 25, (224, 224)))
    pred3 = list(pred("weights/inceptionv3_0016.h5", 25, (299, 299)))
    pred4 = list(pred("weights/mobilenetv2_0029.h5", 25, (224, 224)))
    pred5 = list(pred("weights/densenet_0025.h5", 25, (224, 224)))
    pred6 = list(pred("weights/bcnn_0020.h5", 25, (224, 224)))
    pred7 = list(pred("weights/resnet_0025.h5", 25, (224, 224)))
    for i in range(len(pred1)):
        ls = []
        ls.append(pred1[i])
        ls.append(pred2[i])
        ls.append(pred3[i])
        ls.append(pred4[i])
        ls.append(pred5[i])
        ls.append(pred6[i])
        ls.append(pred7[i])

        ans.append(ls[vote(ls)])
    return ans


if __name__ == "__main__":
    predicts = Ensemble()
    # for i in enumerate(predicts):
    #     print(i)

    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1 / 255).flow_from_directory(test_path,
                                                                           target_size=(224, 224),
                                                                           classes=["C2F", "X2F"],
                                                                           class_mode="binary",
                                                                           batch_size=10, shuffle=False)
    test_class = np.array([])

    for i in range(25):
        test_imgs, test_labels = next(test_batches)
        test_class = np.hstack((test_class, test_labels))
    print("True labels:", test_class)

    # Print the confusion matrix and the accuracy (trace / total)
    cm = confusion_matrix(test_class, predicts)
    print(cm)

    tmp = 0
    for i in range(len(cm[0, :])):
        tmp += cm[i][i]
    accuracy = tmp / np.sum(cm)
    print("acc:", accuracy)
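
The hand-rolled vote() above returns the position of the value that occurs most often, and Ensemble() then takes that value as the final label. An equivalent, shorter majority vote could use collections.Counter; this is a sketch, not code from this commit:

    from collections import Counter

    def majority_vote(votes):
        # Return the most common label among the per-model predictions
        return Counter(votes).most_common(1)[0][0]

    # majority_vote([0, 1, 1, 0, 1, 1, 0]) -> 1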

‎binary_vote_test.ipynb

+358
@@ -0,0 +1,358 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 1,
6+
"metadata": {
7+
"collapsed": true
8+
},
9+
"outputs": [
10+
{
11+
"name": "stderr",
12+
"output_type": "stream",
13+
"text": [
14+
"Using TensorFlow backend.\n"
15+
]
16+
}
17+
],
18+
"source": [
19+
"from keras.models import load_model\n",
20+
"from keras_preprocessing.image import ImageDataGenerator\n",
21+
"import numpy as np\n",
22+
"from sklearn.metrics import confusion_matrix\n",
23+
"import cv2 as cv"
24+
]
25+
},
26+
{
27+
"cell_type": "code",
28+
"execution_count": 2,
29+
"metadata": {
30+
"collapsed": false
31+
},
32+
"outputs": [
33+
{
34+
"name": "stdout",
35+
"output_type": "stream",
36+
"text": [
37+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n"
38+
]
39+
},
40+
{
41+
"name": "stdout",
42+
"output_type": "stream",
43+
"text": [
44+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
45+
]
46+
},
47+
{
48+
"name": "stdout",
49+
"output_type": "stream",
50+
"text": [
51+
"WARNING:tensorflow:From D:\\myenvs\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n"
52+
]
53+
},
54+
{
55+
"name": "stdout",
56+
"output_type": "stream",
57+
"text": [
58+
"模型加载成功\n"
59+
]
60+
}
61+
],
62+
"source": [
63+
"#加载模型\n",
64+
"\n",
65+
"model1 = load_model(\"weights/densenet_0023.h5\")\n",
66+
"model2 = load_model(\"weights/inceptionv3_0016.h5\")\n",
67+
"model3 = load_model(\"weights/mobilenetv2_0032.h5\")\n",
68+
"model4 = load_model(\"weights/nasnet_0017.h5\")\n",
69+
"model5 = load_model(\"weights/vgg19two_0027.h5\")\n",
70+
"\n",
71+
"print(\"模型加载成功\")"
72+
]
73+
},
74+
{
75+
"cell_type": "code",
76+
"execution_count": 17,
77+
"metadata": {},
78+
"outputs": [
79+
{
80+
"name": "stdout",
81+
"output_type": "stream",
82+
"text": [
83+
"Found 30 images belonging to 2 classes.\n"
84+
]
85+
},
86+
{
87+
"name": "stdout",
88+
"output_type": "stream",
89+
"text": [
90+
"\r1/3 [=========>....................] - ETA: 0s"
91+
]
92+
},
93+
{
94+
"name": "stdout",
95+
"output_type": "stream",
96+
"text": [
97+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r2/3 [===================>..........] - ETA: 0s"
98+
]
99+
},
100+
{
101+
"name": "stdout",
102+
"output_type": "stream",
103+
"text": [
104+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r3/3 [==============================] - 0s 137ms/step\n"
105+
]
106+
},
107+
{
108+
"name": "stdout",
109+
"output_type": "stream",
110+
"text": [
111+
"Found 30 images belonging to 2 classes.\n"
112+
]
113+
},
114+
{
115+
"name": "stdout",
116+
"output_type": "stream",
117+
"text": [
118+
"\r1/3 [=========>....................] - ETA: 0s"
119+
]
120+
},
121+
{
122+
"name": "stdout",
123+
"output_type": "stream",
124+
"text": [
125+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r2/3 [===================>..........] - ETA: 0s"
126+
]
127+
},
128+
{
129+
"name": "stdout",
130+
"output_type": "stream",
131+
"text": [
132+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r3/3 [==============================] - 1s 169ms/step\n"
133+
]
134+
},
135+
{
136+
"name": "stdout",
137+
"output_type": "stream",
138+
"text": [
139+
"Found 30 images belonging to 2 classes.\n"
140+
]
141+
},
142+
{
143+
"name": "stdout",
144+
"output_type": "stream",
145+
"text": [
146+
"\r1/3 [=========>....................] - ETA: 0s"
147+
]
148+
},
149+
{
150+
"name": "stdout",
151+
"output_type": "stream",
152+
"text": [
153+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r2/3 [===================>..........] - ETA: 0s"
154+
]
155+
},
156+
{
157+
"name": "stdout",
158+
"output_type": "stream",
159+
"text": [
160+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r3/3 [==============================] - 0s 112ms/step\n"
161+
]
162+
},
163+
{
164+
"name": "stdout",
165+
"output_type": "stream",
166+
"text": [
167+
"Found 30 images belonging to 2 classes.\n"
168+
]
169+
},
170+
{
171+
"name": "stdout",
172+
"output_type": "stream",
173+
"text": [
174+
"\r1/3 [=========>....................] - ETA: 0s"
175+
]
176+
},
177+
{
178+
"name": "stdout",
179+
"output_type": "stream",
180+
"text": [
181+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r2/3 [===================>..........] - ETA: 0s"
182+
]
183+
},
184+
{
185+
"name": "stdout",
186+
"output_type": "stream",
187+
"text": [
188+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r3/3 [==============================] - 0s 116ms/step\n"
189+
]
190+
},
191+
{
192+
"name": "stdout",
193+
"output_type": "stream",
194+
"text": [
195+
"Found 30 images belonging to 2 classes.\n"
196+
]
197+
},
198+
{
199+
"name": "stdout",
200+
"output_type": "stream",
201+
"text": [
202+
"\r1/3 [=========>....................] - ETA: 0s"
203+
]
204+
},
205+
{
206+
"name": "stdout",
207+
"output_type": "stream",
208+
"text": [
209+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r2/3 [===================>..........] - ETA: 0s"
210+
]
211+
},
212+
{
213+
"name": "stdout",
214+
"output_type": "stream",
215+
"text": [
216+
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r3/3 [==============================] - 0s 131ms/step\n"
217+
]
218+
},
219+
{
220+
"name": "stdout",
221+
"output_type": "stream",
222+
"text": [
223+
"Found 30 images belonging to 2 classes.\n"
224+
]
225+
},
226+
{
227+
"name": "stdout",
228+
"output_type": "stream",
229+
"text": [
230+
"[[14 1]\n [ 6 9]]\nacc: 0.7666666666666667\n"
231+
]
232+
}
233+
],
234+
"source": [
235+
"#导入数据\n",
236+
"def load_data(shape):\n",
237+
" test_path = '测试数据/测试7'\n",
238+
" test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,\n",
239+
" target_size=shape,\n",
240+
" classes=[\"C2F\",\"X2F\"],\n",
241+
" class_mode=\"binary\",batch_size=10,shuffle=False)\n",
242+
" return test_batches\n",
243+
"\n",
244+
"# 预测\n",
245+
"def pred(model,steps,shape):\n",
246+
" steps = steps\n",
247+
" test_batches=load_data(shape)\n",
248+
" pred = model.predict_generator(test_batches, steps=steps, verbose=1)\n",
249+
" pred = pred.ravel()\n",
250+
" pred = list(pred)\n",
251+
" for i in range(len(pred)):\n",
252+
" if pred[i] < 0.5:\n",
253+
" pred[i] = 0\n",
254+
" else:\n",
255+
" pred[i] = 1\n",
256+
" return pred\n",
257+
"#投票选出最多的\n",
258+
"def vote(lt):\n",
259+
"\tindex1 = 0\n",
260+
"\tmax = 0\n",
261+
"\tfor i in range(len(lt)):\n",
262+
"\t\tflag = 0\n",
263+
"\t\tfor j in range(i+1,len(lt)):\n",
264+
"\t\t\tif lt[j] == lt[i]:\n",
265+
"\t\t\t\tflag += 1\n",
266+
"\t\tif flag > max:\n",
267+
"\t\t\tmax = flag\n",
268+
"\t\t\tindex1 = i\n",
269+
"\treturn index1\n",
270+
"def Ensemble(steps):\n",
271+
" ans = []\n",
272+
" pred1=list(pred(model1,steps,(224,224)))\n",
273+
" pred2=list(pred(model2,steps,(299,299)))\n",
274+
" pred3=list(pred(model3,steps,(224,224)))\n",
275+
" pred4=list(pred(model4,steps,(224,224)))\n",
276+
" pred5=list(pred(model5,steps,(224,224)))\n",
277+
" for i in range(len(pred5)):\n",
278+
" ls = []\n",
279+
" ls.append(pred1[i])\n",
280+
" ls.append(pred2[i])\n",
281+
" ls.append(pred3[i])\n",
282+
" ls.append(pred4[i])\n",
283+
" ls.append(pred5[i])\n",
284+
" ans.append(ls[vote(ls)])\n",
285+
" return ans\n",
286+
"\n",
287+
"steps=3\n",
288+
"#投票得出最终结果\n",
289+
"predicts=Ensemble(steps)\n",
290+
"# for i in enumerate(predicts):\n",
291+
"# print(i)\n",
292+
"\n",
293+
"\n",
294+
"\n",
295+
"test_batches = load_data((224,224))\n",
296+
"test_class = np.array([])\n",
297+
"\n",
298+
"files=[]\n",
299+
"for i in range(steps):\n",
300+
" test_imgs, test_lables = next(test_batches)\n",
301+
" test_class = np.hstack((test_class, test_lables))\n",
302+
" files.append(test_imgs)\n",
303+
"# print(\"真实类别:\", test_class)\n",
304+
"# print(\"预测类别:\", predicts)\n",
305+
"\n",
306+
"\n",
307+
"# 打印混淆矩阵\n",
308+
"cm = confusion_matrix(test_class, predicts)\n",
309+
"\n",
310+
"print(cm)\n",
311+
"\n",
312+
"tmp = 0\n",
313+
"for i in range(len(cm[0, :])):\n",
314+
" tmp += cm[i][i]\n",
315+
"accuracy = tmp / np.sum(cm)\n",
316+
"print(\"acc:\", accuracy)\n",
317+
"\n",
318+
"\n",
319+
"# i=0\n",
320+
"# for images in files:\n",
321+
"# for pred_label, image in zip(predicts, images):\n",
322+
"# i += 1\n",
323+
"# cv.imshow('{} - {} '.format(\n",
324+
"# i, pred_label), image)\n",
325+
"# cv.waitKey(0)\n",
326+
"# cv.destroyAllWindows()"
327+
]
328+
},
329+
{
330+
"cell_type": "code",
331+
"execution_count": null,
332+
"metadata": {},
333+
"outputs": [],
334+
"source": []
335+
}
336+
],
337+
"metadata": {
338+
"kernelspec": {
339+
"display_name": "Python 2",
340+
"language": "python",
341+
"name": "python2"
342+
},
343+
"language_info": {
344+
"codemirror_mode": {
345+
"name": "ipython",
346+
"version": 2
347+
},
348+
"file_extension": ".py",
349+
"mimetype": "text/x-python",
350+
"name": "python",
351+
"nbconvert_exporter": "python",
352+
"pygments_lexer": "ipython2",
353+
"version": "2.7.6"
354+
}
355+
},
356+
"nbformat": 4,
357+
"nbformat_minor": 0
358+
}

‎cate_pred.py

+32
@@ -0,0 +1,32 @@
from skimage import io, transform
import numpy as np
import os
from keras.models import load_model


def predict(model_path, data_path, shape):

    new_model = load_model(model_path)
    print(dir(new_model))

    path = data_path
    files = os.listdir(path)
    # print(files)

    Tmp_Img = []
    for i in range(len(files)):
        tmp = io.imread(path + '/' + files[i])
        # transform.resize already converts the image to floats in [0, 1],
        # so no additional 1/255 rescaling is applied here (see the readme notes)
        tmp_img = transform.resize(tmp, shape)
        Tmp_Img.append(tmp_img)
    Tmp_Img = np.array(Tmp_Img)
    predicts = new_model.predict(Tmp_Img)
    predicts = np.argmax(predicts, axis=1).astype(str)
    predicts[predicts == "0"] = "C4F"
    predicts[predicts == "1"] = "X2F"
    for i in zip(files, predicts):
        print("\n", i)


if __name__ == "__main__":
    predict("weights/vgg16_0032.h5", "测试/tt", [224, 224])

‎cate_test.py

+60
@@ -0,0 +1,60 @@
'''
@Author : lance
@Email : wangyl306@163.com
'''


from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np


def mytest(path, steps, input_shape):
    # Load the test data (class_mode defaults to "categorical",
    # so the labels come back one-hot encoded)
    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,
                                                                         target_size=input_shape,
                                                                         classes=["C2F", "X2F"],
                                                                         batch_size=10, shuffle=False)

    model = load_model(path)
    # Collect the ground-truth class indices batch by batch
    test_class = np.array([])
    for i in range(steps):
        test_imgs, test_labels = next(test_batches)
        test_labels = np.argmax(test_labels, axis=1)   # one-hot -> class index
        test_class = np.hstack((test_class, test_labels))
    print("True labels:", test_class)

    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)
    print("Predictions:", predicted_class_indices)

    # Print the confusion matrix and the accuracy (trace / total)
    cm = confusion_matrix(test_class, predicted_class_indices)
    print(cm)

    tmp = 0
    for i in range(len(cm[0, :])):
        tmp += cm[i][i]
    accuracy = tmp / np.sum(cm)
    print("acc:", accuracy)

    return path, accuracy


if __name__ == "__main__":
    # mytest("weights/mobilenetv2_0025.h5", 30, (224, 224))  # 0.73
    # mytest("weights/vgg19one_0032.h5", 30, (224, 224))     # 0.75
    # mytest("weights/vgg19two_0024.h5", 30, (224, 224))     # 0.84
    mytest("weights/vgg16_0007.h5", 30, (224, 224))          # 0.84
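
Rather than replaying the generator with next() to recover the ground-truth labels, Keras' DirectoryIterator already exposes them. A sketch reusing the test_batches and steps variables from mytest() above:

    # Labels in the same (unshuffled) order that predict_generator sees them
    test_class = test_batches.classes[:steps * test_batches.batch_size]
    print("class name -> index mapping:", test_batches.class_indices)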

‎cate_vote_pred.py

+57
@@ -0,0 +1,57 @@
'''
@Author : lance
@Email : wangyl306@163.com
Multi-model voting to counter overfitting
'''
from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
import numpy as np

# Prediction module
def pred(path, steps, input_shape):
    # Load the test data
    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,
                                                                         target_size=input_shape,
                                                                         classes=["C2F", "X2F"],
                                                                         batch_size=10, shuffle=False)
    model = load_model(path)
    # Predict and take the most probable class per image
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)
    # print("Predictions:", predicted_class_indices)

    return predicted_class_indices


# Majority vote: return the index of the value that occurs most often
def vote(lt):
    index1 = 0
    best = 0
    for i in range(len(lt)):
        flag = 0
        for j in range(i + 1, len(lt)):
            if lt[j] == lt[i]:
                flag += 1
        if flag > best:
            best = flag
            index1 = i
    return index1


def Ensemble(path, steps, input_shape):
    # With a single model this reduces to that model's predictions;
    # append further pred(...) results to ls for a real ensemble
    ans = []
    pred1 = list(pred(path, steps, input_shape))
    for i in range(len(pred1)):
        ls = []
        ls.append(pred1[i])

        ans.append(ls[vote(ls)])
    return ans


if __name__ == "__main__":
    # Example arguments: substitute your own model file, step count and input size
    predicts = Ensemble("weights/vgg16_0032.h5", 25, (224, 224))
    for i in enumerate(predicts):
        print(i)

‎extractMask.py

+105
@@ -0,0 +1,105 @@
# -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : wangyl306@163.com
'''

from skimage import io
import numpy as np
import os
from skimage import color
import skimage.morphology as sm

#imgpath="./data/test/C3F/C3F_blockId#32756.bmp"
#
#
#img=io.imread(imgpath)
#io.imshow(img)

#print(type(img))    # type
#print(img.shape)    # dimensions: height / width / channels
#print(img.size)     # total number of pixels
#print(img.max())    # maximum pixel value
#print(img.min())    # minimum pixel value
#print(img.mean())   # mean pixel value


src_pattern = './新建文件夹/tt/*.bmp'   # source path
path = "./新建文件夹/tt1/"              # output path
if not os.path.exists(path):
    os.makedirs(path)

# First apply a morphological opening to remove small objects,
# then binarise to obtain the mask
nb = 0
def mask(f):
    global nb
    nb += 1
    print(nb)
    image = io.imread(f)
    # Morphological opening
    img_gray = color.rgb2gray(image)
    dst = sm.opening(img_gray, sm.disk(9))
    img = color.gray2rgb(dst)
    # Binarisation
    img_gray = color.rgb2gray(img)
    rows, cols = img_gray.shape
    for i in range(rows):
        for j in range(cols):
            if (img_gray[i, j] <= 0.5):
                img_gray[i, j] = 0
            else:
                img_gray[i, j] = 1
    # Leaf area = size of the white region
    area = img_gray[img_gray > 0].size
    # Crop the mask region out of the original image
    index = np.where(img_gray == 1)
    h1 = index[0].min()
    h2 = index[0].max()
    w1 = index[1].min()
    w2 = index[1].max()
    roi = image[h1:h2, w1:w2]
    return roi, area

coll = io.ImageCollection(src_pattern, load_func=mask)
areas = []
for n in range(len(coll)):
    io.imsave(path + str(n) + '.bmp', coll[n][0])   # save the cropped images one by one
    areas.append(coll[n][1])
print("min:", np.min(areas))
print("median:", np.median(areas))
print("max:", np.max(areas))
print("mean:", np.mean(areas))


# test / train area statistics

#min: 1782782       1233141
#median: 2211339.0  2076964
#max: 2922081       3447462
#mean: 2276614      2103386

#min: 1446975
#median: 2263485.0
#max: 3885086
#mean: 2388327.9730941704

#min: 994173
#median: 1726611.0
#max: 3245794
#mean: 1837663.5177304964

#min: 939299
#median: 1655289.0
#max: 2541519
#mean: 1651290.857142857
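
The per-pixel double loop inside mask() can be replaced by vectorised NumPy operations. This is a sketch of the same 0.5-threshold binarisation, area measurement and crop, not code from this commit:

    import numpy as np

    def fast_mask(img_gray, image):
        # 0/1 mask without Python loops
        binary = (img_gray > 0.5).astype(np.uint8)
        area = int(binary.sum())                 # leaf area = number of white pixels
        rows, cols = np.nonzero(binary)
        roi = image[rows.min():rows.max(), cols.min():cols.max()]
        return roi, area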

‎image_diff.py

+51
@@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
'''
@Author : lance
@Email : wangyl306@163.com
'''
from skimage.measure import compare_ssim
#import argparse
import imutils
import cv2

# load the two input images
pathA = "errImages/x3-c3/t1_blockId#33255.bmp"
pathB = "errImages/x3-c3/t1_blockId#33260.bmp"
imageA = cv2.imread(pathA)
imageB = cv2.imread(pathB)
print(imageA)
print(imageB)
grayA = cv2.imread(pathA, cv2.IMREAD_GRAYSCALE)
grayB = cv2.imread(pathB, cv2.IMREAD_GRAYSCALE)


# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
# `score` is the structural similarity index of the two input images;
# it lies in [-1, 1], where 1 means a perfect match
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))

# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 0, 255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and then draw the
    # bounding box on both input images to represent where the two
    # images differ
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)

# save the output images
cv2.imwrite("errImages/OriginalA.bmp", imageA)
cv2.imwrite("errImages/ModifiedB.bmp", imageB)
cv2.imwrite("errImages/Diff.bmp", diff)
cv2.imwrite("errImages/Thresh.bmp", thresh)

‎main-cigratte_kmeans.py

+239
@@ -0,0 +1,239 @@
1+
# -*- encoding: utf-8 -*-
2+
'''
3+
@Author : lance
4+
@Email : wangyl306@163.com
5+
'''
6+
import numpy as np
7+
import keras
8+
from keras.models import Sequential
9+
from sklearn import metrics
10+
from keras.preprocessing.image import ImageDataGenerator
11+
from keras.optimizers import Adam
12+
from keras.layers import Activation, Dense
13+
14+
valid_datagen = ImageDataGenerator(rescale=1./255)
15+
16+
train_datagen = ImageDataGenerator(rescale=1./255)
17+
18+
test_datagen = ImageDataGenerator(rescale=1./255)
19+
20+
train_gen = train_datagen.flow_from_directory( 'cx/train',
21+
target_size=(224,224),batch_size=10,class_mode='binary')
22+
23+
valid_gen = valid_datagen.flow_from_directory( 'cx/test',
24+
target_size=(224,224),batch_size=10,class_mode='binary')
25+
26+
27+
test_gen = test_datagen.flow_from_directory( 'cx/测试cx',
28+
target_size=(224,224),batch_size=10,class_mode='binary')
29+
30+
31+
#os.chdir('E:/Python/ANN/Projects/烟草')
32+
model=keras.models.load_model("cx_weights/weights/densenet_0017.h5")
33+
model.summary()
34+
35+
Labels=[]
36+
Predicts=[]
37+
38+
for i in range(3):
39+
print(i)
40+
temp=next(test_gen)
41+
temp1=temp[0]
42+
43+
Labels.append(temp[1])
44+
Predicts.append(model.predict(temp1))
45+
46+
47+
Labels=np.array(Labels).reshape([30,1])
48+
49+
Predicts=np.array(Predicts).reshape([30,1])
50+
51+
temp=np.zeros([30,1])
52+
temp[Predicts>0.5]=1
53+
54+
cf_metrics=metrics.confusion_matrix(Labels,temp)
55+
56+
57+
58+
59+
model_inter = keras.Model(inputs=model.input,
60+
outputs=model.get_layer('dense_1').output)
61+
62+
model_inter.summary()
63+
64+
65+
Labels=[]
66+
Predicts=[]
67+
68+
for i in range(111):
69+
print(i)
70+
temp=next(train_gen)
71+
temp1=temp[0]
72+
73+
Labels.append(temp[1])
74+
Predicts.append(model_inter.predict(temp1))
75+
76+
77+
Train_X=np.array(Predicts).reshape([1110,1024])
78+
79+
Train_Y=np.array(Labels).reshape([1110,1])
80+
81+
Labels=[]
82+
Predicts=[]
83+
84+
for i in range(47):
85+
print(i)
86+
temp=next(valid_gen)
87+
temp1=temp[0]
88+
89+
Labels.append(temp[1])
90+
Predicts.append(model_inter.predict(temp1))
91+
92+
93+
Valid_X=np.array(Predicts).reshape([470,1024])
94+
95+
Valid_Y=np.array(Labels).reshape([470,1])
96+
97+
Labels=[]
98+
Predicts=[]
99+
100+
for i in range(3):
101+
print(i)
102+
temp=next(test_gen)
103+
temp1=temp[0]
104+
105+
Labels.append(temp[1])
106+
Predicts.append(model_inter.predict(temp1))
107+
108+
109+
Test_X=np.array(Predicts).reshape([30,1024])
110+
111+
Test_Y=np.array(Labels).reshape([30,1])
112+
113+
114+
# In[ ]:
115+
116+
117+
model_dense= Sequential([
118+
Dense(16,input_dim=1024),
119+
Activation('tanh'),
120+
Dense(1),
121+
Activation('sigmoid')
122+
])
123+
124+
adam=Adam(lr=0.01)
125+
126+
127+
model_dense.compile(
128+
optimizer=adam,
129+
loss='mean_squared_error',
130+
# loss='binary_crossentropy',
131+
metrics=['accuracy'],
132+
)
133+
134+
model_dense.fit(Train_X, Train_Y, epochs=1000, batch_size=1120)  # 'epochs' replaces the deprecated 'nb_epoch'
135+
136+
137+
# In[ ]:
138+
139+
140+
Y_predict = model_dense.predict(Train_X)
141+
142+
Y_predict[Y_predict>=0.5]=1
143+
144+
Y_predict[Y_predict<0.5]=0
145+
146+
cf_metrics=metrics.confusion_matrix(Train_Y, Y_predict)
147+
accuracy = metrics.accuracy_score(Train_Y, Y_predict)
148+
print(accuracy)
149+
150+
151+
# For cx the decision threshold is 0.005: >0.005 -> 1 (X2F)
152+
Y_predict = model_dense.predict(Valid_X)
153+
154+
Y_predict[Y_predict>=0.5]=1
155+
156+
Y_predict[Y_predict<0.5]=0
157+
158+
cf_metrics=metrics.confusion_matrix(Valid_Y, Y_predict)
159+
accuracy = metrics.accuracy_score(Valid_Y, Y_predict)
160+
print(accuracy)
161+
162+
163+
Y_predict = model_dense.predict(Test_X)
164+
165+
Y_predict[Y_predict>=0.005]=1
166+
167+
Y_predict[Y_predict<0.005]=0
168+
169+
cf_metrics=metrics.confusion_matrix(Test_Y, Y_predict)
170+
accuracy = metrics.accuracy_score(Test_Y, Y_predict)
171+
print(accuracy)
172+
173+
174+
# In[ ]:
175+
176+
model_dense.summary()
177+
178+
model_feather = keras.Model(inputs=model_dense.input,
179+
outputs=model_dense.get_layer('dense_1').output)
180+
181+
model_feather.summary()
182+
#predicts_inter = model_inter.predict(temp1)
183+
184+
X_train=model_feather.predict(Train_X)
185+
X_valid=model_feather.predict(Valid_X)
186+
X_test=model_feather.predict(Test_X)
187+
188+
189+
190+
191+
# In[ ]:
192+
193+
194+
195+
from sklearn.cluster import KMeans
196+
clf=KMeans(n_clusters=2)
197+
198+
#attention, the cluster sequence will be generated randomly!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
199+
clf.fit(X_train)
200+
#attention, the cluster sequence will be generated randomly!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
201+
202+
predicts=clf.predict(X_train)
203+
204+
cf_metrics=metrics.confusion_matrix(Train_Y, predicts)
205+
accuracy = metrics.accuracy_score(Train_Y, predicts)
206+
print(accuracy)
207+
208+
209+
predicts=clf.predict(X_valid)
210+
211+
cf_metrics=metrics.confusion_matrix(Valid_Y, predicts)
212+
accuracy = metrics.accuracy_score(Valid_Y, predicts)
213+
print(accuracy)
214+
215+
216+
predicts=clf.predict(X_test)
217+
218+
cf_metrics=metrics.confusion_matrix(Test_Y, predicts)
219+
accuracy = metrics.accuracy_score(Test_Y, predicts)
220+
print(accuracy)
221+
222+
223+
threshold=3000
224+
225+
clf_clusters=clf.cluster_centers_
226+
227+
dis_1=X_test-clf_clusters[0,:]
228+
dis_1=np.sum(dis_1**2,axis=1)
229+
230+
dis_2=X_test-clf_clusters[1,:]
231+
dis_2=np.sum(dis_2**2,axis=1)
232+
predicts_t=np.array(dis_2<threshold)+0
233+
234+
cf_metrics=metrics.confusion_matrix(Test_Y, predicts_t)
235+
accuracy = metrics.accuracy_score(Test_Y, predicts_t)
236+
print(accuracy)
237+
238+
239+
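
As the comments above warn, KMeans assigns cluster ids in an arbitrary order, so cluster 0 is not guaranteed to correspond to class 0. One common fix is to remap each cluster to the majority true label observed on the training features; a sketch reusing clf, X_train, X_test and Train_Y from the script above:

    import numpy as np

    train_clusters = clf.predict(X_train)
    # Map each cluster id to the most frequent true label inside that cluster
    mapping = {c: int(np.round(Train_Y[train_clusters == c].mean()))
               for c in np.unique(train_clusters)}
    predicts = np.array([mapping[c] for c in clf.predict(X_test)])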

‎object_size.py

+144
@@ -0,0 +1,144 @@
# -*- coding: utf-8 -*-
'''
@Author : lance
@Email : wangyl306@163.com
Measuring the size of an object in an image works like estimating the distance
between the camera and an object: both need a ratio that says how many pixels
correspond to one unit of measurement (the "pixels per metric" ratio).

**What is the pixels per metric ratio?**

To determine the size of an object in an image we first "calibrate" with a
reference object (using a known reference instead of full camera calibration
keeps things simple). The reference object needs two important properties:

Property 1: we know its real size (width or height), including the unit of
measurement (mm, inches, and so on).

Property 2: it is easy to locate in the image, either by position (for
example, the reference is always placed in the top-left corner) or by
appearance (for example, a distinctive colour or shape).

By making sure the coin is the left-most object we can sort the contours from
left to right, grab the coin (always the first contour in the sorted list) and
use it to define the pixels per metric ratio:

pixels_per_metric = object_width / known_width

The coin is known to be 0.955 inches wide. Now suppose its width in the image
is 150 pixels (taken from its bounding box). Then:

pixels_per_metric = 150px / 0.955in ≈ 157px/in

So roughly 157 pixels span one inch in this image, and with this ratio we can
compute the size of every other object in it.
'''

# python object_size.py --image images/example_01.png --width 0.955
# python object_size.py --image images/example_02.png --width 0.955
# python object_size.py --image images/example_03.png --width 3.5

# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=True,
    help="width of the left-most object in the image (in inches)")
args = vars(ap.parse_args())

# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# sort the contours from left-to-right and initialize the
# 'pixels per metric' calibration variable
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None

# loop over the contours individually
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 100:
        continue

    # compute the rotated bounding box of the contour
    orig = image.copy()
    # orig = image  # would draw every measurement on one image; indenting the
    # final two display lines then shows all results at once
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")

    # order the points in the contour such that they appear
    # in top-left, top-right, bottom-right, and bottom-left
    # order, then draw the outline of the rotated bounding
    # box
    box = perspective.order_points(box)
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

    # loop over the original points and draw them
    for (x, y) in box:
        cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

    # unpack the ordered bounding box, then compute the midpoint
    # between the top-left and top-right coordinates, followed by
    # the midpoint between bottom-left and bottom-right coordinates
    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)

    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)

    # draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
        (255, 0, 255), 2)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
        (255, 0, 255), 2)

    # compute the Euclidean distance between the midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

    # if the pixels per metric has not been initialized, then
    # compute it as the ratio of pixels to supplied metric
    # (in this case, inches)
    if pixelsPerMetric is None:
        pixelsPerMetric = dB / args["width"]

    # compute the size of the object
    dimA = dA / pixelsPerMetric
    dimB = dB / pixelsPerMetric

    # draw the object sizes on the image
    cv2.putText(orig, "{:.1f}in".format(dimA),
        (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.1f}in".format(dimB),
        (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (255, 255, 255), 2)

    # show the output image
    cv2.imshow("Image", orig)
    cv2.waitKey(0)
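
A worked numeric example of the ratio described in the docstring (the pixel widths are made-up numbers for illustration):

    # Reference coin: 150 px wide in the image, known to be 0.955 in wide
    pixels_per_metric = 150 / 0.955          # ~157.1 px per inch

    # An object whose bounding box measures 300 x 450 px is therefore about
    width_in = 300 / pixels_per_metric       # ~1.91 in
    height_in = 450 / pixels_per_metric      # ~2.87 in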

‎pred.py

+78
@@ -0,0 +1,78 @@
'''
@Author : lance
@Email : wangyl306@163.com
Grading code
'''


from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
import numpy as np


def mytest1(path, steps, input_shape):
    # Stage 1 data: C4F vs X2F
    test_path = 'testimgs/t'
    test_batches = ImageDataGenerator(rotation_range=360, rescale=1/255).flow_from_directory(
        test_path, target_size=input_shape,
        classes=["C4F", "X2F"], batch_size=10, shuffle=False)

    model = load_model(path)
    # Predict and take the most probable class per image
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)
    # print("Predictions:", predicted_class_indices)

    return predicted_class_indices


def mytest2(path, steps, input_shape):
    # Stage 2 data: X2F vs X3F
    test_path = 'testimgs'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(
        test_path, target_size=input_shape,
        class_mode=None, classes=["X2F", "X3F"], batch_size=10, shuffle=False)

    model = load_model(path)
    # Predict and take the most probable class per image
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)
    # print("Predictions:", predicted_class_indices)

    return predicted_class_indices


if __name__ == "__main__":
    # pred1 = mytest1("weight/model11.h5", 2, (224, 224))
    # pred1 = list(pred1)
    # for i in range(len(pred1)):
    #     if pred1[i] == 0:
    #         pred1[i] = "C4F"
    #     else:
    #         pred1[i] = "X2F"
    # print(pred1)
    #
    pred2 = mytest2("x23/测试6之后的模型/bcnn_0141.h5", 3, (224, 224))
    # pred2 = mytest2("x23/测试6之后的模型/resnet_0093.h5", 2, (224, 224))
    pred2 = list(pred2)

    for i in range(len(pred2)):
        if pred2[i] == 0:
            pred2[i] = "X2F"
        else:
            pred2[i] = "X3F"
    #
    #
    # for i in range(len(pred1)):
    #     if pred1[i] == "X2F":
    #         pred1[i] = pred2[i]
    # print(pred2)

    for i in enumerate(pred2):
        print(i)
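
The commented-out lines above sketch a two-stage cascade: mytest1 separates C4F from X2F, and the images labelled X2F are then re-graded by mytest2 into X2F or X3F. Chaining them would look roughly like the following; this assumes both stages are run over the same images in the same order, which the two different test_path settings in this script do not by themselves guarantee:

    pred1 = ["C4F" if p == 0 else "X2F"
             for p in mytest1("weight/model11.h5", 2, (224, 224))]
    pred2 = ["X2F" if p == 0 else "X3F"
             for p in mytest2("x23/测试6之后的模型/bcnn_0141.h5", 3, (224, 224))]

    # Keep the stage-1 label unless stage 1 said X2F, then defer to stage 2
    final = [p2 if p1 == "X2F" else p1 for p1, p2 in zip(pred1, pred2)]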

‎pred_and_test.py

+311
@@ -0,0 +1,311 @@
1+
# -*- coding: utf-8 -*-
2+
'''
3+
@Author : lance
4+
@Email : wangyl306@163.com
5+
'''
6+
from sklearn import metrics
7+
from keras.models import load_model
8+
from keras_preprocessing.image import ImageDataGenerator
9+
import numpy as np
10+
import keras
11+
import keras.backend as K
12+
13+
14+
15+
#%%加载模型
16+
#model_cx=load_model("myweights/model_cx_bin_255_224x224_0.7.h5")
17+
#model_x23=load_model("myweights/model_x23_bin_255_224x224_0.8.h5")
18+
19+
K.clear_session()
20+
K.set_learning_phase(0)
21+
model=load_model("weights/resnet_0014.h5")
22+
23+
#%%单张展示
24+
#区分组别 binary
25+
test_gen=ImageDataGenerator(rescale=1/255).flow_from_directory("ceshi",
26+
target_size=(224,224),
27+
class_mode="binary",
28+
batch_size=1,
29+
shuffle=False)
30+
pred = model_cx.predict_generator(test_gen, steps=110, verbose=1)
31+
pred=pred.ravel()
32+
pred[pred<0.7]=0
33+
pred[pred>=0.7]=1
34+
print("组别(cx)是:", pred)
35+
#准确率
36+
n=0
37+
for i in pred:
38+
if i==1: #类别标签
39+
n+=1
40+
print(n/110)
41+
#%%单张展示
42+
#区分组别 binary
43+
test_gen=ImageDataGenerator(rescale=1/255).flow_from_directory("ceshi",
44+
target_size=(224,224),
45+
class_mode="binary",
46+
batch_size=1,
47+
shuffle=False)
48+
pred = model_x23.predict_generator(test_gen, steps=10, verbose=1)
49+
pred=pred.ravel()
50+
pred[pred<0.8]=0
51+
pred[pred>=0.8]=1
52+
print("级别是(x23):", pred)
53+
#准确率
54+
n=0
55+
for i in pred:
56+
if i==1: #类别标签
57+
n+=1
58+
print(n/110)
59+
60+
#%%4分级
61+
def salt(img, n=10000):
62+
# 循环添加n个椒盐
63+
# for k in range(n):
64+
# # 随机选择椒盐的坐标
65+
# i = int(np.random.random() * img.shape[1])
66+
# j = int(np.random.random() * img.shape[0])
67+
# # 如果是灰度图
68+
# if img.ndim == 2:
69+
# img[j,i] = 255
70+
# # 如果是RBG图片
71+
# elif img.ndim == 3:
72+
# img[j,i,0]= 255
73+
# img[j,i,1]= 255
74+
# img[j,i,2]= 255
75+
noise = np.random.rand(448,448, 3)*0.05-0.025
76+
img = img + noise
77+
return img
78+
test_gen=ImageDataGenerator(rescale=1/255,preprocessing_function=None).flow_from_directory("ceshi",
79+
target_size=(448,448),
80+
batch_size=1,
81+
shuffle=False)
82+
83+
pred= model.predict_generator(test_gen, steps=120, verbose=1)
84+
predicted_class_indices = np.argmax(pred, axis=1)
85+
print("组别是(softmax):", predicted_class_indices)
86+
87+
#准确率
88+
n=0
89+
for i in predicted_class_indices:
90+
if i==1: #类别标签
91+
n+=1
92+
print(n/120)
93+
94+
95+
#plot:×255后显示
96+
import matplotlib.pyplot as plt
97+
def plots(ims,figsize=(10,5),rows=1,interp=False,titles=None):
98+
if type(ims[0]) is np.ndarray:
99+
ims=np.array(ims).astype(np.uint8)
100+
if (ims.shape[-1] != 3):
101+
ims=ims.transpose((0,2,3,1))
102+
f=plt.figure(figsize=figsize)
103+
cols=len(ims)//rows if len(ims)%2 ==0 else len(ims)//rows+1
104+
for i in range(len(ims)):
105+
sp=f.add_subplot(rows,cols,i+1)
106+
sp.axis('off')
107+
if titles is not None:
108+
sp.set_title(titles[i],fontsize=9)
109+
plt.imshow(ims[i],interpolation=None if interp else "none")
110+
111+
112+
113+
imgs,labels=next(test_gen)
114+
plots(imgs)
115+
plt.show()
116+
117+
118+
#%%读图测试
119+
#skimage预测测试
120+
import os
121+
from skimage import io,transform
122+
path="新建文件夹/t1_c41"
123+
files=os.listdir(path)
124+
Tmp_Img=[]
125+
for i in range(len(files)):
126+
print(i)
127+
tmp=io.imread(path+'/'+files[i])
128+
tmp_img=transform.resize(tmp,[448,448])
129+
Tmp_Img.append(tmp_img)
130+
Tmp_Img=np.array(Tmp_Img)
131+
pred=model.predict(Tmp_Img)
132+
pred=np.argmax(pred, axis=1)
133+
print(pred)
134+
#准确率
135+
n=0
136+
for i in pred:
137+
if i==1: #类别标签
138+
n+=1
139+
print(n/120)
140+
141+
142+
143+
#画图
144+
io.imshow(tmp_img)
145+
tmp_img=tmp_img*255
146+
147+
#keras 同ImageDataGenerator
148+
import os
149+
from keras.preprocessing import image
150+
path="ceshi/t1_x31"
151+
file_names = os.listdir(path)
152+
i=0
153+
for file_name in file_names:
154+
img_path=os.path.join(path, file_name)
155+
img = image.load_img(img_path, target_size=(448,448))
156+
x = image.img_to_array(img)
157+
x = x*(1/255)
158+
x = np.expand_dims(x, axis=0)
159+
pred = model.predict(x)
160+
predicted_class_indices=np.argmax(pred, axis=1)
161+
print(predicted_class_indices)
162+
if predicted_class_indices ==3:
163+
i+=1
164+
print(i/110)
165+
166+
plots(x)
167+
plt.show()
168+
169+
170+
#%%加载数据
171+
# train_gen=ImageDataGenerator(1/255).flow_from_directory("re_cx/train",
172+
# target_size=(224,224),
173+
#
174+
# class_mode="binary",
175+
# batch_size=10,
176+
# shuffle=False)
177+
#
178+
#
179+
# valid_gen=ImageDataGenerator(1/255).flow_from_directory("re_cx/valid",
180+
# target_size=(224,224),
181+
#
182+
# class_mode="binary",
183+
# batch_size=10,
184+
# shuffle=False)
185+
186+
#test_gen=ImageDataGenerator(rescale=1/255).flow_from_directory("ceshi",
187+
# target_size=(448,448),
188+
# class_mode="binary",
189+
# batch_size=50,
190+
# shuffle=False)
191+
192+
193+
#%%测试
194+
steps=6
195+
#test_class=np.array([])
196+
197+
#for i in range(steps):
198+
# test_imgs, test_lables = next(test_gen)
199+
# test_class=np.hstack((test_class,test_lables ))
200+
#print("真实类别:",test_class)
201+
202+
pred = model_cx.predict_generator(test_gen, steps=steps, verbose=1)
203+
pred=pred.ravel()
204+
pred=list(pred)
205+
for i in range(len(pred)):
206+
if pred[i]<0.7:
207+
pred[i]=0
208+
else:
209+
pred[i]=1
210+
print("预测结果:", pred)
211+
212+
213+
# 打印混淆矩阵
214+
#cm = metrics.confusion_matrix(test_class, pred)
215+
#
216+
#
217+
#print(cm)
218+
219+
220+
221+
#%%特征模型
222+
model_feather = keras.Model(inputs=model_cx.input,
223+
outputs=model_cx.layers[-2].output)
224+
model_feather.summary()
225+
226+
227+
#%%特征提取
228+
# Labels=[]
229+
# Predicts=[]
230+
#
231+
# for i in range(63):#111
232+
# print(i)
233+
# temp=next(train_gen)
234+
# temp1=temp[0]
235+
#
236+
# Labels.append(temp[1])
237+
# Predicts.append(model_feather.predict(temp1))
238+
#
239+
# train_features=np.array(Predicts).reshape([630,1024]) #[1110,1024]
240+
# train_labels=np.array(Labels).reshape([630,1]) #[1110,1]
241+
#
242+
# Labels=[]
243+
# Predicts=[]
244+
#
245+
# for i in range(27):#47
246+
# print(i)
247+
# temp=next(valid_gen)
248+
# temp1=temp[0]
249+
#
250+
# Labels.append(temp[1])
251+
# Predicts.append(model_feather.predict(temp1))
252+
#
253+
# valid_features=np.array(Predicts).reshape([270,1024]) #[470,1024]
254+
# valid_labels=np.array(Labels).reshape([270,1]) #[470,1]
255+
#
256+
# Labels=[]
257+
# Predicts=[]
258+
#
259+
# for i in range(3):
260+
# print(i)
261+
# temp=next(test_gen)
262+
# temp1=temp[0]
263+
#
264+
# Labels.append(temp[1])
265+
# Predicts.append(model_feather.predict(temp1))
266+
#
267+
# pred_features=np.array(Predicts).reshape([30,1024])
268+
# real_labels=np.array(Labels).reshape([30,1])
269+
270+
271+
272+
#%%误差图
273+
import matplotlib.pyplot as plt
274+
from keras.preprocessing.image import ImageDataGenerator
275+
valid_datagen = ImageDataGenerator(rescale=1./255)
276+
#valid_datagen = ImageDataGenerator()
277+
valid_gen = valid_datagen.flow_from_directory( 'ceshi/c1',
278+
target_size=(224,224),batch_size=10,class_mode='binary')
279+
280+
281+
from keras.preprocessing.image import ImageDataGenerator
282+
valid_datagen = ImageDataGenerator()
283+
#valid_datagen = ImageDataGenerator()
284+
valid_gen2 = valid_datagen.flow_from_directory( 'ceshi/c1',
285+
target_size=(224,224),batch_size=10,class_mode='binary')
286+
287+
288+
289+
temp1=next(valid_gen)
290+
image1=temp1[0]
291+
292+
temp2=next(valid_gen2)
293+
image2=temp2[0]
294+
295+
diff=image1*255-image2
296+
297+
print(np.mean(diff),np.max(diff),np.min(diff))
298+
299+
plt.imshow(diff[1])
300+
plt.show()
301+
302+
303+
304+
305+
306+
# Hardware quote: Intel E5-series Gold processors, two CPUs with 44 cores / 88 threads in total, four 2080 Ti GPUs - about 100,000 RMB
307+
308+
309+
310+
311+

‎predict.py

+33
@@ -0,0 +1,33 @@
from skimage import io, transform
import numpy as np
import os
from keras.models import load_model
from keras import backend as K
K.clear_session()   # note the parentheses; a bare `K.clear_session` is a no-op

def predict(model_path, data_path, shape):

    new_model = load_model(model_path)
    print(dir(new_model))

    # path = data_path
    # files = os.listdir(path)
    # # print(files)
    #
    # Tmp_Img = []
    # for i in range(len(files)):
    #     tmp = io.imread(path + '/' + files[i])
    #     tmp_img = transform.resize(tmp, shape)
    #     Tmp_Img.append(tmp_img)
    # Tmp_Img = np.array(Tmp_Img)
    # Tmp_Img = Tmp_Img * 255
    # predicts = new_model.predict(Tmp_Img)
    # predicts = np.argmax(predicts, axis=1).astype(str)
    # predicts[predicts == "0"] = "C4F"
    # predicts[predicts == "1"] = "X2F"
    # for i in zip(files, predicts):
    #     print("\n", i)

if __name__ == "__main__":
    predict("weights/all_resnet2.h5", "data/test/X2F", [224, 224])

‎readme

+205
@@ -0,0 +1,205 @@
@Author : lance
@Email : wangyl306@163.com
@Time : June 10, 2019

=================== Model selection =========================
1. Start with a simple model and fine-tune it.
2. Fine-tuning steps (a sketch follows this section):
     the last layer only
     the last convolutional block
     the whole network
3. In transfer learning, when the target domain differs a lot from the source domain, networks without FC layers perform worse than networks with FC layers.


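
A minimal sketch of the staged fine-tuning described above; the backbone, layer names, learning rates and loss are illustrative assumptions rather than settings taken from this repo's training scripts:

    from keras.applications import VGG16
    from keras.layers import Dense, Flatten
    from keras.models import Model
    from keras.optimizers import Adam

    base = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
    out = Dense(1, activation="sigmoid")(Flatten()(base.output))
    model = Model(base.input, out)

    # Stage 1: freeze the backbone and train only the new head
    for layer in base.layers:
        layer.trainable = False
    model.compile(Adam(lr=1e-3), loss="binary_crossentropy", metrics=["accuracy"])

    # Stage 2: unfreeze the last convolutional block and fine-tune with a lower LR
    for layer in base.layers:
        layer.trainable = layer.name.startswith("block5")
    model.compile(Adam(lr=1e-4), loss="binary_crossentropy", metrics=["accuracy"])
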
=================== Loading data =========================
model.load_data.py
1. path, batch_size (10 or 16)
2. input_shape follows each model's default size; with enough GPU memory you can increase the input size, reducing batch_size accordingly
3. Augmentation: adjust the relevant parameters in ImageDataGenerator, see https://www.cnblogs.com/hutao722/p/10075150.html
4. classes=["","",""] defines your own class names
***Note*** shuffle=True is the default in .flow_from_directory; leave it for training and validation, but it must be set to False for testing (see the sketch after this section)
5. rescale=1/255

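
A minimal sketch of the generator settings listed above; the directory names are placeholders, while the class names and rescale factor follow the scripts elsewhere in this commit:

    from keras_preprocessing.image import ImageDataGenerator

    # Training generator: augmentation parameters are illustrative
    train_batches = ImageDataGenerator(rescale=1/255, rotation_range=30,
                                       horizontal_flip=True).flow_from_directory(
        "data/train", target_size=(224, 224), classes=["C2F", "X2F"],
        class_mode="binary", batch_size=10)          # shuffle=True is the default

    # Test generator: shuffle must be False so predictions line up with labels
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(
        "data/test", target_size=(224, 224), classes=["C2F", "X2F"],
        class_mode="binary", batch_size=10, shuffle=False)
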
=================== Model training =========================
model.train.py
1. Parameters: classes = 3
               epochs = 200
               steps_per_epoch >= train_nums//batch
               validation_steps >= valid_nums//batch

2. The input_shape argument each model function takes depends on the model ---- note: it must be a tuple
3. weights: stores the checkpoint with the highest validation accuracy
4. logs: training history
5. To use TensorBoard, add it to the callbacks (already defined)
6. The learning-rate schedule monitors the validation loss and halves the rate after 10 epochs without improvement
7. Always train with multiple worker threads, workers=16 (a callbacks/fit_generator sketch follows this section)

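
A sketch of the callbacks and fit_generator call implied by points 3-7; the checkpoint filename, monitored metrics and the model/generator variables are assumptions, since the train script itself is not part of this commit:

    from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard

    callbacks = [
        # 3. keep the weights with the best validation accuracy
        ModelCheckpoint("weights/model_{epoch:04d}.h5", monitor="val_acc",
                        save_best_only=True),
        # 6. halve the learning rate after 10 epochs without val_loss improvement
        ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=10),
        # 4./5. training history and optional TensorBoard logging
        TensorBoard(log_dir="logs"),
    ]

    model.fit_generator(train_batches,
                        steps_per_epoch=len(train_batches),
                        epochs=200,
                        validation_data=valid_batches,
                        validation_steps=len(valid_batches),
                        callbacks=callbacks,
                        workers=16)     # 7. multiple loader threads
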
=================== Model testing =========================
test.py
1. path: the model file
2. steps = test_nums / batch
3. input_shape: the model's default input size
4. Note: point this at your test directory, set classes=["","",""] to your own class names, and use shuffle=False

=================== Prediction =========================
predict.py
predict("weights/bcnn.h5","data/test/B",[224,224])

=================== Resuming training =========================
model.continue_train.py
continue_train(path,epochs,steps_per_epoch,validation_steps,input_shape)

=================== GPU memory limits (GTX 1060) =========================
1. resnext: 56,56
2. senet: 64,64
3. octconv: not enough GPU memory

=================== Bug notes =========================
1. Problem: AttributeError: 'bytes' object has no attribute 'encode'
   Fix: edit line 321 of C:\Anaconda3\envs\wyl\lib\site-packages\keras\engine\saving.py: n.encode('utf8') for n in
   If it says encode, change it to decode, and vice versa.
2. Differences between image-reading pipelines
   skimage behaves differently from the keras family
   Note: skimage's transform.resize already divides by 255, so do not divide by 255 again (a small demonstration follows this section)
   The keras readers all behave the same
   The keras family includes: from keras.preprocessing import image
                              ImageDataGenerator
   e.g. on the same 120 C4 images with the same model, the keras pipeline scores 92.5% and the skimage pipeline 93.3%
3. On BN-layer behaviour during training vs testing: https://blog.datumbox.com/the-batch-normalization-layer-of-keras-is-broken/

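
A small sketch illustrating point 2 (the sample image path is hypothetical): skimage's transform.resize already returns floats in [0, 1], while keras' img_to_array keeps the original [0, 255] range, so only the keras path needs the extra 1/255 rescale:

    from skimage import io, transform
    from keras.preprocessing import image

    a = transform.resize(io.imread("sample.bmp"), (224, 224))
    print(a.min(), a.max())     # roughly 0.0 .. 1.0, no further /255 needed

    b = image.img_to_array(image.load_img("sample.bmp", target_size=(224, 224)))
    b = b * (1 / 255)           # explicit rescale brings it onto the same scale
    print(b.min(), b.max())
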
=================== GPU acceleration =========================
Conclusions:
First: existing Keras models can be accelerated on a single GPU or on multiple GPUs (GTX 1060, RTX 2080 Ti); NVIDIA driver 410+ and CUDA 9.0+ are all that is needed.
Second: Keras' data preprocessing is slow and is the real bottleneck for training speed.
Third: GPUs aside, the Ubuntu 18.04 desktop's hardware accelerates training less well than the Win10 desktop.
Fourth: for Keras classification training:
    set workers=32 (bigger is better), use_multiprocessing=False (True is slightly faster), max_queue_size=20 (bigger is better); going larger is pointless once the CPU is saturated;
    the fastest classification run now averages about 9 s per epoch (148 train / 30 val images).
    (The corresponding fit_generator call is sketched after this section.)

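
The loader settings referred to above, as they would appear in the fit_generator call; a sketch in which model and the generators are assumed to exist:

    history = model.fit_generator(
        train_batches,
        steps_per_epoch=len(train_batches),
        epochs=20,
        validation_data=valid_batches,
        validation_steps=len(valid_batches),
        workers=32,                 # larger is better until the CPU saturates
        use_multiprocessing=False,  # True was marginally faster on the Ubuntu box
        max_queue_size=20)          # larger helps, again up to CPU saturation
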
data:
Found 148 images belonging to 3 classes.
Found 30 images belonging to 3 classes.

keras.fit_generator settings
workers=32, use_multiprocessing=False, max_queue_size=10

Win10 desktop, 16 CPU threads, 32 GB RAM: 75~79% CPU usage, 6 GB RAM used
RTX 2080 Ti, 11 GB max, 8.99 GB usable
vgg16(20,5,1)
batch = 30, adjust Train_steps and validation_steps to the batch size
20 epochs of batch 30 takes total time 197.78
vgg16(20,10,2)
batch = 15
20 epochs of batch 15 takes total time 185.68
vgg16(20,15,3)
batch = 10
20 epochs of batch 10 takes total time 182.57
vgg16(20,25,5)
batch = 6
20 epochs of batch 6 takes total time 178.18


Win10 desktop, 16 CPU threads, 32 GB RAM: 76~83% CPU usage, 5.3~5.6 GB RAM used
GTX 1060, 6 GB max, 4.97 GB usable
batch = 30
20 epochs of batch 30 takes total time 201.05
warning: Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.26GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
batch = 15
20 epochs of batch 15 takes total time 201.77
batch = 10
20 epochs of batch 10 takes total time 194.74
batch = 6
20 epochs of batch 6 takes total time 188.67


Ubuntu 18.04 desktop: 20 CPU threads, 64 GB RAM
peak 15-16 threads running, 70%~80% CPU usage
RTX 2080 Ti, 10.72 GB max, 10.33 GB usable
peak 56% GPU utilisation
batch = 30
20 epochs of batch 30 takes total time 404.95
batch = 15
20 epochs of batch 15 takes total time 377.54
batch = 10
20 epochs of batch 10 takes total time 385.09
batch = 6
20 epochs of batch 6 takes total time 371.07


keras.fit_generator settings
workers=32, use_multiprocessing=False, max_queue_size=20

Ubuntu 18.04 desktop: 20 CPU threads, 64 GB RAM
peak 20 threads running, 90+% CPU usage
RTX 2080 Ti, 10.72 GB max, 10.33 GB usable
peak 93% GPU utilisation
vgg16(20,5,1)
batch = 30
20 epochs of batch 30 takes total time 399.53
slightly faster than with max_queue_size=10

Ubuntu 18.04 desktop: 20 CPU threads, 64 GB RAM
peak 20 threads running, 90+% CPU usage
dual RTX 2080 Ti, 10.72 GB max, 10.33 GB usable on one and 10.53 GB on the other
first epoch: roughly 80+% on one GPU and 90+% on the other (figures are only approximate)
normal epochs peak at about 52% and 51%
batch = 30
20 epochs of batch 30 takes total time 399.55
batch = 15
20 epochs of batch 15 takes total time 391.86
batch = 10
20 epochs of batch 10 takes total time 354.00
batch = 6
20 epochs of batch 6 takes total time 344.01

keras.fit_generator settings
workers=32, use_multiprocessing=True, max_queue_size=20

Ubuntu 18.04 desktop: 20 CPU threads, 64 GB RAM
peak 20 threads running, 100% CPU usage (every thread at 100%)
dual RTX 2080 Ti, 10.72 GB max, 10.33 GB usable on one and 10.53 GB on the other
first epoch: roughly 80+% on one GPU and 90+% on the other
normal epochs peak at about 54% and 40%
batch = 30
20 epochs of batch 30 takes total time 389.52
batch = 15
20 epochs of batch 15 takes total time 378.32
batch = 10
20 epochs of batch 10 takes total time 346.65
batch = 6
20 epochs of batch 6 takes total time 338.29


‎requirements.txt

+121
@@ -0,0 +1,121 @@
1+
absl-py==0.7.0
2+
alabaster==0.7.12
3+
astor==0.7.1
4+
astroid==2.2.5
5+
Babel==2.6.0
6+
backcall==0.1.0
7+
bleach==3.1.0
8+
certifi==2018.8.24
9+
chardet==3.0.4
10+
cloudpickle==0.6.1
11+
colorama==0.4.1
12+
cycler==0.10.0
13+
Cython==0.29.2
14+
dask==1.0.0
15+
decorator==4.3.0
16+
defusedxml==0.5.0
17+
docutils==0.14
18+
entrypoints==0.3
19+
gast==0.2.2
20+
grpcio==1.18.0
21+
h5py==2.9.0
22+
hmmlearn==0.2.1
23+
idna==2.8
24+
imageio==2.4.1
25+
imagesize==1.1.0
26+
imgaug==0.2.7
27+
imutils==0.5.2
28+
ipykernel==5.1.0
29+
ipython==7.2.0
30+
ipython-genutils==0.2.0
31+
ipywidgets==7.4.2
32+
isort==4.3.16
33+
jedi==0.13.2
34+
Jinja2==2.10
35+
jsonschema==2.6.0
36+
jupyter==1.0.0
37+
jupyter-client==5.2.4
38+
jupyter-console==6.0.0
39+
jupyter-core==4.4.0
40+
Keras==2.2.4
41+
Keras-Applications==1.0.6
42+
Keras-Preprocessing==1.0.5
43+
keyring==19.0.1
44+
kiwisolver==1.0.1
45+
lazy-object-proxy==1.3.1
46+
lxml==4.3.0
47+
Markdown==3.0.1
48+
MarkupSafe==1.1.0
49+
matplotlib==3.0.2
50+
mccabe==0.6.1
51+
mistune==0.8.4
52+
nbconvert==5.4.0
53+
nbformat==4.4.0
54+
networkx==2.2
55+
notebook==5.7.4
56+
numpy==1.14.0
57+
numpydoc==0.8.0
58+
opencv-python==4.0.0.21
59+
packaging==19.0
60+
pandas==0.23.4
61+
pandocfilters==1.4.2
62+
parso==0.3.1
63+
pickleshare==0.7.5
64+
Pillow==5.4.1
65+
prometheus-client==0.5.0
66+
prompt-toolkit==2.0.7
67+
protobuf==3.6.0
68+
psutil==5.6.1
69+
pycodestyle==2.5.0
70+
pyflakes==2.1.1
71+
Pygments==2.3.1
72+
pylint==2.3.1
73+
pyparsing==2.3.1
74+
PyQt5==5.12.1
75+
PyQt5-sip==4.19.15
76+
PyQtWebEngine==5.12.1
77+
python-dateutil==2.7.5
78+
pytz==2018.9
79+
PyWavelets==1.0.1
80+
pywin32-ctypes==0.2.0
81+
pywinpty==0.5.5
82+
PyYAML==3.13
83+
pyzmq==17.1.2
84+
QtAwesome==0.5.7
85+
qtconsole==4.4.3
86+
QtPy==1.7.0
87+
requests==2.21.0
88+
rope==0.14.0
89+
scikit-image==0.14.1
90+
scikit-learn==0.20.3
91+
scipy==1.2.0
92+
Send2Trash==1.5.0
93+
six==1.11.0
94+
sklearn==0.0
95+
snowballstemmer==1.2.1
96+
Sphinx==2.0.0
97+
sphinxcontrib-applehelp==1.0.1
98+
sphinxcontrib-devhelp==1.0.1
99+
sphinxcontrib-htmlhelp==1.0.1
100+
sphinxcontrib-jsmath==1.0.1
101+
sphinxcontrib-qthelp==1.0.2
102+
sphinxcontrib-serializinghtml==1.1.1
103+
spyder==3.3.4
104+
spyder-kernels==0.4.3
105+
tensorboard==1.11.0
106+
tensorflow-gpu==1.11.0
107+
termcolor==1.1.0
108+
terminado==0.8.1
109+
testpath==0.4.2
110+
toolz==0.9.0
111+
tornado==5.1.1
112+
traitlets==4.3.2
113+
typed-ast==1.3.1
114+
urllib3==1.24.1
115+
wcwidth==0.1.7
116+
webencodings==0.5.1
117+
Werkzeug==0.14.1
118+
widgetsnbextension==3.4.2
119+
win-unicode-console==0.5
120+
wincertstore==0.2
121+
wrapt==1.11.1

‎skimageApi.py

+524
Large diffs are not rendered by default.

‎vote_pred.py

+57
@@ -0,0 +1,57 @@
'''
@Author : lance
@Email : wangyl306@163.com
Multi-model voting to counter overfitting
'''
from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator
import numpy as np

# Prediction module
def pred(path, steps, input_shape):
    # Load the test data
    test_path = '测试'
    test_batches = ImageDataGenerator(rescale=1/255).flow_from_directory(test_path,
                                                                         target_size=input_shape,
                                                                         classes=["C2F", "X2F"],
                                                                         batch_size=10, shuffle=False)
    model = load_model(path)
    # Predict and take the most probable class per image
    pred = model.predict_generator(test_batches, steps=steps, verbose=1)
    predicted_class_indices = np.argmax(pred, axis=1)
    # print("Predictions:", predicted_class_indices)

    return predicted_class_indices


# Majority vote: return the index of the value that occurs most often
def vote(lt):
    index1 = 0
    best = 0
    for i in range(len(lt)):
        flag = 0
        for j in range(i + 1, len(lt)):
            if lt[j] == lt[i]:
                flag += 1
        if flag > best:
            best = flag
            index1 = i
    return index1


def Ensemble(path, steps, input_shape):
    # With a single model this reduces to that model's predictions;
    # append further pred(...) results to ls for a real ensemble
    ans = []
    pred1 = list(pred(path, steps, input_shape))
    for i in range(len(pred1)):
        ls = []
        ls.append(pred1[i])

        ans.append(ls[vote(ls)])
    return ans


if __name__ == "__main__":
    # Example arguments: substitute your own model file, step count and input size
    predicts = Ensemble("weights/vgg16_0032.h5", 25, (224, 224))
    for i in enumerate(predicts):
        print(i)
