Commit

adding code
Daniel Kerrigan committed Jun 4, 2019
1 parent 9daa2e2 commit f6ae266
Showing 10 changed files with 169 additions and 1 deletion.
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
.DS_Store
17 changes: 16 additions & 1 deletion README.md
@@ -1,2 +1,17 @@
# iris-recognition-OTS-DNN
Source codes and models for the ICB 2019 paper: Iris Recognition with Image Segmentation Employing Retrained Off-the-Shelf Deep Neural Networks
Code and models for the ICB 2019 paper: Iris Recognition with Image Segmentation Employing Retrained Off-the-Shelf Deep Neural Networks

## Contents

### code
You will need to edit the scripts and programs below so that they point to the correct paths for your data. See the linked pages for more details, such as how to prepare the data; a sketch of one way to generate the path list files that several of these scripts read follows the list below.

- dilated-cnn: scripts used to train and test the frontend and context modules, proposed by Yu and Koltun. See their [training page](https://github.com/fyu/dilation/blob/master/docs/training.md) for more info.
- DRN: script used to train the dilated residual network, proposed by Yu, Koltun, and Funkhouser. See their [README](https://github.com/fyu/drn) for more info.
- SegNet: MATLAB programs used to train and test SegNet. See MATLAB's old guide on [Semantic Segmentation Using Deep Learning](https://web.archive.org/web/20180527004009/https://www.mathworks.com/help/vision/examples/semantic-segmentation-using-deep-learning.html), on which the training program was based.
- augment.py: script used to augment the training data with Gaussian-blurred and edge-enhanced copies of each image.
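
As a hypothetical illustration of the path lists these scripts expect (seg.m and test.m read them with `textscan`, and the dilation scripts take `--image_list`-style arguments), the sketch below writes one absolute path per line; the directory names and output file names are placeholders, not part of the released code.

```python
# Sketch only: build newline-separated lists of image and label paths.
# 'train/images', 'train/labels', and the output names are hypothetical.
import os

def write_list(src_dir, out_file):
    # One absolute path per line, sorted so image and label lists stay aligned.
    paths = sorted(
        os.path.join(os.path.abspath(src_dir), f)
        for f in os.listdir(src_dir)
        if f.endswith('.png')
    )
    with open(out_file, 'w') as out:
        out.write('\n'.join(paths) + '\n')

write_list('train/images', 'train_images.txt')
write_list('train/labels', 'train_labels.txt')
```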

### models
- dilated-cnn: trained frontend and context modules
- DRN: trained dilated residual network
- SegNet: trained SegNet
3 changes: 3 additions & 0 deletions code/DRN/train.script
@@ -0,0 +1,3 @@
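# Train a dilated residual network using fyu/drn's segment.py. Our reading of the
# flags (see the DRN README): -d data directory, -c number of classes, -s crop size.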
python3 segment.py train -d PATH_TO_DATA -c 2 -s 240 \
--arch drn_d_22 --batch-size 8 --epochs 100 --lr 0.01 --momentum 0.9 \
--step 100
55 changes: 55 additions & 0 deletions code/SegNet/seg.m
@@ -0,0 +1,55 @@
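% Train SegNet (VGG16 encoder) for two-class iris segmentation.
% The two files opened below are assumed to be plain-text lists of training
% image paths and label image paths, one path per line.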
train_images_f = fopen('path to training images');
train_labels_f = fopen('path to training labels');

train_images_d = textscan(train_images_f, '%s');
train_labels_d = textscan(train_labels_f, '%s');

imds = imageDatastore(train_images_d{1});

classes = [ "Iris" "Other"];

labelIDs = [ 1 0 ];

pxds = pixelLabelDatastore(train_labels_d{1}, classes, labelIDs);

numTrainingImages = numel(imds.Files);

imageSize = [240 320 3];
numClasses = numel(classes);

lgraph = segnetLayers(imageSize,numClasses,'vgg16');

tbl = countEachLabel(pxds);
pxLayer = pixelClassificationLayer('Name','labels','ClassNames',tbl.Name);

%%
% Update the SegNet network with the new |pixelClassificationLayer| by
% removing the current |pixelClassificationLayer| and adding the new layer.
% The current |pixelClassificationLayer| is named 'pixelLabels'. Remove it
% using |removeLayers|, add the new one using |addLayers|, and connect the
% new layer to the rest of the network using |connectLayers|.

lgraph = removeLayers(lgraph, 'pixelLabels');
lgraph = addLayers(lgraph, pxLayer);
lgraph = connectLayers(lgraph, 'softmax', 'labels');

%% Start Training
datasource = pixelLabelImageSource(imds,pxds);

%% Select Training Options

options = trainingOptions('sgdm', ...
    'Momentum', 0.9, ...
    'InitialLearnRate', 0.001, ...
    'L2Regularization', 0.0005, ...
    'MaxEpochs', 20, ...
    'MiniBatchSize', 4, ...
    'Shuffle', 'every-epoch', ...
    'CheckpointPath', 'path to save checkpoints', ...
    'ExecutionEnvironment', 'gpu', ...
    'VerboseFrequency', 1000);

%% Start training using |trainNetwork|.
[net, info] = trainNetwork(datasource,lgraph,options);
save('net.mat', 'net');
save('info.mat', 'info');
11 changes: 11 additions & 0 deletions code/SegNet/test.m
@@ -0,0 +1,11 @@
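% Segment the test images with the trained SegNet and write the predicted
% label images to the folder given in 'WriteLocation'.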
maxNumCompThreads(6);

test_images_f = fopen('path to test images');
test_images_d = textscan(test_images_f, '%s');

load('path to net.mat');
load('path to info.mat');

imds = imageDatastore(test_images_d{1});

pxds = semanticseg(imds, net, 'WriteLocation', 'path to write results to', 'Verbose', false);
29 changes: 29 additions & 0 deletions code/augment.py
@@ -0,0 +1,29 @@
import os
from PIL import Image, ImageFilter

OLD_DIR = 'path to directory of images to augment'
AUG_DIR = 'path to save the augmented images'

os.makedirs(AUG_DIR, exist_ok=True)

# Each source image yields six outputs: the original (converted to grayscale),
# three Gaussian-blurred copies, and two edge-enhanced copies.
for filename in os.listdir(OLD_DIR):
    if not filename.endswith('.png'):
        continue
    path = os.path.join(OLD_DIR, filename)

    im = Image.open(path)
    im = im.convert('L')

    im_blur_1 = im.filter(ImageFilter.GaussianBlur(2))
    im_blur_2 = im.filter(ImageFilter.GaussianBlur(3))
    im_blur_3 = im.filter(ImageFilter.GaussianBlur(4))

    im_edge_1 = im.filter(ImageFilter.EDGE_ENHANCE)
    im_edge_2 = im.filter(ImageFilter.EDGE_ENHANCE_MORE)

    basename = os.path.splitext(filename)[0]

    im.save(os.path.join(AUG_DIR, basename + '_original.png'))
    im_blur_1.save(os.path.join(AUG_DIR, basename + '_blur_1.png'))
    im_blur_2.save(os.path.join(AUG_DIR, basename + '_blur_2.png'))
    im_blur_3.save(os.path.join(AUG_DIR, basename + '_blur_3.png'))
    im_edge_1.save(os.path.join(AUG_DIR, basename + '_edge_1.png'))
    im_edge_2.save(os.path.join(AUG_DIR, basename + '_edge_2.png'))
11 changes: 11 additions & 0 deletions code/dilated-cnn/test_context.sh
@@ -0,0 +1,11 @@
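# Usage: bash test_context.sh <gpu_id>
# Runs the trained context module; --bin_list is expected to point at the
# feature files written by the frontend (see test_front.sh).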
GPU=$1

python ~/iris_segmentation/dilation/test.py context \
--work_dir WORK_DIR \
--image_list PATH_TO_LIST_OF_IMAGES \
--bin_list PATH_TO_LIST_OF_FEATURES \
--weights PATH_TO_CAFFE_MODEL \
--input_size 132 198 \
--classes 2 \
--gpu $GPU \
--bin
9 changes: 9 additions & 0 deletions code/dilated-cnn/test_front.sh
@@ -0,0 +1,9 @@
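# Usage: bash test_front.sh <gpu_id>
# Runs the trained frontend module on the images listed in --image_list.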
GPU=$1

python ~/iris_segmentation/dilation/test.py frontend \
--work_dir PATH_TO_WORK_DIR \
--image_list PATH_TO_IMAGE_LIST \
--weights PATH_TO_CAFFE_MODEL \
--classes 2 \
--gpu $GPU \
--bin
16 changes: 16 additions & 0 deletions code/dilated-cnn/train_context.sh
@@ -0,0 +1,16 @@
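# Usage: bash train_context.sh <gpu_id>
# Trains the context module; the train/test image arguments are expected to be
# feature files produced by the frontend rather than raw image lists.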
GPU=$1

python ~/iris_segmentation/dilation/train.py context \
--train_image PATH_TO_TRAINING_IMAGES_FEATURES_FILE \
--train_label PATH_TO_LIST_OF_TRAINING_LABELS \
--test_image PATH_TO_VALIDATION_IMAGES_FEATURES_FILE \
--test_label PATH_TO_LIST_OF_VALIDATION_LABELS \
--train_batch 1 \
--test_batch 1 \
--caffe PATH_TO_CAFFE \
--classes 2 \
--layers 8 \
--label_shape 66 132 \
--lr 0.001 \
--momentum 0.9 \
--gpu $GPU
18 changes: 18 additions & 0 deletions code/dilated-cnn/train_front.sh
@@ -0,0 +1,18 @@
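# Usage: bash train_front.sh <gpu_id>
# Trains the frontend module starting from the pretrained VGG weights given by --weights.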
GPU=$1

python ~/iris_segmentation/dilation/train.py frontend \
--work_dir PATH_TO_WORK_DIR \
--train_image PATH_TO_LIST_OF_TRAINING_IMAGES \
--train_label PATH_TO_LIST_OF_TRAINING_LABELS \
--test_image PATH_TO_LIST_OF_VALIDATION_IMAGES \
--test_label PATH_TO_LIST_OF_VALIDATION_LABELS \
--train_batch 1 \
--test_batch 1 \
--caffe PATH_TO_CAFFE \
--weights PATH_TO_dilation/pretrained/vgg_conv.caffemodel \
--crop_size 644 \
--classes 2 \
--lr 0.0001 \
--momentum 0.9 \
--mean 178 178 178 \
--gpu $GPU
