|
22 | 22 | import argparse |
23 | 23 | import numpy as np |
24 | 24 | from random import shuffle |
| 25 | +import os |
25 | 26 |
|
26 | 27 | import copy |
27 | 28 | from autoencoder import * |
|
#Command-line arguments for the test run
parser = argparse.ArgumentParser(description='Test file')
#parser.add_argument('--task_number', default=1, type=int, help='Select the task you want to test out the architecture; choose from 1-4')
#NOTE(review): argparse's type=bool is a known trap -- any non-empty string
#(including "False") parses as True; consider action='store_true' instead.
parser.add_argument('--use_gpu', default=False, type=bool, help = 'Set the flag if you wish to use the GPU')
parser.add_argument('--batch_size', default=16, type=int, help='Batch size you want to use whilst testing the model')
args = parser.parse_args()
use_gpu = args.use_gpu
#fix: --batch_size was added to the parser but never read; the DataLoader
#below consumes a bare `batch_size` name, so bind it here
#(assumes no hidden hunk already does this -- TODO confirm)
batch_size = args.batch_size
44 | 45 |
|
| 46 | + |
#Randomly shuffle the order in which the nine tasks are visited
task_number_list = list(range(1, 10))
shuffle(task_number_list)
48 | 50 |
|
49 | 51 |
|
50 | 52 | #transformations for the test data |
|
65 | 67 | ]) |
66 | 68 | } |
67 | 69 |
|
68 | | - |
69 | | -#create the results.txt file |
70 | | -with open("results.txt", "w") as myfile: |
71 | | - myfile.write() |
72 | | - myfile.close() |
73 | | - |
#Set the device to be used and initialize the feature extractor (the conv
#backbone of a pretrained Alexnet) whose output feeds the autoencoders
if use_gpu:
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
alexnet_backbone = models.alexnet(pretrained=True)
feature_extractor = Alexnet_FE(alexnet_backbone)
feature_extractor.to(device)
74 | 74 |
|
75 | 75 | for task_number in task_number_list: |
76 | 76 |
|
|
91 | 91 | image_folder = datasets.ImageFolder(os.path.join(path_task, 'test'), transform = data_transforms_mnist['test']) |
92 | 92 | dset_size = len(image_folder) |
93 | 93 |
|
94 | | - device = torch.device("cuda:0" if use_gpu else "cpu") |
95 | | - |
| 94 | + |
96 | 95 | dset_loaders = torch.utils.data.DataLoader(image_folder, batch_size = batch_size, |
97 | 96 | shuffle=True, num_workers=4) |
98 | 97 |
|
99 | 98 | best_loss = 99999999999 |
100 | 99 | model_number = 0 |
101 | 100 |
|
| 101 | + |
102 | 102 | #Load autoencoder models for tasks 1-9; need to select the best performing autoencoder model |
103 | 103 | for ae_number in range(1, 10): |
104 | 104 | ae_path = os.path.join(encoder_path, "autoencoder_" + str(ae_number)) |
|
122 | 122 | else: |
123 | 123 | input_data = Variable(input_data) |
124 | 124 |
|
125 | | - preds = model(input_data) |
126 | | - loss = encoder_criterion(preds, input_data) |
| 125 | + |
| 126 | + #get the input to the autoencoder from the conv backbone of the Alexnet |
| 127 | + input_to_ae = feature_extractor(input_data) |
| 128 | + input_to_ae = input_to_ae.view(input_to_ae.size(0), -1) |
127 | 129 |
|
| 130 | + #get the outputs from the model |
| 131 | + preds = model(input_to_ae) |
| 132 | + loss = encoder_criterion(preds, input_to_ae) |
| 133 | + |
128 | 134 | del preds |
129 | 135 | del input_data |
130 | | - |
| 136 | + del input_to_ae |
| 137 | + |
131 | 138 | running_loss = running_loss + loss.item() |
132 | 139 |
|
133 | 140 | model_loss = running_loss/dset_size |
|
146 | 153 | print ("Incorrect routing, wrong model has been selected") |
147 | 154 |
|
148 | 155 |
|
149 | | - trained_model_path = os.path.join(model_path, "model_" + model_number) |
| 156 | + #Load the expert that has been found by this procedure into memory |
| 157 | + trained_model_path = os.path.join(model_path, "model_" + str(model_number)) |
150 | 158 |
|
| 159 | + #Get the number of classes that this expert was exposed to |
151 | 160 | file_name = os.path.join(trained_model_path, "classes.txt") |
152 | 161 | file_object = open(file_name, 'r') |
153 | 162 |
|
154 | 163 | num_of_classes = file_object.read() |
155 | 164 | file_object.close() |
156 | 165 |
|
157 | | - num_of_classes = int(num_of_classes_old) |
| 166 | + num_of_classes = int(num_of_classes) |
158 | 167 |
|
159 | 168 | model = GeneralModelClass(num_of_classes) |
160 | 169 | model.load_state_dict(torch.load(os.path.join(trained_model_path, 'best_performing_model.pth'))) |
|
193 | 202 | model_loss = running_loss/dset_size |
194 | 203 | model_accuracy = running_corrects.double()/dset_size |
195 | 204 |
|
| 205 | + #Store the results into a file |
196 | 206 | with open("results.txt", "a") as myfile: |
197 | 207 | myfile.write("\n{}: {}".format(task_number, model_accuracy*100)) |
198 | 208 | myfile.close() |
|
0 commit comments