from __future__ import print_function
import argparse
import pdb
import os
import math
import sys
from timeit import default_timer as timer
import numpy as np
import pandas as pd
### Internal Imports
from datasets.dataset_survival import Generic_WSI_Survival_Dataset, Generic_MIL_Survival_Dataset
from utils.file_utils import save_pkl, load_pkl
from utils.core_utils import train
from utils.utils import get_custom_exp_code
### PyTorch Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, sampler
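### Overview: this script runs k-fold cross-validation (5-fold by default) for survival
### prediction on TCGA studies. Each fold trains on bags of WSI patch features (and, depending
### on --mode, genomic features), records the validation c-index, and writes a summary CSV.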
def main(args):
    #### Create Results Directory
    if not os.path.isdir(args.results_dir):
        os.mkdir(args.results_dir)

    if args.k_start == -1:
        start = 0
    else:
        start = args.k_start
    if args.k_end == -1:
        end = args.k
    else:
        end = args.k_end

    latest_val_cindex = []
    folds = np.arange(start, end)

    ### Start 5-Fold CV Evaluation.
    for i in folds:
        fold_start = timer()
        seed_torch(args.seed)
        results_pkl_path = os.path.join(args.results_dir, 'split_latest_val_{}_results.pkl'.format(i))
        if os.path.isfile(results_pkl_path):
            print("Skipping Split %d" % i)
            continue

        ### Gets the Train + Val Dataset Loader.
        train_dataset, val_dataset = dataset.return_splits(from_id=False,
                                                           csv_path='{}/splits_{}.csv'.format(args.split_dir, i))
        print('training: {}, validation: {}'.format(len(train_dataset), len(val_dataset)))
        datasets = (train_dataset, val_dataset)

        ### Specify the input dimension size if using genomic features.
        if 'omic' in args.mode or args.mode == 'cluster' or args.mode == 'graph' or args.mode == 'pyramid':
            args.omic_input_dim = train_dataset.genomic_features.shape[1]
            print("Genomic Dimension", args.omic_input_dim)
        elif 'coattn' in args.mode:
            args.omic_sizes = train_dataset.omic_sizes
            print('Genomic Dimensions', args.omic_sizes)
        else:
            args.omic_input_dim = 0

        ### Run Train-Val on Survival Task.
        if args.task_type == 'survival':
            val_latest, cindex_latest = train(datasets, i, args)
            latest_val_cindex.append(cindex_latest)

        ### Write Results for Each Split to PKL
        save_pkl(results_pkl_path, val_latest)
        fold_end = timer()
        print('Fold %d Time: %f seconds' % (i, fold_end - fold_start))

    ### Finish 5-Fold CV Evaluation.
    if args.task_type == 'survival':
        results_latest_df = pd.DataFrame({'folds': folds, 'val_cindex': latest_val_cindex})

    ### Name the summary after the fold range when only a subset of folds was run.
    if len(folds) != args.k:
        save_name = 'summary_partial_{}_{}.csv'.format(start, end)
    else:
        save_name = 'summary_latest.csv'
    results_latest_df.to_csv(os.path.join(args.results_dir, save_name))
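### Example invocation (a sketch only; the data root below is a placeholder for your own feature directory):
###   CUDA_VISIBLE_DEVICES=0 python main.py --data_root_dir /path/to/wsi_feature_dir \
###       --which_splits 5foldcv --split_dir tcga_blca_100 --mode coattn --model_type mcat
### Passing --k_start / --k_end trains only a subset of folds, in which case the summary is
### written as summary_partial_<start>_<end>.csv instead of summary_latest.csv.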
### Training settings
parser = argparse.ArgumentParser(description='Configurations for Survival Analysis on TCGA Data.')
### Checkpoint + Misc. Pathing Parameters
parser.add_argument('--data_root_dir', type=str, default='path/to/data_root_dir', help='Data directory to WSI features (extracted via CLAM)')
### NOTE: the default for --dataset_path is a placeholder; it should point at the folder holding the <study>_all_clean.csv.zip files used when building csv_path below.
parser.add_argument('--dataset_path', type=str, default='dataset_csv', help='Directory containing the <study>_all_clean.csv.zip clinical/label files')
parser.add_argument('--seed', type=int, default=1, help='Random seed for reproducible experiment (default: 1)')
parser.add_argument('--k', type=int, default=5, help='Number of folds (default: 5)')
parser.add_argument('--k_start', type=int, default=-1, help='Start fold (default: -1, start from the first fold)')
parser.add_argument('--k_end', type=int, default=-1, help='End fold (default: -1, end at the last fold)')
parser.add_argument('--results_dir', type=str, default='./results', help='Results directory (Default: ./results)')
parser.add_argument('--which_splits', type=str, default='5foldcv', help='Which splits folder to use in ./splits/ (Default: ./splits/5foldcv)')
parser.add_argument('--split_dir', type=str, default='tcga_blca_100', help='Which cancer type within ./splits/<which_splits> to use for training. Used synonymously for "task" (Default: tcga_blca_100)')
parser.add_argument('--log_data', action='store_true', default=True, help='Log data using tensorboard')
parser.add_argument('--overwrite', action='store_true', default=False, help='Whether or not to overwrite experiments (if already ran)')
### Model Parameters.
parser.add_argument('--model_type', type=str, choices=['snn', 'deepset', 'amil', 'mi_fcn', 'mcat'], default='mcat', help='Type of model (Default: mcat)')
parser.add_argument('--mode', type=str, choices=['omic', 'path', 'pathomic', 'cluster', 'coattn'], default='coattn', help='Specifies which modalities to use / collate function in dataloader.')
parser.add_argument('--fusion', type=str, choices=['None', 'concat', 'bilinear'], default='concat', help='Type of fusion. (Default: concat).')
parser.add_argument('--apply_sig', action='store_true', default=False, help='Use genomic features as signature embeddings.')
parser.add_argument('--apply_sigfeats', action='store_true', default=False, help='Use genomic features as tabular features.')
parser.add_argument('--drop_out', action='store_true', default=True, help='Enable dropout (p=0.25)')
parser.add_argument('--model_size_wsi', type=str, default='small', help='Network size of AMIL model')
parser.add_argument('--model_size_omic', type=str, default='small', help='Network size of SNN model')
### Optimizer Parameters + Survival Loss Function
parser.add_argument('--opt', type=str, choices = ['adam', 'sgd'], default='adam')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size (Default: 1, due to varying bag sizes)')
parser.add_argument('--gc', type=int, default=32, help='Gradient Accumulation Step.')
parser.add_argument('--max_epochs', type=int, default=20, help='Maximum number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate (default: 2e-4)')
parser.add_argument('--bag_loss', type=str, choices=['svm', 'ce', 'ce_surv', 'nll_surv', 'cox_surv'], default='nll_surv', help='slide-level classification loss function (default: nll_surv)')
parser.add_argument('--label_frac', type=float, default=1.0, help='fraction of training labels (default: 1.0)')
parser.add_argument('--bag_weight', type=float, default=0.7, help='clam: weight coefficient for bag-level loss (default: 0.7)')
parser.add_argument('--reg', type=float, default=1e-5, help='L2-regularization weight decay (default: 1e-5)')
parser.add_argument('--alpha_surv', type=float, default=0.0, help='How much to weigh uncensored patients')
parser.add_argument('--reg_type', type=str, choices=['None', 'omic', 'pathomic'], default='None', help='Which network submodules to apply L1-Regularization (default: None)')
parser.add_argument('--lambda_reg', type=float, default=1e-4, help='L1-Regularization Strength (Default 1e-4)')
parser.add_argument('--weighted_sample', action='store_true', default=True, help='Enable weighted sampling')
parser.add_argument('--early_stopping', action='store_true', default=False, help='Enable early stopping')
args = parser.parse_args()
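### Note: with the defaults above, each WSI bag is its own batch (--batch_size 1) and --gc
### controls gradient accumulation in the training loop, for an effective batch size of
### batch_size * gc = 32 bags per optimizer step.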
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
### Creates Experiment Code from argparse + Folder Name to Save Results
args = get_custom_exp_code(args)
args.task = '_'.join(args.split_dir.split('_')[:2]) + '_survival'
print("Experiment Name:", args.exp_code)
### Sets Seed for reproducible experiments.
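### (cuDNN benchmarking is disabled and deterministic behavior is requested below, trading
### some speed for run-to-run reproducibility.)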
def seed_torch(seed=7):
    import random
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if device.type == 'cuda':
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
seed_torch(args.seed)
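### encoding_size is presumably the dimensionality of the pre-extracted patch features
### (the --data_root_dir help mentions CLAM, whose default extracted features are 1024-d).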
encoding_size = 1024
settings = {'num_splits': args.k,
            'k_start': args.k_start,
            'k_end': args.k_end,
            'task': args.task,
            'max_epochs': args.max_epochs,
            'results_dir': args.results_dir,
            'lr': args.lr,
            'experiment': args.exp_code,
            'reg': args.reg,
            'label_frac': args.label_frac,
            'bag_loss': args.bag_loss,
            'bag_weight': args.bag_weight,
            'seed': args.seed,
            'model_type': args.model_type,
            'model_size_wsi': args.model_size_wsi,
            'model_size_omic': args.model_size_omic,
            'use_drop_out': args.drop_out,
            'weighted_sample': args.weighted_sample,
            'gc': args.gc,
            'opt': args.opt}
print('\nLoad Dataset')
if 'survival' in args.task:
    args.n_classes = 4
    study = '_'.join(args.task.split('_')[:2])
    if study == 'tcga_kirc' or study == 'tcga_kirp':
        combined_study = 'tcga_kidney'
    elif study == 'tcga_luad' or study == 'tcga_lusc':
        combined_study = 'tcga_lung'
    else:
        combined_study = study
    study_dir = '%s_20x_features' % combined_study
    dataset = Generic_MIL_Survival_Dataset(csv_path='./%s/%s_all_clean.csv.zip' % (args.dataset_path, combined_study),
                                           mode=args.mode,
                                           apply_sig=args.apply_sig,
                                           data_dir=os.path.join(args.data_root_dir, study_dir),
                                           shuffle=False,
                                           seed=args.seed,
                                           print_info=True,
                                           patient_strat=False,
                                           n_bins=4,
                                           label_col='survival_months',
                                           ignore=[])
else:
    raise NotImplementedError

if isinstance(dataset, Generic_MIL_Survival_Dataset):
    args.task_type = 'survival'
else:
    raise NotImplementedError
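### n_bins=4 above discretizes survival_months into four time intervals, and args.n_classes = 4
### is set to match: the default nll_surv loss predicts a hazard for each discrete survival bin.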
### Creates results_dir Directory.
if not os.path.isdir(args.results_dir):
    os.mkdir(args.results_dir)

### Appends to the results_dir path: 1) which splits were used for training (e.g. 5foldcv), 2) the parameter code, and 3) the experiment code.
args.results_dir = os.path.join(args.results_dir, args.which_splits, args.param_code, str(args.exp_code) + '_s{}'.format(args.seed))
if not os.path.isdir(args.results_dir):
    os.makedirs(args.results_dir)

if ('summary_latest.csv' in os.listdir(args.results_dir)) and (not args.overwrite):
    print("Exp Code <%s> already exists! Exiting script." % args.exp_code)
    sys.exit()

### Sets the path to split_dir.
args.split_dir = os.path.join('./splits', args.which_splits, args.split_dir)
print("split_dir", args.split_dir)
assert os.path.isdir(args.split_dir)
settings.update({'split_dir': args.split_dir})

with open(args.results_dir + '/experiment_{}.txt'.format(args.exp_code), 'w') as f:
    print(settings, file=f)

print("################# Settings ###################")
for key, val in settings.items():
    print("{}: {}".format(key, val))
if __name__ == "__main__":
start = timer()
results = main(args)
end = timer()
print("finished!")
print("end script")
print('Script Time: %f seconds' % (end - start))