save_splits.py
import time
import argparse
import numpy as np
import torch
from deeprobust.graph.defense import GCN, ProGNN
from deeprobust.graph.data import Dataset, PrePtbDataset
from deeprobust.graph.utils import preprocess, encode_onehot, get_train_val_test
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
                    default=False, help='debug mode')
parser.add_argument('--only_gcn', action='store_true',
                    default=False, help='test the performance of gcn without other components')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default='cora',
                    choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
parser.add_argument('--attack', type=str, default='meta',
                    choices=['no', 'meta', 'random', 'nettack'])
parser.add_argument('--ptb_rate', type=float, default=0.05, help="noise ptb_rate")
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--alpha', type=float, default=5e-4, help='weight of l1 norm')
parser.add_argument('--beta', type=float, default=1.5, help='weight of nuclear norm')
parser.add_argument('--gamma', type=float, default=1, help='weight of l2 norm')
parser.add_argument('--lambda_', type=float, default=0, help='weight of feature smoothing')
parser.add_argument('--phi', type=float, default=0, help='weight of symmetric loss')
parser.add_argument('--inner_steps', type=int, default=2, help='steps for inner optimization')
parser.add_argument('--outer_steps', type=int, default=1, help='steps for outer optimization')
parser.add_argument('--lr_adj', type=float, default=0.01, help='lr for training adj')
parser.add_argument('--symmetric', action='store_true', default=False,
                    help='whether use symmetric matrix')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
if args.cuda:
    torch.cuda.manual_seed(args.seed)
if args.ptb_rate == 0:
    args.attack = "no"
print(args)
# The random seed here controls the train/val/test split; it must match the seed used when the perturbed graph was generated
data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
if args.dataset == 'pubmed':
    # just for matching the results in the paper, see details in https://github.com/ChandlerBang/Pro-GNN/issues/2
    print("just for matching the results in the paper, " + \
          "see details in https://github.com/ChandlerBang/Pro-GNN/issues/2")
    idx_train, idx_val, idx_test = get_train_val_test(adj.shape[0],
            val_size=0.1, test_size=0.8, stratify=encode_onehot(labels), seed=15)
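# Save the resulting train/val/test indices to JSON so the same splits can be reused across runs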
import json
import os

os.makedirs('splits', exist_ok=True)  # make sure the output directory exists
splits = {'idx_train': idx_train.tolist(),
          'idx_val': idx_val.tolist(),
          'idx_test': idx_test.tolist()}
with open(f'splits/{args.dataset}_prognn_splits.json', 'w') as f:
    json.dump(splits, f)
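A minimal sketch (not part of the original script) of loading the saved splits back into index arrays; the file name mirrors the f-string above, with 'cora' standing in for the default dataset:

import json
import numpy as np

# Read the JSON written by save_splits.py and convert the index lists back to arrays
with open('splits/cora_prognn_splits.json') as f:
    splits = json.load(f)

idx_train = np.array(splits['idx_train'])
idx_val = np.array(splits['idx_val'])
idx_test = np.array(splits['idx_test'])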