-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathutils.py
129 lines (102 loc) · 3.49 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import torch
import torch.nn.functional as F
from torch_sparse import SparseTensor
import numpy as np
import yaml
import random
from scipy.sparse import identity, diags
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import normalize
def seed(seed=123):
    """Seed every RNG in use (python, numpy, torch CPU and CUDA) and pin
    cuDNN to deterministic kernels so runs are reproducible.

    Args:
        seed: integer seed applied to all generators (default 123).
    """
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    # Trade speed for reproducibility: fixed algorithms, no autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def normal_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^{-1/2} A D^{-1/2}.

    Args:
        adj: scipy sparse adjacency matrix (square; presumably non-negative
            edge weights — confirm against callers).

    Returns:
        scipy CSR matrix containing the normalized adjacency.
    """
    adj = SparseTensor.from_scipy(adj)
    # Row sums = node degrees.
    deg = adj.sum(dim=1).to(torch.float)
    D_isqrt = deg.pow(-0.5)
    # Isolated nodes have degree 0 -> pow(-0.5) gives inf; zero them so
    # their rows/columns stay all-zero instead of propagating inf.
    D_isqrt[D_isqrt == float('inf')] = 0
    # Broadcasted row scaling then column scaling: D^{-1/2} * A * D^{-1/2}.
    DAD = D_isqrt.view(-1,1) * adj * D_isqrt.view(1,-1)
    return DAD.to_scipy(layout='csr')
def adj2laplacian(A):
    """Build the (slightly shifted) normalized graph Laplacian.

    Returns (1 + 1e-6) * I - D^{-1/2} A D^{-1/2} as a scipy sparse matrix;
    the tiny shift keeps the matrix strictly positive definite.
    """
    sym_norm = normal_adj(A)
    shifted_identity = identity(sym_norm.shape[0]).multiply(1 + 1e-6)
    return shifted_identity - sym_norm
def accuracy(output, labels):
    """Classification accuracy of row-wise argmax predictions.

    Args:
        output: (N, C) score/log-probability tensor (any device; moved to CPU).
        labels: (N,) ground-truth label tensor.

    Returns:
        0-dim double tensor holding the fraction of correct predictions.
    """
    predictions = output.cpu().argmax(dim=1).type_as(labels)
    hits = (predictions == labels).double().sum()
    return hits / len(labels)
def train(model, labels, x, adj, idx_train, optimizer):
    """Run one optimization step on the training split.

    Forward pass over all nodes, NLL loss restricted to idx_train,
    backprop, and a single optimizer update.

    Args:
        model: module called as model(x, adj); expected to emit log-probabilities.
        labels: ground-truth label tensor for all nodes.
        x: node feature tensor.
        adj: graph structure passed through to the model.
        idx_train: indices of the training nodes.
        optimizer: torch optimizer over model's parameters.

    Returns:
        The training loss as a python float.
    """
    model.train()
    optimizer.zero_grad()
    log_probs = model(x, adj)
    train_loss = F.nll_loss(log_probs[idx_train], labels[idx_train])
    train_loss.backward()
    optimizer.step()
    return train_loss.item()
@torch.no_grad()
def test(model, labels, x, edge_index, idx_train, idx_val, idx_test, out=None):
    """Evaluate accuracy on the train/val/test splits without gradients.

    Args:
        model: module called as model(x, edge_index); only used when out is None.
        labels: ground-truth labels for all nodes.
        x: node feature tensor.
        edge_index: graph structure passed through to the model.
        idx_train, idx_val, idx_test: split index tensors.
        out: optional precomputed model output; when given, the forward
            pass is skipped (lets callers reuse one forward for all splits).

    Returns:
        (train_acc, val_acc, test_acc, out) with accuracies as floats.
    """
    if out is None:
        model.eval()
        out = model(x, edge_index)
    split_accs = [
        accuracy(out[split], labels[split]).item()
        for split in (idx_train, idx_val, idx_test)
    ]
    return split_accs[0], split_accs[1], split_accs[2], out
def svds_jl(adj, k):
    """Truncated SVD of *adj* computed by an external Julia routine.

    Boots a Julia runtime, loads ./svds.jl, and calls its main(adj, k).
    Only the real parts of the factors are returned.

    Args:
        adj: matrix to factor (handed straight to the Julia side).
        k: number of singular triplets to compute.

    Returns:
        (U, S, Vt) real-valued factor arrays.
    """
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main
    Main.include("./svds.jl")
    print('Perform Truncated SVD')
    u, s, vt = Main.main(adj, k)
    return u.real, s.real, vt.real
def embedding_normalize(embedding, norm):
    """Normalize an embedding matrix according to the chosen scheme.

    Args:
        embedding: 2-D array of embeddings, one row per item.
        norm: one of "unit_vector" (L2-normalize each row),
            "standardize" (zero mean / unit variance per column),
            "minmax" (rescale each column to [0, 1]); any other value
            returns the input unchanged.

    Returns:
        The normalized (or untouched) embedding array.
    """
    if norm == "unit_vector":
        return normalize(embedding, axis=1)
    if norm == "standardize":
        return StandardScaler().fit_transform(embedding)
    if norm == "minmax":
        return MinMaxScaler().fit_transform(embedding)
    return embedding
def minmax_gamma(embedding_norm, gamma, ptb_rate, embed_sim):
    """Adjust the gamma threshold for min-max-normalized embeddings.

    When min-max normalization is in use and the perturbation rate is one
    of the tuned values, gamma is re-anchored at the minimum embedding
    similarity plus a rate-specific offset; otherwise gamma passes through.

    Args:
        embedding_norm: name of the normalization scheme in effect.
        gamma: current threshold value (returned unchanged if no rule fires).
        ptb_rate: perturbation rate selecting the offset.
        embed_sim: array of embedding similarities (its min() anchors gamma).

    Returns:
        The (possibly adjusted) gamma value.
    """
    if embedding_norm != "minmax":
        return gamma
    # Offsets tuned per perturbation rate; 3.0 and 4.0 share an offset.
    offsets = {2.0: 2.5, 3.0: 2.0, 4.0: 2.0}
    offset = offsets.get(ptb_rate)
    if offset is not None:
        gamma = embed_sim.min() + offset
    return gamma
def preprocess_args(args):
    """Merge command-line arguments with the experiment's YAML config.

    Loads the config selected by (dataset, backbone, attack, perturbed,
    ptb_rate), lets any non-None CLI argument override it (with the
    strings 'True'/'False' coerced to booleans), rebinds args.__dict__ to
    the merged mapping, and finally applies dataset-specific gamma
    overrides used in the full-distortion setting.

    Args:
        args: argparse-style namespace; its __dict__ is replaced in place.

    Returns:
        The same namespace, now carrying the merged configuration.
    """
    cli_values = args.__dict__
    variant = f"perturbed_{args.ptb_rate}" if args.perturbed else "clean"
    config_file = f"configs/{args.dataset}/{args.backbone}/{args.attack}/{variant}.yaml"
    with open(config_file) as fh:
        merged = yaml.safe_load(fh)
    # CLI wins over YAML, but only for explicitly supplied (non-None) values.
    for key, value in cli_values.items():
        if value is None:
            continue
        if value in ['True', 'False']:
            merged[key] = value == 'True'
        else:
            merged[key] = value
    args.__dict__ = merged
    # Hand-tuned gamma values for the full-distortion experiments.
    if args.full_distortion:
        if args.dataset == "chameleon" and args.attack == "meta":
            args.gamma = 0.3
        elif args.dataset == "squirrel":
            args.gamma = 0.08
    return args