Commit 764e79c

Merge remote-tracking branch 'origin/graphhdv2' into graphhdv2
# Conflicts:
#	GraphHD_v2/graphhd_experiment_1.py
#	GraphHD_v2/graphhd_experiment_2.py
#	GraphHD_v2/graphhd_experiment_3.py
pereverges committed Nov 7, 2023
2 parents c8dc7a4 + bcd600e commit 764e79c
Showing 3 changed files with 53 additions and 65 deletions.
35 changes: 14 additions & 21 deletions GraphHD_v2/graphhd_experiment_3.py
@@ -15,7 +15,9 @@
 import csv
 
 import time
-csv_file = 'experiment_3/result'+str(time.time())+'.csv'
+
+csv_file = "experiment_3/result" + str(time.time()) + ".csv"
+
 
 def experiment(randomness=0, dataset="MUTAG"):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -31,7 +33,6 @@ def experiment(randomness=0, dataset="MUTAG"):
     test_size = len(graphs) - train_size
     train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size])
 
-
     def sparse_stochastic_graph(G):
         """
         Returns a sparse adjacency matrix of the graph G.
@@ -45,7 +46,6 @@ def sparse_stochastic_graph(G):
         size = (G.num_nodes, G.num_nodes)
         return torch.sparse_coo_tensor(G.edge_index, values_per_node, size)
 
-
     def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
         N = G.num_nodes
         M = sparse_stochastic_graph(G) * alpha
@@ -60,7 +60,6 @@ def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
                 return v
         return v
 
-
    def to_undirected(edge_index):
        """
        Returns the undirected edge_index
@@ -70,7 +69,6 @@ def to_undirected(edge_index):
         edge_index = torch.unique(edge_index, dim=1)
         return edge_index
 
-
     def min_max_graph_size(graph_dataset):
         if len(graph_dataset) == 0:
             return None, None
@@ -85,9 +83,6 @@ def min_max_graph_size(graph_dataset):
 
         return min_num_nodes, max_num_nodes
 
-
-
-
     class Encoder(nn.Module):
         def __init__(self, out_features, size):
             super(Encoder, self).__init__()
@@ -116,7 +111,6 @@ def forward(self, x):
             hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col])
             return torchhd.multiset(hvs)
 
-
     min_graph_size, max_graph_size = min_max_graph_size(graphs)
     encode = Encoder(DIMENSIONS, max_graph_size)
     encode = encode.to(device)
@@ -133,8 +127,10 @@ def forward(self, x):
         model.add(samples_hv, samples.y)
 
     accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes)
-    f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
-    #f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
+    f1 = torchmetrics.F1Score(
+        num_classes=graphs.num_classes, average="macro", multiclass=True
+    )
+    # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
 
     with torch.no_grad():
         model.normalize()
@@ -147,14 +143,13 @@ def forward(self, x):
             accuracy.update(outputs.cpu(), samples.y)
             f1.update(outputs.cpu(), samples.y)
 
-    acc = (accuracy.compute().item() * 100)
-    f = (f1.compute().item() * 100)
+    acc = accuracy.compute().item() * 100
+    f = f1.compute().item() * 100
     return acc, f
 
 
-
 REPETITIONS = 5
-DATASET = ['MUTAG', 'ENZYMES', 'PROTEINS']
+DATASET = ["MUTAG", "ENZYMES", "PROTEINS"]
 
 for d in DATASET:
     acc_final = []
@@ -165,13 +160,11 @@ def forward(self, x):
         acc, f1 = experiment(100, d)
         acc_aux.append(acc)
         f1_aux.append(f1)
-    acc_final.append(round(sum(acc_aux)/REPETITIONS, 2))
-    f1_final.append(round(sum(f1_aux)/REPETITIONS,2))
+    acc_final.append(round(sum(acc_aux) / REPETITIONS, 2))
+    f1_final.append(round(sum(f1_aux) / REPETITIONS, 2))
 
-    with open(csv_file, mode='a', newline='') as file:
+    with open(csv_file, mode="a", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow([d] +['RANDOM'])
+        writer.writerow([d] + ["RANDOM"])
         writer.writerows([acc_final])
         writer.writerows([f1_final])
-
-
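
Note on the F1Score hunk above: the two constructors belong to different torchmetrics generations. The form this file switches to (num_classes=..., average="macro", multiclass=True) predates the torchmetrics 0.11 classification rework; the commented-out alternative is the 0.11+ task-based form. A minimal compatibility sketch, where make_macro_f1 is a hypothetical helper and not part of this commit:

import torchmetrics

def make_macro_f1(num_classes):
    try:
        # torchmetrics >= 0.11: the task is selected explicitly.
        return torchmetrics.F1Score(
            task="multiclass", num_classes=num_classes, average="macro"
        )
    except (TypeError, ValueError):
        # Older torchmetrics: no task argument; multiclass is a boolean flag.
        return torchmetrics.F1Score(
            num_classes=num_classes, average="macro", multiclass=True
        )

Either object supports the update()/compute() calls used in the evaluation loop above.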


40 changes: 20 additions & 20 deletions GraphHD_v2/graphhd_experiment_4.py
@@ -15,7 +15,9 @@
 import csv
 
 import time
-csv_file = 'experiment_4/result'+str(time.time())+'.csv'
+
+csv_file = "experiment_4/result" + str(time.time()) + ".csv"
+
 
 def experiment(randomness=0, dataset="MUTAG"):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -31,7 +33,6 @@ def experiment(randomness=0, dataset="MUTAG"):
     test_size = len(graphs) - train_size
     train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size])
 
-
    def sparse_stochastic_graph(G):
        """
        Returns a sparse adjacency matrix of the graph G.
@@ -45,7 +46,6 @@ def sparse_stochastic_graph(G):
         size = (G.num_nodes, G.num_nodes)
         return torch.sparse_coo_tensor(G.edge_index, values_per_node, size)
 
-
     def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
         N = G.num_nodes
         M = sparse_stochastic_graph(G) * alpha
@@ -60,7 +60,6 @@ def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
                 return v
         return v
 
-
     def to_undirected(edge_index):
         """
         Returns the undirected edge_index
@@ -70,7 +69,6 @@ def to_undirected(edge_index):
         edge_index = torch.unique(edge_index, dim=1)
         return edge_index
 
-
     def min_max_graph_size(graph_dataset):
         if len(graph_dataset) == 0:
             return None, None
@@ -113,7 +111,9 @@ def local_centrality(self, x):
 
         for i in nodes:
             adjacent_nodes = x.edge_index[1][x.edge_index[0] == i]
-            node_id_hvs[i] = torchhd.bind(self.node_ids.weight[i], self.levels.weight[len(adjacent_nodes)])
+            node_id_hvs[i] = torchhd.bind(
+                self.node_ids.weight[i], self.levels.weight[len(adjacent_nodes)]
+            )
 
         row, col = to_undirected(x.edge_index)
         hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col])
@@ -127,7 +127,10 @@ def semi_local_centrality(self, x):
         for i in nodes:
             adjacent_nodes = x.edge_index[1][x.edge_index[0] == i]
             for j in adjacent_nodes:
-                node_id_hvs[i] = torchhd.bundle(self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], node_id_hvs[i])
+                node_id_hvs[i] = torchhd.bundle(
+                    self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])],
+                    node_id_hvs[i],
+                )
             node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i]))
 
         row, col = to_undirected(x.edge_index)
@@ -136,7 +139,7 @@ def semi_local_centrality(self, x):
 
     def forward(self, x):
         return self.local_centrality(x)
-    '''
+    """
     nodes, _ = x.edge_index
     nodes = list(set(nodes))
    node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device)
@@ -157,7 +160,7 @@ def forward(self, x):
     row, col = to_undirected(x.edge_index)
     hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col])
     return torchhd.multiset(hvs)
-    '''
+    """
 
     min_graph_size, max_graph_size = min_max_graph_size(graphs)
     encode = Encoder(DIMENSIONS, max_graph_size)
@@ -175,7 +178,7 @@ def forward(self, x):
         model.add(samples_hv, samples.y)
 
     accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes)
-    #f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
+    # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
     f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
 
     with torch.no_grad():
@@ -189,14 +192,13 @@ def forward(self, x):
             accuracy.update(outputs.cpu(), samples.y)
             f1.update(outputs.cpu(), samples.y)
 
-    acc = (accuracy.compute().item() * 100)
-    f = (f1.compute().item() * 100)
+    acc = accuracy.compute().item() * 100
+    f = f1.compute().item() * 100
     return acc, f
 
 
-
 REPETITIONS = 1
-DATASET = ['MUTAG']
+DATASET = ["MUTAG"]
 
 for d in DATASET:
     acc_final = []
@@ -207,13 +209,11 @@ def forward(self, x):
         acc, f1 = experiment(100, d)
         acc_aux.append(acc)
         f1_aux.append(f1)
-    acc_final.append(round(sum(acc_aux)/REPETITIONS, 2))
-    f1_final.append(round(sum(f1_aux)/REPETITIONS,2))
+    acc_final.append(round(sum(acc_aux) / REPETITIONS, 2))
+    f1_final.append(round(sum(f1_aux) / REPETITIONS, 2))
 
-    with open(csv_file, mode='a', newline='') as file:
+    with open(csv_file, mode="a", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow([d] +['RANDOM'])
+        writer.writerow([d] + ["RANDOM"])
         writer.writerows([acc_final])
         writer.writerows([f1_final])
-
-
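
For orientation, the local_centrality encoder reflowed above works in three steps: bind each node's random ID hypervector with a level hypervector selected by the node's degree, bind the two endpoint hypervectors of every edge, then bundle all edge hypervectors with multiset. A self-contained sketch of that scheme; encode_degree_graph and its default sizes are illustrative assumptions, not code from this repo:

import torch
import torchhd
from torchhd import embeddings

def encode_degree_graph(edge_index, num_nodes, dims=10000, max_size=30):
    # Assumes num_nodes <= max_size and every degree < max_size.
    node_ids = embeddings.Random(max_size, dims)
    levels = embeddings.Circular(max_size, dims)  # Circular levels, as in the
    # __init__ shown in graphhd_experiment_5.py below
    src, dst = edge_index
    # Out-degree per node, computed in one shot instead of the per-node
    # loop used in the diff.
    degree = torch.zeros(num_nodes, dtype=torch.long)
    degree.scatter_add_(0, src, torch.ones_like(src))
    # Degree-aware node hypervectors: bind(ID, level[degree]).
    node_hvs = torchhd.bind(node_ids.weight[:num_nodes], levels.weight[degree])
    # Each edge binds its endpoints; the graph bundles all edge vectors.
    edge_hvs = torchhd.bind(node_hvs[src], node_hvs[dst])
    return torchhd.multiset(edge_hvs)

The semantics match the per-node loop in the hunk; only the degree computation is vectorized.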


43 changes: 19 additions & 24 deletions GraphHD_v2/graphhd_experiment_5.py
@@ -15,7 +15,9 @@
 import csv
 
 import time
-csv_file = 'experiment_4/result'+str(time.time())+'.csv'
+
+csv_file = "experiment_4/result" + str(time.time()) + ".csv"
+
 
 def experiment(randomness=0, dataset="MUTAG"):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -31,7 +33,6 @@ def experiment(randomness=0, dataset="MUTAG"):
     test_size = len(graphs) - train_size
     train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size])
 
-
     def sparse_stochastic_graph(G):
         """
         Returns a sparse adjacency matrix of the graph G.
@@ -45,7 +46,6 @@ def sparse_stochastic_graph(G):
         size = (G.num_nodes, G.num_nodes)
         return torch.sparse_coo_tensor(G.edge_index, values_per_node, size)
 
-
     def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
         N = G.num_nodes
         M = sparse_stochastic_graph(G) * alpha
@@ -60,7 +60,6 @@ def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
                 return v
         return v
 
-
     def to_undirected(edge_index):
         """
         Returns the undirected edge_index
@@ -70,7 +69,6 @@ def to_undirected(edge_index):
         edge_index = torch.unique(edge_index, dim=1)
         return edge_index
 
-
     def min_max_graph_size(graph_dataset):
         if len(graph_dataset) == 0:
             return None, None
Expand All @@ -90,20 +88,17 @@ def __init__(self, out_features, size, node_features):
super(Encoder, self).__init__()
self.out_features = out_features


self.node_ids = embeddings.Random(size, out_features)
self.node_attr = embeddings.Density(node_features, out_features)



self.levels = embeddings.Circular(size, out_features)

def local_centrality(self, x):
nodes, _ = x.edge_index
nodes = list(set(nodes))
node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device)

#for i in nodes:
# for i in nodes:
# node_id_hvs[i] = torchhd.bind(self.node_ids.weight[i], self.node_attr(x.x[i]))

node_id_hvs = torchhd.bind(self.node_ids.weight, self.node_attr(x.x))
@@ -121,7 +116,10 @@ def semi_local_centrality(self, x):
         for i in nodes:
             adjacent_nodes = x.edge_index[1][x.edge_index[0] == i]
             for j in adjacent_nodes:
-                node_id_hvs[i] = torchhd.bundle(self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], node_id_hvs[i])
+                node_id_hvs[i] = torchhd.bundle(
+                    self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])],
+                    node_id_hvs[i],
+                )
             node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i]))
 
         row, col = to_undirected(x.edge_index)
@@ -130,7 +128,7 @@ def semi_local_centrality(self, x):
 
     def forward(self, x):
         return self.local_centrality(x)
-    '''
+    """
     nodes, _ = x.edge_index
     nodes = list(set(nodes))
     node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device)
@@ -151,10 +149,10 @@ def forward(self, x):
     row, col = to_undirected(x.edge_index)
     hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col])
     return torchhd.multiset(hvs)
-    '''
+    """
 
     min_graph_size, max_graph_size = min_max_graph_size(graphs)
-    encode = Encoder(DIMENSIONS, max_graph_size,len(graphs.x[0]))
+    encode = Encoder(DIMENSIONS, max_graph_size, len(graphs.x[0]))
     encode = encode.to(device)
 
     model = Centroid(DIMENSIONS, graphs.num_classes)
@@ -169,7 +167,7 @@ def forward(self, x):
         model.add(samples_hv, samples.y)
 
     accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes)
-    #f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
+    # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
     f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
 
     with torch.no_grad():
@@ -183,14 +181,13 @@ def forward(self, x):
             accuracy.update(outputs.cpu(), samples.y)
             f1.update(outputs.cpu(), samples.y)
 
-    acc = (accuracy.compute().item() * 100)
-    f = (f1.compute().item() * 100)
+    acc = accuracy.compute().item() * 100
+    f = f1.compute().item() * 100
     return acc, f
 
 
-
 REPETITIONS = 1
-DATASET = ['MUTAG', 'ENZYMES', 'PROTEINS']
+DATASET = ["MUTAG", "ENZYMES", "PROTEINS"]
 
 for d in DATASET:
     acc_final = []
@@ -201,13 +198,11 @@ def forward(self, x):
         acc, f1 = experiment(100, d)
         acc_aux.append(acc)
         f1_aux.append(f1)
-    acc_final.append(round(sum(acc_aux)/REPETITIONS, 2))
-    f1_final.append(round(sum(f1_aux)/REPETITIONS,2))
+    acc_final.append(round(sum(acc_aux) / REPETITIONS, 2))
+    f1_final.append(round(sum(f1_aux) / REPETITIONS, 2))
 
-    with open(csv_file, mode='a', newline='') as file:
+    with open(csv_file, mode="a", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow([d] +['RANDOM'])
+        writer.writerow([d] + ["RANDOM"])
         writer.writerows([acc_final])
         writer.writerows([f1_final])
-
-
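
Two things stand out in this file's hunks: csv_file still points at experiment_4/result on both sides of the diff, and the encoder is attribute-aware, binding each node's random ID with a Density encoding of its feature vector (the vectorized node_id_hvs line above). A standalone sketch of that binding under assumed sizes; the tensors here are stand-ins, not data from this commit:

import torch
import torchhd
from torchhd import embeddings

DIMENSIONS = 10000
num_nodes, num_features = 17, 7  # e.g. one MUTAG-sized graph
x = torch.rand(num_nodes, num_features)  # stand-in for graphs.x

node_ids = embeddings.Random(num_nodes, DIMENSIONS)
node_attr = embeddings.Density(num_features, DIMENSIONS)

# One hypervector per node: bind the node's random ID with the density
# encoding of its attribute row, mirroring
#   node_id_hvs = torchhd.bind(self.node_ids.weight, self.node_attr(x.x))
node_hvs = torchhd.bind(node_ids.weight, node_attr(x))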

