
Commit

[github-action] formatting fixes
github-actions[bot] committed Nov 8, 2023
1 parent 11ad0ce commit 297742e
Showing 1 changed file with 55 additions and 36 deletions.
GraphHD_v2/graphhd_basic_centrality.py (91 changes: 55 additions & 36 deletions)
@@ -6,6 +6,7 @@
 # Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io
 from torch_geometric.datasets import TUDataset
 import torch_geometric.utils
+
 # Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io
 import torchmetrics
 
@@ -15,15 +16,16 @@
 import csv
 
 import time
-csv_file = 'basic_centrality/result'+str(time.time())+'.csv'
+
+csv_file = "basic_centrality/result" + str(time.time()) + ".csv"
 DIM = 10000
 
 
-def experiment(randomness=0, embed='random', dataset="MUTAG"):
+def experiment(randomness=0, embed="random", dataset="MUTAG"):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     print("Using {} device".format(device))
 
-    DIMENSIONS = DIM # hypervectors dimension
+    DIMENSIONS = DIM  # hypervectors dimension
 
     # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets
     # dataset = "MUTAG"
@@ -33,7 +35,6 @@ def experiment(randomness=0, embed='random', dataset="MUTAG"):
     test_size = len(graphs) - train_size
     train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size])
 
-
     def sparse_stochastic_graph(G):
         """
         Returns a sparse adjacency matrix of the graph G.
@@ -47,7 +48,6 @@ def sparse_stochastic_graph(G):
         size = (G.num_nodes, G.num_nodes)
         return torch.sparse_coo_tensor(G.edge_index, values_per_node, size)
 
-
     def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
         N = G.num_nodes
         M = sparse_stochastic_graph(G) * alpha
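Note: the body of `sparse_stochastic_graph` is mostly collapsed in this diff; its docstring describes a column-stochastic transition matrix. The following is a minimal standalone sketch of that idea, not the file's exact implementation; the helper name `stochastic_matrix` and the toy edge list are illustrative only.

```python
import torch

def stochastic_matrix(edge_index, num_nodes):
    # Illustrative sketch (not the collapsed code above): M[dst, src] is
    # 1 / out_degree(src), so every column sums to 1 and M @ v moves rank
    # mass along outgoing edges.
    src, dst = edge_index
    out_deg = torch.bincount(src, minlength=num_nodes).clamp(min=1).float()
    values = 1.0 / out_deg[src]
    return torch.sparse_coo_tensor(
        torch.stack([dst, src]), values, (num_nodes, num_nodes)
    )

edge_index = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])  # toy 3-node graph
M = stochastic_matrix(edge_index, 3)
print(M.to_dense().sum(dim=0))  # tensor([1., 1., 1.]): column-stochastic
```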
@@ -62,7 +62,6 @@ def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06):
                 return v
         return v
 
-
     def to_undirected(edge_index):
         """
         Returns the undirected edge_index
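Note: the two hunks above show only fragments of `pagerank` (the damping setup and the converged `return v`). For reference, here is a self-contained power-iteration sketch with the same signature; it assumes a column-stochastic dense matrix `M`, such as the one built in the previous sketch, and is not a copy of the collapsed lines.

```python
import torch

def pagerank_sketch(M, alpha=0.85, max_iter=100, tol=1e-06):
    # Power iteration: damped walk plus uniform teleportation. Stops early
    # once the L1 change between successive iterates drops below tol.
    N = M.size(0)
    v = torch.full((N,), 1.0 / N)
    for _ in range(max_iter):
        v_prev = v
        v = alpha * (M @ v) + (1.0 - alpha) / N
        if torch.norm(v - v_prev, p=1) < tol:
            return v
    return v

print(pagerank_sketch(M.to_dense()))  # reuses the toy M from the sketch above
```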
@@ -72,7 +71,6 @@ def to_undirected(edge_index):
         edge_index = torch.unique(edge_index, dim=1)
         return edge_index
 
-
     def min_max_graph_size(graph_dataset):
         if len(graph_dataset) == 0:
             return None, None
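Note: only the unique-columns step of `to_undirected` is visible here; the collapsed line presumably concatenates each edge with its reverse first. A sketch of that pattern follows (torch_geometric also ships `torch_geometric.utils.to_undirected` for the same job):

```python
import torch

edge_index = torch.tensor([[0, 1, 1], [1, 0, 2]])  # toy directed edges
both = torch.cat([edge_index, edge_index.flip(0)], dim=1)  # append reversed edges
undirected = torch.unique(both, dim=1)  # drop duplicate columns, as in the diff
print(undirected)  # each edge now appears exactly once per direction
```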
@@ -87,20 +85,19 @@ def min_max_graph_size(graph_dataset):
 
         return min_num_nodes, max_num_nodes
 
-
     class Encoder(nn.Module):
         def __init__(self, out_features, size):
             super(Encoder, self).__init__()
             self.out_features = out_features
-            if embed == 'thermometer':
+            if embed == "thermometer":
                 self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA)
-            elif embed == 'circular':
+            elif embed == "circular":
                 self.node_ids = embeddings.Circular(size, out_features, vsa=VSA)
-            elif embed == 'projection':
+            elif embed == "projection":
                 self.node_ids = embeddings.Projection(size, out_features, vsa=VSA)
-            elif embed == 'sinusoid':
+            elif embed == "sinusoid":
                 self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA)
-            elif embed == 'density':
+            elif embed == "density":
                 self.node_ids = embeddings.Density(size, out_features, vsa=VSA)
             else:
                 self.node_ids = embeddings.Random(size, out_features, vsa=VSA)
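Note: the `embed` branches select between torchhd embedding classes. Their practical difference: `Random` assigns quasi-orthogonal hypervectors to unrelated ids, while level-style embeddings keep nearby values similar, which is what makes them suitable for the degree levels used in `forward` below. A small comparison sketch, with the count and dimension chosen arbitrarily:

```python
import torchhd
from torchhd import embeddings

DIM = 10000
node_ids = embeddings.Random(30, DIM)  # unrelated ids: near-orthogonal codes
levels = embeddings.Level(30, DIM)     # ordered values: nearby ids stay similar

print(torchhd.cosine_similarity(levels.weight[3], levels.weight[4]))      # high
print(torchhd.cosine_similarity(node_ids.weight[3], node_ids.weight[4]))  # ~0
```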
@@ -116,27 +113,27 @@ def forward(self, x):
             nodes, _ = x.edge_index
             indexs = list(map(int, torch_geometric.utils.degree(nodes)))
 
-
             try:
-                node_id_hvs = torchhd.bind(self.node_ids.weight[list(range(x.num_nodes))], self.levels.weight[indexs])
-                #node_id_hvs = torchhd.bind(node_id_hvs, self.node_attr2(x.x))
+                node_id_hvs = torchhd.bind(
+                    self.node_ids.weight[list(range(x.num_nodes))],
+                    self.levels.weight[indexs],
+                )
+                # node_id_hvs = torchhd.bind(node_id_hvs, self.node_attr2(x.x))
             except Exception as e:
-                print('err ' + str(e))
+                print("err " + str(e))
 
             row, col = to_undirected(x.edge_index)
 
             hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col])
             return torchhd.multiset(hvs)
 
-
     min_graph_size, max_graph_size = min_max_graph_size(graphs)
     encode = Encoder(DIMENSIONS, max_graph_size)
     encode = encode.to(device)
 
     model = Centroid(DIMENSIONS, graphs.num_classes, VSA)
     model = model.to(device)
 
-
     train_t = time.time()
     with torch.no_grad():
         for samples in tqdm(train_ld, desc="Training"):
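Note: `forward` implements the GraphHD-style encoding: each node id hypervector is bound with a level hypervector indexed by the node's degree (`self.levels` is defined in a collapsed part of `__init__`, presumably an `embeddings.Level`), every undirected edge becomes the binding of its endpoint hypervectors, and the graph is their multiset bundle. A toy end-to-end sketch of just that encoding step, with made-up degrees and edges:

```python
import torch
import torchhd

DIM, num_nodes = 10000, 4
node_hv = torchhd.random(num_nodes, DIM)  # stand-in for self.node_ids.weight
level_hv = torchhd.level(num_nodes, DIM)  # stand-in for self.levels.weight
degree = torch.tensor([2, 1, 2, 1])       # toy per-node degrees

node_id_hvs = torchhd.bind(node_hv, level_hv[degree])  # node id bound with level

row = torch.tensor([0, 0, 2])             # toy undirected edge list
col = torch.tensor([1, 2, 3])
edge_hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col])  # one hv per edge
graph_hv = torchhd.multiset(edge_hvs)     # bundle into one graph hypervector
```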
@@ -147,13 +144,14 @@ def forward(self, x):
             model.add(samples_hv, samples.y)
     train_t = time.time() - train_t
     accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes)
-    f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True)
-    #f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
-
+    f1 = torchmetrics.F1Score(
+        num_classes=graphs.num_classes, average="macro", multiclass=True
+    )
+    # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes)
 
     test_t = time.time()
     with torch.no_grad():
-        if VSA != 'BSC':
+        if VSA != "BSC":
             model.normalize()
 
         for samples in tqdm(test_ld, desc="Testing"):
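Note: around the collapsed loop bodies, this is the standard torchhd Centroid protocol: training accumulates each encoded graph into its class prototype, and testing compares against the prototypes (normalized first for every model except BSC, as above). One caveat: the `F1Score(..., multiclass=True)` signature predates torchmetrics 0.11, which instead expects `F1Score(task="multiclass", ...)` as in the commented-out line. A minimal sketch of the train/test flow; the `dot=True` call mirrors common torchhd usage, since the actual test-loop lines are collapsed here.

```python
import torch
import torchhd
from torchhd.models import Centroid

DIM, num_classes = 10000, 2
model = Centroid(DIM, num_classes)

graph_hv = torchhd.random(1, DIM)   # stand-in for an encoded graph
label = torch.tensor([1])

model.add(graph_hv, label)          # training: bundle into the class prototype

model.normalize()                   # scale prototypes before inference
scores = model(graph_hv, dot=True)  # similarity against each class prototype
pred = scores.argmax(dim=-1)        # predicted class, as fed to the metrics
```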
@@ -164,15 +162,15 @@ def forward(self, x):
             accuracy.update(outputs.cpu(), samples.y)
             f1.update(outputs.cpu(), samples.y)
     test_t = time.time() - test_t
-    acc = (accuracy.compute().item() * 100)
-    f = (f1.compute().item() * 100)
+    acc = accuracy.compute().item() * 100
+    f = f1.compute().item() * 100
     return acc, f, train_t, test_t
 
 
 REPETITIONS = 50
-RANDOMNESS = ['random']
-DATASET = ['PTC_FM','MUTAG','NCI1','ENZYMES','PROTEINS','DD']
-VSAS = ['BSC','MAP','HRR','FHRR']
+RANDOMNESS = ["random"]
+DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"]
+VSAS = ["BSC", "MAP", "HRR", "FHRR"]
 
 
 for VSA in VSAS:
@@ -193,13 +191,34 @@ def forward(self, x):
             f1_aux.append(f1)
             train_aux.append(train_t)
             test_aux.append(test_t)
-        acc_final.append(round(sum(acc_aux)/REPETITIONS, 2))
-        f1_final.append(round(sum(f1_aux)/REPETITIONS,2))
-        train_final.append(round(sum(train_aux)/REPETITIONS,2))
-        test_final.append(round(sum(test_aux)/REPETITIONS,2))
+        acc_final.append(round(sum(acc_aux) / REPETITIONS, 2))
+        f1_final.append(round(sum(f1_aux) / REPETITIONS, 2))
+        train_final.append(round(sum(train_aux) / REPETITIONS, 2))
+        test_final.append(round(sum(test_aux) / REPETITIONS, 2))
 
-        with open(csv_file, mode='a', newline='') as file:
+        with open(csv_file, mode="a", newline="") as file:
             writer = csv.writer(file)
-            writer.writerow(['dataset','dimensions','train_time','test_time','accuracy','f1','VSA'])
-            writer.writerows([[d,DIM,train_final[0],test_final[0],acc_final[0],f1_final[0],VSA]])
-
+            writer.writerow(
+                [
+                    "dataset",
+                    "dimensions",
+                    "train_time",
+                    "test_time",
+                    "accuracy",
+                    "f1",
+                    "VSA",
+                ]
+            )
+            writer.writerows(
+                [
+                    [
+                        d,
+                        DIM,
+                        train_final[0],
+                        test_final[0],
+                        acc_final[0],
+                        f1_final[0],
+                        VSA,
+                    ]
+                ]
+            )
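Note: the writer appends one header row plus one data row per dataset inside the loop, so each timestamped CSV ends up with repeated headers. Also worth noting, though the loop initialization is collapsed here: every row reads element `[0]` of the `*_final` lists, which only reports the current dataset's averages if those lists are reset per dataset. A small reader sketch for these files; the path is hypothetical, real files are named `basic_centrality/result<timestamp>.csv`:

```python
import csv

# Hypothetical filename; real files are basic_centrality/result<timestamp>.csv.
path = "basic_centrality/result_example.csv"
with open(path, newline="") as f:
    for row in csv.reader(f):
        if row and row[0] != "dataset":  # skip the repeated header rows
            dataset, dim, train_t, test_t, acc, f1, vsa = row
            print(f"{dataset} ({vsa}): acc={acc}%  f1={f1}%")
```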
