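"""gcn_example.py

Trains a two-layer GCN on each cross-validation split provided by
ctg_benchmark and reports the mean micro accuracy and class-averaged
accuracy (ignoring the embryo sac class) across all splits.
"""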
from ctg_benchmark.loaders.torch_loader import get_cross_validation_loaders
from ctg_benchmark.evaluation.metrics import NodeClassificationMetrics, aggregate_class
import torch
import numpy as np
from tqdm import trange
from torch_geometric.nn.models import GCN
from torch.optim import Adam
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def simple_trainer(trainer_loader):
    # two-layer GCN: 74 input node features, 9 output classes
    model = GCN(in_channels=74, hidden_channels=128, num_layers=2, out_channels=9, dropout=0.5)
    model = model.to(device)
    optim = Adam(params=model.parameters(), lr=1e-2, weight_decay=1e-5)

    t_range = trange(50, desc=f'Epoch: {0:03d}, training loss: {0.0:.2f}')
    # basic training loop
    for epoch in t_range:
        loss_epoch = 0
        for batch in trainer_loader:
            optim.zero_grad()
            batch = batch.to(device)
            pred = model(batch.x, batch.edge_index)
            logits = torch.log_softmax(pred, 1)
            loss = F.nll_loss(logits, batch.y)
            loss.backward()
            optim.step()
            loss_epoch += loss.item()
        t_range.set_description(f'Epoch: {epoch + 1:03d}, training loss: {loss_epoch / len(trainer_loader):.2f}')
        t_range.refresh()
    return model


def validation(validation_loader, model):
    # set up evaluation
    eval_metrics = NodeClassificationMetrics(num_classes=9)
    accuracy_records, accuracy_class_records = [], []

    model.eval()
    with torch.no_grad():
        for val_batch in validation_loader:
            val_batch = val_batch.to(device)
            pred = model(val_batch.x, val_batch.edge_index)
            logits = torch.log_softmax(pred, 1)
            pred = logits.max(1)[1]

            # results is a dictionary containing a large number of classification metrics
            results = eval_metrics.compute_metrics(pred.cpu(), val_batch.y.cpu())
            acc = results['accuracy_micro']
            # aggregate_class averages the per-class accuracies, ignoring the embryo sac class (index 7)
            acc_class, _ = aggregate_class(results['accuracy_class'], index=7)

            accuracy_records.append(acc)
            accuracy_class_records.append(acc_class)
    return accuracy_records, accuracy_class_records


def main():
    # create one train/validation loader pair per cross-validation split
    loader = get_cross_validation_loaders(root='./ctg_data', batch_size=1, shuffle=True, grs=('label_grs_surface',))

    accuracy_records, accuracy_class_records = [], []
    for split, split_loader in loader.items():
        training_loader, validation_loader = split_loader['train'], split_loader['val']
        model = simple_trainer(training_loader)
        split_accuracy_records, split_accuracy_class_records = validation(validation_loader, model)
        accuracy_records += split_accuracy_records
        accuracy_class_records += split_accuracy_class_records

    # report results averaged over all splits
    print('\nGCN results:')
    print(f'Accuracy {np.mean(accuracy_records):.3f} std: {np.std(accuracy_records):.3f}')
    print(f'Class Accuracy {np.mean(accuracy_class_records):.3f} std: {np.std(accuracy_class_records):.3f}')


if __name__ == '__main__':
    main()