# Graph-NN.py
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Create a graph representation of a simple computer network: a chain A - B - C - D - E
G = nx.Graph()
G.add_nodes_from(['A', 'B', 'C', 'D', 'E'])
G.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E')])
# Define the GNN architecture: a simple two-layer message-passing network.
# Per-node feature transforms use nn.Linear; the original nn.Conv1d layers with
# kernel_size=1 compute the same per-node transform but would not accept the
# (num_nodes, num_features) tensors used below. Neighbourhood aggregation is a
# dense matrix product with the adjacency matrix.
class GNN(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(GNN, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x, adjacency_matrix):
        x = self.fc1(x)                      # per-node feature transform
        x = torch.relu(x)
        x = torch.mm(adjacency_matrix, x)    # aggregate features from neighbouring nodes
        x = self.fc2(x)                      # per-node output projection
        return x
# Define the input and target data
input_data = np.random.rand(5, 1) # random input feature for each node
target_data = np.random.rand(5, 1) # random target feature for each node
input_data = torch.from_numpy(input_data).float()
target_data = torch.from_numpy(target_data).float()
# Convert the graph to a dense adjacency matrix
# (nx.to_numpy_array replaces nx.to_numpy_matrix, which was removed in NetworkX 3.x)
adjacency_matrix = nx.to_numpy_array(G)
adjacency_matrix = torch.from_numpy(adjacency_matrix).float()
# Initialize the GNN
gnn = GNN(1, 8, 1)
# Define the loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(gnn.parameters())
# Train the GNN
for epoch in range(100):
    output = gnn(input_data, adjacency_matrix)
    loss = criterion(output, target_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print('Training complete')
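# NOTE: the original script references a FuzzyLogic() module inside FuzzyGNN but
# never defines it. The class below is a hypothetical, minimal placeholder (an
# assumption, not the author's implementation): it squashes each node's raw
# output into a [0, 1] fuzzy membership degree with a sigmoid. Swap in the real
# fuzzy inference module if one is defined elsewhere in the project.
class FuzzyLogic(nn.Module):
    def forward(self, x):
        # Interpret each raw node score as a membership degree in [0, 1]
        return torch.sigmoid(x)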
# Combine the fuzzy logic module with the GNN
class FuzzyGNN(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(FuzzyGNN, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.fuzzy = FuzzyLogic()

    def forward(self, x, adjacency_matrix):
        x = self.fc1(x)
        x = torch.relu(x)
        x = torch.mm(adjacency_matrix, x)    # aggregate features from neighbouring nodes
        x = self.fc2(x)
        x = self.fuzzy(x)                    # apply fuzzy logic to node outputs
        return x
# Test the new model on a new dummy network
dummy_network = nx.Graph()
dummy_network.add_nodes_from(['A', 'B', 'C'])
dummy_network.add_edges_from([('A', 'B'), ('B', 'C')])
dummy_input_data = np.random.rand(3, 1) # random input feature for each node
dummy_target_data = np.random.rand(3, 1) # random target feature for each node
dummy_input_data = torch.from_numpy(dummy_input_data).float()
dummy_target_data = torch.from_numpy(dummy_target_data).float()
dummy_adjacency_matrix = nx.to_numpy_array(dummy_network)
dummy_adjacency_matrix = torch.from_numpy(dummy_adjacency_matrix).float()
fuzzy_gnn = FuzzyGNN(1, 8, 1)
# Use a fresh optimizer bound to the FuzzyGNN's parameters; the earlier
# optimizer only tracks the plain GNN's parameters.
fuzzy_optimizer = optim.Adam(fuzzy_gnn.parameters())
for epoch in range(100):
    output = fuzzy_gnn(dummy_input_data, dummy_adjacency_matrix)
    loss = criterion(output, dummy_target_data)
    fuzzy_optimizer.zero_grad()
    loss.backward()
    fuzzy_optimizer.step()
print('Testing complete')
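# A minimal inspection sketch (an addition, not part of the original script):
# print the trained models' per-node predictions so the results of the two
# training loops above can be eyeballed.
with torch.no_grad():
    print('GNN predictions on the original network:')
    print(gnn(input_data, adjacency_matrix).squeeze().tolist())
    print('FuzzyGNN predictions on the dummy network:')
    print(fuzzy_gnn(dummy_input_data, dummy_adjacency_matrix).squeeze().tolist())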