Skip to content

Commit

Permalink
Fixed codestyle
Browse files Browse the repository at this point in the history
  • Loading branch information
Gaurav S Deshmukh committed Sep 23, 2023
1 parent 2671896 commit 79a9704
Showing 1 changed file with 66 additions and 54 deletions.
120 changes: 66 additions & 54 deletions src/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@
import torch.nn as nn
import torch_geometric.nn as gnn


class MultiGCN(nn.Module):
"""Class to customize the graph neural network."""

def __init__(self, partition_configs):
"""Initialize the graph neural network.
Expand All @@ -15,17 +17,17 @@ def __init__(self, partition_configs):
List of dictionaries containing parameters for the GNN for each
partition. The number of different GNNs are judged based on the
size of the list. Each partition config should contain the following
keys: n_conv (number of convolutional layers, int), n_hidden (number
keys: n_conv (number of convolutional layers, int), n_hidden (number
of hidden layers, int), conv_size (feature size before convolution, int)
hidden_size (nodes per hidden layer node, int), dropout (dropout
probability for hidden layers, float), conv_type (type of convolution
hidden_size (nodes per hidden layer node, int), dropout (dropout
probability for hidden layers, float), conv_type (type of convolution
layer, str; currently only "CGConv" is supported), pool_type
(type of pooling layer, str; currently "add" and "mean" are supported),
num_node_features (number of node features, int), num_edge_features
(number of edge features, int).
"""
super().__init__()

# Store hyperparameters
self.n_conv = [config["n_conv"] for config in partition_configs]
self.n_hidden = [config["n_hidden"] for config in partition_configs]
Expand Down Expand Up @@ -74,24 +76,27 @@ def __init__(self, partition_configs):
self.hidden_layers = []
for i in range(self.n_partitions):
self.hidden_layers.append(
nn.Sequential(*([
nn.Linear(self.hidden_size[i], self.hidden_size[i]),
nn.LeakyReLU(inplace=True),
nn.Dropout(p=self.dropout[i]),
] * (self.n_hidden[i] - 1) +
[
nn.Linear(self.hidden_size[i], 1),
nn.LeakyReLU(inplace=True),
nn.Dropout(p=self.dropout[i]),
])
nn.Sequential(
*(
[
nn.Linear(self.hidden_size[i], self.hidden_size[i]),
nn.LeakyReLU(inplace=True),
nn.Dropout(p=self.dropout[i]),
]
* (self.n_hidden[i] - 1)
+ [
nn.Linear(self.hidden_size[i], 1),
nn.LeakyReLU(inplace=True),
nn.Dropout(p=self.dropout[i]),
]
)
)
)

# Final linear layer
# TODO: replace 1 with multiple outputs
self.final_lin_transform = nn.Linear(self.n_partitions, 1)


def init_conv_layers(self):
"""Initialize convolutional layers."""
self.conv_layers = []
Expand All @@ -105,10 +110,10 @@ def init_conv_layers(self):
dim=self.num_edge_features[i],
batch_norm=True,
),
nn.LeakyReLU(inplace=True)
nn.LeakyReLU(inplace=True),
]
part_conv_layers.extend(conv_layer)

self.conv_layers.append(nn.ModuleList(part_conv_layers))

def forward(self, data_objects):
Expand All @@ -130,13 +135,16 @@ def forward(self, data_objects):
# For each data object
for i, data in enumerate(data_objects):
# Apply initial transform
conv_data = self.init_transform[i](data.x)
conv_data = self.init_transform[i](data.x)

# Apply convolutional layers
for layer in self.conv_layers[i]:
if isinstance(layer, gnn.MessagePassing):
conv_data = layer(x=conv_data, edge_index=data.edge_index,
edge_attr=data.edge_attr)
conv_data = layer(
x=conv_data,
edge_index=data.edge_index,
edge_attr=data.edge_attr,
)
else:
conv_data = layer(conv_data)

Expand All @@ -157,12 +165,16 @@ def forward(self, data_objects):
output = self.final_lin_transform(contributions)

return {"output": output, "contributions": contributions}



if __name__ == "__main__":
from pathlib import Path

from ase.io import read
from data import AtomsDatapoints

from constants import REPO_PATH
from pathlib import Path
from data import AtomsDatapoints

# Test for one tensor
# Create datapoints
data_root_path = Path(REPO_PATH) / "data" / "S_calcs"
Expand All @@ -185,36 +197,36 @@ def forward(self, data_objects):

# Get result
partition_configs = [
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[0].num_node_features,
"num_edge_features": data_objects[0].num_edge_features,
"conv_type": "CGConv",
},
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[1].num_node_features,
"num_edge_features": data_objects[1].num_edge_features,
"conv_type": "CGConv",
},
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[2].num_node_features,
"num_edge_features": data_objects[2].num_edge_features,
"conv_type": "CGConv",
}
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[0].num_node_features,
"num_edge_features": data_objects[0].num_edge_features,
"conv_type": "CGConv",
},
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[1].num_node_features,
"num_edge_features": data_objects[1].num_edge_features,
"conv_type": "CGConv",
},
{
"n_conv": 3,
"n_hidden": 3,
"hidden_size": 30,
"conv_size": 40,
"dropout": 0.1,
"num_node_features": data_objects[2].num_node_features,
"num_edge_features": data_objects[2].num_edge_features,
"conv_type": "CGConv",
},
]
net = MultiGCN(partition_configs)
result_dict = net(data_objects)
result_dict = net(data_objects)

0 comments on commit 79a9704

Please sign in to comment.