Commit 27c0630

clean to pylint norm

Guillaume Levy committed Dec 20, 2022
1 parent 8b400d8 commit 27c0630

Showing 10 changed files with 128 additions and 218 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -1,5 +1,5 @@
 #Ignore Data and the previous models
-GRAND_DATA/
+GRAND_DATA/GP300Outbox
 GRAND_ML_model/
 GrandDataset**/
 */processed/
8 changes: 4 additions & 4 deletions core/create_dataset.py
@@ -9,7 +9,7 @@
 import numpy as np
 import torch_geometric as tg
 
-from core.utils import computeNeighbors, computeNeighborsKDTree
+from core.utils import compute_neighbors, compute_neighbor_kdree
 from torch_geometric.data import InMemoryDataset
 from scipy import signal
 import random
@@ -92,10 +92,10 @@ def compute_edges(antenna_pos:np.ndarray, has_fix_degree:bool) -> Tuple[np.ndarray, np.ndarray]:
         Tuple(np.ndarray, np.ndarray): the indices in the array of the nodes that need to be connected, as well as their distances
     """
     if has_fix_degree:
-        edge_index, edge_dist = computeNeighbors(antenna_pos)
+        edge_index, edge_dist = compute_neighbors(antenna_pos)
         edge_index, edge_dist = tg.utils.to_undirected(torch.tensor(edge_index, dtype=torch.long).t().contiguous(), edge_attr=edge_dist, reduce="mean")
     else:
-        edge_index = computeNeighborsKDTree(antenna_pos)
+        edge_index = compute_neighbor_kdree(antenna_pos)
         edge_index = tg.utils.to_undirected(torch.tensor(edge_index, dtype=torch.long).t().contiguous())
         edge_dist = None
@@ -519,7 +519,7 @@ def process(self):
             obs_lst.append(obs)
             label_lst.append(energy)
 
-            edge_index,_ = computeNeighbors(antenna_pos_corr)
+            edge_index,_ = compute_neighbors(antenna_pos_corr)
             edge_index = np.array(list(edge_index)) #Transform in array
             edge_index_mirrored = edge_index[:, [1, 0]]
             edge_index = np.concatenate((edge_index, edge_index_mirrored), axis=0) #To have the edges in the 2 ways
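The mirroring step above is how the dataset builder makes its edge list undirected: every directed pair (i, j) is duplicated as (j, i). A minimal standalone sketch of the idiom (toy pairs, illustrative only):

    import numpy as np

    # each row is a directed edge (source, target)
    edge_index = np.array([[0, 1], [1, 2]])
    # swapping the two columns yields the reversed edges
    edge_index_mirrored = edge_index[:, [1, 0]]
    # stacking both gives every edge in the two directions
    edge_index = np.concatenate((edge_index, edge_index_mirrored), axis=0)
    # edge_index is now [[0, 1], [1, 2], [1, 0], [2, 1]]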
24 changes: 15 additions & 9 deletions core/main.py
@@ -44,21 +44,24 @@ def parser_to_config():
                         type=str, default='cpu',
                         help="The device to use")
     parser.add_argument("--dropout",
-                        type=float, default=0.2,
+                        type=float, default=0,
                         help="The dropout rate")
     parser.add_argument("--d_wandb",
                         action='store_true', default=False,
                         help="Don't create a run on wandb. "
                              "It is set to true if the run is a test run")
     parser.add_argument("--embed_size",
-                        type=int, default=16,
+                        type=int, default=64,
                         help="The size of the embedding")
     parser.add_argument("--epochs",
                         type=int, default=1000,
                         help="The number of epochs to train each model")
     parser.add_argument("--fig_dir_name",
                         type=str, default=None,
                         help="Use to save the figures with a different name than the model name")
+    parser.add_argument("--keep_best_models",
+                        action="store_true", default=False,
+                        help="Keep the 5 best models for the testing")
     parser.add_argument("--loss_fn",
                         type=str, default="mse", choices=["mse", "scaled_mse", "scaled_l1"],
                         help="loss function to use")
@@ -89,6 +92,9 @@ def parser_to_config():
     parser.add_argument("--topkratio",
                         type=float, default=0.8,
                         help="The ratio to use for the topk pooling")
+    parser.add_argument("--not_drop_nodes",
+                        action='store_true', default=False,
+                        help="Train on the whole graph instead of dropping nodes randomly")
     parser.add_argument("--verbose_t",
                         type=int, default=50,
                         help="The time between each test during training")
@@ -166,12 +172,14 @@ def create_model(config_dict:Dict[str, Union[str, int, float]],
     if not config["d_wandb"] or not config["test"]:
         wandb.init(project="GNN", config=config)
     # matplotlib.use(config["matplotlib_gui"])
-    if config["seed"] == 0:
+    if config["seed"] != 0:
         torch.manual_seed(config["seed"])
         np.random.seed(seed=config["seed"])
 
     model_dir_path = "./core/Models/" + config["model_name"]
     model_class = algorithm_from_name(config["algo"])
 
+    #Create folders if they don't exist
     if not os.path.exists(model_dir_path):
         os.mkdir(model_dir_path)
 
@@ -194,7 +202,6 @@ def create_model(config_dict:Dict[str, Union[str, int, float]],
         dataset = GrandDataset(root=config["root"])
         train_dataset = dataset.train_datasets[int(config["ant_ratio_train"]*5)]
         test_dataset = dataset.test_datasets[int(config["ant_ratio_test"]*5)]
-
     else:
         raise ValueError("This dataset doesn't exist")
 
@@ -233,7 +240,7 @@ def train():
     model.train()
     for epoch in range(config["epochs"]):
         for data in train_loader:
-            if True:
+            if not config["not_drop_nodes"]:
                 data_list = data.to_data_list()
                 for graph in enumerate(data_list):
                     rand_nb = np.random.random_sample()*0.4 + 0.6
@@ -279,7 +286,7 @@ def train():
                 train_loss = 0
                 train_n_tot = 0
                 for data in train_loader:
-                    if True:
+                    if not config["not_drop_nodes"]:
                         data_list = data.to_data_list()
                         for graph in enumerate(data_list):
                             rand_nb = np.random.random_sample()*0.4 + 0.6
@@ -302,8 +309,7 @@ def train():
                 test_loss = 0
                 test_n_tot = 0
                 for data in test_loader:
-
-                    if True:
+                    if not config["not_drop_nodes"]:
                         data_list = data.to_data_list()
                         for graph in enumerate(data_list):
                             rand_nb = np.random.random_sample()*0.4 + 0.6
@@ -473,7 +479,7 @@ def test():
 
 
     # We only keep from here the best models
-    if False:
+    if config["keep_best_models"]:
         model_index = np.argsort(test_loss)[:5]
         lst_model = []
         for index in model_index:
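The repeated `if not config["not_drop_nodes"]` guards enable an augmentation that keeps a random 60-100% of each graph's nodes (rand_nb = np.random.random_sample()*0.4 + 0.6). A self-contained sketch of that idea using torch_geometric's subgraph utility; the helper name and exact wiring are illustrative, not the repo's code:

    import torch
    import numpy as np
    from torch_geometric.utils import subgraph

    def drop_nodes(x, edge_index):
        """Keep a random 60-100% of the nodes and relabel the surviving edges."""
        keep_ratio = np.random.random_sample()*0.4 + 0.6
        num_nodes = x.size(0)
        keep = torch.randperm(num_nodes)[:int(keep_ratio*num_nodes)]
        new_edge_index, _ = subgraph(keep, edge_index,
                                     relabel_nodes=True, num_nodes=num_nodes)
        return x[keep], new_edge_index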
133 changes: 12 additions & 121 deletions core/model.py
@@ -3,19 +3,18 @@
 import torch
 import torch.nn.functional as F
 
-from torch_geometric.nn import GCNConv, GATv2Conv, SAGEConv, GatedGraphConv
+from torch_geometric.nn import GCNConv, GatedGraphConv
 import torch_geometric.nn as tgn
 
 class DenseNet(torch.nn.Module):
     """MLP model"""
     def __init__(self, in_feats: int, h_feats: int, num_classes:int, config=None):
         super().__init__()
         self.dropout_rate = config["dropout"]
-        self.dense1 = torch.nn.Linear(in_feats, h_feats)
-        self.dense2 = torch.nn.Linear(h_feats, h_feats)
-        self.dense3 = torch.nn.Linear(h_feats, h_feats)
-        self.dense4 = torch.nn.Linear(h_feats, h_feats)
-        self.dense5 = torch.nn.Linear(h_feats, num_classes)
+        self.num_layers = config["num_layers"]-2
+        self.dense_in = torch.nn.Linear(in_feats, h_feats)
+        self.dense = torch.nn.ModuleList([torch.nn.Linear(h_feats, h_feats) for _ in range(self.num_layers)])
+        self.dense_out = torch.nn.Linear(h_feats, num_classes)
 
 
     def forward(self, inputs, edge_index, batch, edge_weight=None):
@@ -27,11 +26,10 @@ def forward(self, inputs, edge_index, batch, edge_weight=None):
         if torch.any(torch.isnan(flatten)):
             print("Nan detected in the prediction")
 
-        h_emb = F.relu(self.dense1(flatten))
-        h_emb = F.relu(self.dense2(F.dropout(h_emb, p=self.dropout_rate)))
-        h_emb = F.relu(self.dense3(F.dropout(h_emb, p=self.dropout_rate)))
-        h_emb = F.relu(self.dense4(F.dropout(h_emb, p=self.dropout_rate)))
-        outputs = self.dense5(F.dropout(h_emb, p=self.dropout_rate))
+        h_emb = F.relu(self.dense_in(flatten))
+        for layer in self.dense:
+            h_emb = F.relu(layer(F.dropout(h_emb, p=self.dropout_rate)))
+        outputs = self.dense_out(F.dropout(h_emb, p=self.dropout_rate))
 
         return outputs
 
@@ -66,7 +64,7 @@ def __init__(self, in_feats: int, h_feats: int, num_classes:int, config=None):
         self.convinput = GCNBlock(in_feats, h_feats)
         self.convblocks = torch.nn.ModuleList([GCNBlock(h_feats, h_feats, add_residue=True) for _ in range(self.num_layers)])
 
-        self.dense1 = torch.nn.Linear(2*h_feats, 4*h_feats)
+        self.dense1 = torch.nn.Linear(h_feats, 4*h_feats)
         self.dense2 = torch.nn.Linear(4*h_feats, num_classes)
 
 

return outputs


class TopkGAT(torch.nn.Module):
def __init__(self, in_feats: int, h_feats: int, num_classes:int, config=None):
super().__init__()
self.dropout_rate = config["dropout"]

self.conv1 = GATv2Conv(in_feats, h_feats, dropout=self.dropout_rate)
self.pool1 = tgn.pool.TopKPooling(h_feats, ratio=0.8)
self.batch_norm1 = tgn.norm.BatchNorm(h_feats)

self.conv2 = GATv2Conv(h_feats, h_feats, dropout=self.dropout_rate)
self.pool2 = tgn.pool.TopKPooling(h_feats, ratio=0.8)
self.batch_norm2 = tgn.norm.BatchNorm(h_feats)

self.conv3 = GATv2Conv(h_feats, h_feats, dropout=self.dropout_rate)
self.pool3 = tgn.pool.TopKPooling(h_feats, ratio=0.8)
self.batch_norm3 = tgn.norm.BatchNorm(h_feats)

self.conv4 = GATv2Conv(h_feats, h_feats, dropout=self.dropout_rate)
self.pool4 = tgn.pool.TopKPooling(h_feats, ratio=0.8)
self.batch_norm4 = tgn.norm.BatchNorm(h_feats)

self.dense1 = torch.nn.Linear(8*h_feats, 4*h_feats)
self.dense2 = torch.nn.Linear(4*h_feats, num_classes)


def forward(self, x, edge_index, batch):
"""equivalent to __call__"""
h = F.relu(self.batch_norm1(self.conv1(x, edge_index)))
h1, edge_index, _, batch, _, _ = self.pool1(h, edge_index, batch=batch) ###Relu before

flat_1 = torch.cat([tgn.pool.global_add_pool(h1, batch=batch), tgn.pool.global_max_pool(h1, batch=batch)], axis=-1)

h = F.relu(self.batch_norm2(self.conv2(h1, edge_index)))
h2, edge_index, _, batch, _, _ = self.pool2(h, edge_index, batch=batch)

flat_2 = torch.cat([tgn.pool.global_add_pool(h2, batch=batch), tgn.pool.global_max_pool(h2, batch=batch)], axis=-1)

h = F.relu(self.batch_norm3(self.conv3(h2, edge_index)))
h3, edge_index, _, batch, _, _ = self.pool3(h, edge_index, batch=batch)

flat_3 = torch.cat([tgn.pool.global_add_pool(h3, batch=batch), tgn.pool.global_max_pool(h3, batch=batch)], axis=-1)

h = F.relu(self.batch_norm4(self.conv4(h3, edge_index)))
if torch.any(torch.isnan(self.pool4(h, edge_index, batch=batch)[0])):
print()
h4, edge_index, _, batch, _, _ = self.pool4(h, edge_index, batch=batch)

flat_4 = torch.cat([tgn.pool.global_add_pool(h4, batch=batch), tgn.pool.global_max_pool(h4, batch=batch)], axis=-1)

#flatten = flat_1 + flat_2 + flat_3 + flat_4
flatten = torch.cat([flat_1, flat_2, flat_3, flat_4], axis=-1)
if torch.any(torch.isnan(flatten)):
print()

h = F.relu(self.dense1(F.dropout(flatten, p=self.dropout_rate, training=self.training)))
outputs = self.dense2(F.dropout(h, p=self.dropout_rate, training=self.training))

return outputs

class TopkGCNBlock(torch.nn.Module):
"""TopkGCN base block"""
def __init__(self, in_feats: int, h_feats: int, ratio: float=0.8, add_residue=False):
@@ -181,6 +119,7 @@ def forward(self, inputs, edge_index, batch, edge_weight):
         return h_emb, flat, edge_index, edge_weight, batch
 
 class TopkGCN(torch.nn.Module):
+    """GCN Hierarchical architecture with topk pooling"""
     def __init__(self, in_feats: int, h_feats: int, num_classes:int, config=None):
         super().__init__()
         self.dropout_rate = config["dropout"]
@@ -242,54 +181,8 @@ def forward(self, inputs, edge_index, batch, edge_weight=None):
 
         return outputs
 
-class TopkSAGEBlock(torch.nn.Module):
-    def __init__(self, in_feats: int, h_feats: int, dropout: float, ratio: float=0.8):
-        super().__init__()
-        self.conv = SAGEConv(in_feats, h_feats, dropout=dropout)
-        self.pool = tgn.pool.TopKPooling(h_feats, ratio=ratio)
-        self.batch_norm = tgn.norm.BatchNorm(h_feats)
-
-    def forward(self, x, edge_index, batch):
-        """equivalent to __call__"""
-        h = F.relu(self.batch_norm(self.conv(x, edge_index)))
-        h, edge_index, _, batch, _, _ = self.pool(h, edge_index, batch=batch) ###Relu before
-
-        flat_1 = torch.cat([tgn.pool.global_add_pool(h, batch=batch), tgn.pool.global_max_pool(h, batch=batch)], axis=-1)
-
-        return h, flat_1, edge_index, batch
-
-class TopkSAGE(torch.nn.Module):
-    def __init__(self, in_feats: int, h_feats: int, num_classes:int, config=None):
-        super().__init__()
-        self.dropout_rate = config["dropout"]
-
-        self.convblock1 = TopkSAGEBlock(in_feats, h_feats, dropout=self.dropout_rate)
-        self.convblock2 = TopkSAGEBlock(h_feats, h_feats, dropout=self.dropout_rate)
-        self.convblock3 = TopkSAGEBlock(h_feats, h_feats, dropout=self.dropout_rate)
-        self.convblock4 = TopkSAGEBlock(h_feats, h_feats, dropout=self.dropout_rate)
-
-        self.dense1 = torch.nn.Linear(8*h_feats, 4*h_feats)
-        self.dense2 = torch.nn.Linear(4*h_feats, num_classes)
-
-
-    def forward(self, x, edge_index, batch):
-        """equivalent to __call__"""
-        h1, flat_1, edge_index, batch = self.convblock1(x, edge_index, batch)
-        h2, flat_2, edge_index, batch = self.convblock2(h1, edge_index, batch)
-        h3, flat_3, edge_index, batch = self.convblock3(h2, edge_index, batch)
-        h4, flat_4, edge_index, batch = self.convblock4(h3, edge_index, batch)
-
-        #flatten = flat_1 + flat_2 + flat_3 + flat_4
-        flatten = torch.cat([flat_1, flat_2, flat_3, flat_4], axis=-1)
-        if torch.any(torch.isnan(flatten)):
-            print()
-
-        h = F.relu(self.dense1(F.dropout(flatten, p=self.dropout_rate, training=self.training)))
-        outputs = self.dense2(F.dropout(h, p=self.dropout_rate, training=self.training))
-
-        return outputs
-
 class SimpleSignalModel(torch.nn.Module):
+    """Model containing a cnn network to work directly on the signals"""
     def __init__(self, last_activation:str=None):
         super(SimpleSignalModel, self).__init__()
         self.layers = []
@@ -390,7 +283,5 @@ def algorithm_from_name(name:str):
         "GCN": GCN,
         "TopkGCN": TopkGCN,
         "GatedGCN": GatedGCN,
-        "TopkGAT": TopkGAT,
-        "TopkSAGE": TopkSAGE,
         "Dense": DenseNet,
     }
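A quick usage sketch for the trimmed registry (the config values here are illustrative; the keys mirror the argparse flags in core/main.py):

    from core.model import algorithm_from_name

    model_class = algorithm_from_name("Dense")    # -> DenseNet
    config = {"dropout": 0.0, "num_layers": 4}    # assumed minimal config
    model = model_class(in_feats=3, h_feats=64, num_classes=1, config=config)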
39 changes: 11 additions & 28 deletions core/plot.ipynb

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions core/test_data.ipynb
@@ -29,7 +29,7 @@
 "from tqdm import tqdm\n",
 "import torch_geometric as tg\n",
 "import networkx as nx\n",
-"from utils import computeNeighbors, computeNeighborsKDTree"
+"from utils import compute_neighbors, compute_neighbor_kdree"
 ]
 },
 {
@@ -146,7 +146,7 @@
 "file = 0\n",
 "\n",
 "antenna_id, antenna_pos, efields, (energy, zenith, azimuth) = load_event(file)\n",
-"edge_index, _ = computeNeighbors(antenna_pos)\n",
+"edge_index, _ = compute_neighbors(antenna_pos)\n",
 "edge_index = np.array(list(edge_index)) #Transform in array \n",
 "edge_index_mirrored = edge_index[:, [1, 0]]\n",
 "edge_index = np.concatenate((edge_index, edge_index_mirrored), axis=0) #To have the edges in the 2 ways\n",
@@ -388,7 +388,7 @@
 " \n",
 "    n_samples += len(antenna_pos)\n",
 " \n",
-"    edge_index, _ = computeNeighbors(antenna_pos)\n",
+"    edge_index, _ = compute_neighbors(antenna_pos)\n",
 "    edge_index = np.array(list(edge_index)) #Transform in array \n",
 "    edge_index_mirrored = edge_index[:, [1, 0]]\n",
 "    edge_index = np.concatenate((edge_index, edge_index_mirrored), axis=0) #To have the edges in the 2 ways\n",
@@ -861,11 +861,11 @@
 }
 ],
 "source": [
-"from utils import computeNeighborsKDTree\n",
+"from utils import compute_neighbor_kdree\n",
 "event = 75\n",
 "antenna_pos_corr = np.array([antenna_id_to_pos[id] for id in antenna_id_all_events[event]])\n",
 "print([id for id in antenna_id_all_events[event]])\n",
-"edge_index = computeNeighborsKDTree(antenna_pos_corr, distance=1500)\n",
+"edge_index = compute_neighbor_kdree(antenna_pos_corr, distance=1500)\n",
 "print(len(edge_index), sorted(edge_index))"
 ]
 },
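The body of the renamed compute_neighbor_kdree is not shown in this diff; a plausible KD-tree neighbour search with the same call shape, assuming scipy (a sketch, not the repo's implementation):

    import numpy as np
    from scipy.spatial import cKDTree

    def compute_neighbor_kdree(antenna_pos: np.ndarray, distance: float = 1500):
        """Return the index pairs of antennas closer than `distance`."""
        tree = cKDTree(antenna_pos)
        return list(tree.query_pairs(r=distance))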
5 changes: 2 additions & 3 deletions core/test_gpu.py
@@ -1,5 +1,4 @@
-import numpy as np
 import torch
 
-
-print(torch.cuda.is_available())
+def test_is_gpu_available():
+    print(torch.cuda.is_available())
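The rewrite turns the bare print into a function that pytest can collect (e.g. `pytest core/test_gpu.py`). Because it only prints, the test passes even without a GPU; a stricter variant would assert instead (a sketch, not part of the commit):

    import torch

    def test_is_gpu_available():
        # fail the suite when CUDA is absent instead of silently printing
        assert torch.cuda.is_available()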