# -*- coding: utf-8 -*-
"""
-----------------------------------------------------------------------
Automatic detection of seizure onset in EEG datasets using
non-overlapping Sliding Windows (Epochs) and a Convolutional Neural Network (CNN)
Authors:
Emanuel Iwanow de Araujo
Sophia Bianchi Moyen
Michael Stivaktakis
-----------------------------------------------------------------------
"""
"""
--------------------------------------------------------
Importing libraries and functions
--------------------------------------------------------
"""
# Basic libraries
import numpy as np
# From the WKI competition
from wettbewerb import load_references, get_3montages, get_6montages
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.preprocessing import StandardScaler
import mne
from scipy import signal as sig
from imblearn.under_sampling import RandomUnderSampler
### if __name__ == '__main__':  # required when using multiprocessing on Windows
"""
---------------------------------------------------------------------------
Loading Dataset
---------------------------------------------------------------------------
"""
# Load and prepare the data
training_folder = "../test_5"
ids, channels, data, sampling_frequencies, reference_systems, eeg_labels = load_references(training_folder)
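# load_references returns, per recording: an id, the channel names, the raw EEG data,
# the sampling frequency, the reference system and a label tuple that is indexed below
# as (seizure_present, onset_in_s, offset_in_s).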
"""
---------------------------------------------------------------------
Parameters
---------------------------------------------------------------------
"""
N_samples = 2000 # Number of samples per division for the sliding windows (Epochs)
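# Note: the Epoch length in seconds depends on each recording's sampling frequency,
# e.g. a 2000-sample window corresponds to roughly 7.8 s at 256 Hz.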
scaler = StandardScaler() # Scaler chosen for normalization of signal
# Create an array of signals for each of the 3 montages
mont1_signal = []
mont2_signal = []
mont3_signal = []
whole_mont = [mont1_signal,mont2_signal,mont3_signal] # Array with all montages
for i, _id in enumerate(ids):
    """
    ------------------------------------------------------------------------
    ******************** Generating Epochs *********************************
    Each signal is split into smaller Epochs composed of a fixed number of samples
    ------------------------------------------------------------------------
    """
    # Get the montage data
    _montage, _montage_data, _is_missing = get_3montages(channels[i], data[i])
    # Get the sampling frequency of this recording
    _fs = sampling_frequencies[i]
    for j, signal_name in enumerate(_montage):
        # Get the signal of the current montage
        signal = _montage_data[j]
        """
        -------------------- Pre-Processing ---------------------------
        """
        # Notch filter to suppress the 50 Hz mains frequency
        signal_notch = mne.filter.notch_filter(x=signal, Fs=_fs, freqs=np.array([50.]), n_jobs=2, verbose=False)
        # Band-pass filter between 3 Hz and 40 Hz to filter out slow drifts and high-frequency noise
        signal_filter = mne.filter.filter_data(data=signal_notch, sfreq=_fs, l_freq=3, h_freq=40.0, n_jobs=2, verbose=False)
        # Number of divisions of the signal according to the user-defined N_samples per Epoch
        N_div = len(signal_filter)//N_samples
        # Normalize the data
        norm_montage_data = scaler.fit_transform(signal_filter.reshape(-1,1)).reshape(1,-1)[0]
        for k in range(N_div):
            # Append the k-th Epoch of this montage to the corresponding array
            montage_array = norm_montage_data[k*N_samples:(k+1)*N_samples]
            whole_mont[j].append(montage_array)
"""
-------------------------------------------------------------------------
Relabling dataset based on Epochs acquired from signal windowing
-------------------------------------------------------------------------
"""
labels = []
for i,_id in enumerate(ids):
if eeg_labels[i][0]:
# If signal contains seizure -> relabels each epoch according to where seizure happens
onset = eeg_labels[i][1] # Getting onset
offset = eeg_labels[i][2] # Getting offset
sample_freq = sampling_frequencies[i] # Getting frequency
total_time = len(data[i][1])/sample_freq # Getting length of signal in seconds
N_div = len(data[i][1])//N_samples # Getting number of Epochs for that signal
for num in range(N_div):
"""
If Epoch starts before/at onset and seizure continues until end of Epoch
OR
If Epoch starts after/at onset and ends before offset
>> THEN >>
Label Epoch as having seizure
"""
start_epoch_timestep = (total_time/N_div)*(num)
end_epoch_timestep = (total_time/N_div)*(num+1)
if ((start_epoch_timestep <= onset) and (end_epoch_timestep > onset)) or ((start_epoch_timestep >= onset) and (start_epoch_timestep < offset)):
# Seizure present in Epoch
labels.append([1])
else:
# No seizure present in Epoch
labels.append([0])
else:
# If signal doesn't contain seizure -> label = False all Epochs
N_div = len(data[i][1])//N_samples
for num in range(N_div):
labels.append([0])
# Reshape labels array
labels = np.reshape(labels, (1,-1))[0]
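# Optional sanity check (not in the original script): both loops cut each recording into
# len(signal)//N_samples windows, so every montage should hold exactly one Epoch per label.
assert len(mont1_signal) == len(mont2_signal) == len(mont3_signal) == len(labels), \
    "Number of Epochs and labels do not match"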
"""
-----------------------------------------------------------------
************ Dealing with Class Imbalance ***********************
To deal with the class imbalance (More Epochs with no seizure than
with seizure), we are doing an Undersampling
-----------------------------------------------------------------
"""
"""
# Instanziierung von RandomUnderSampler
undersample = RandomUnderSampler()
# Erstellen einer Funktion, die das Resampling durchführt
def resample_signal(signal, labels):
# Anwenden von RandomUnderSampler
signal_resampled, labels_resampled = undersample.fit_resample(signal, labels)
return signal_resampled, labels_resampled
# Anwenden der Funktion auf jedes Signal
mont1_signal_resampled, labels_resampled_1 = resample_signal(np.array(mont1_signal), labels)
mont2_signal_resampled, labels_resampled_2 = resample_signal(np.array(mont2_signal), labels)
mont3_signal_resampled, labels_resampled_3 = resample_signal(np.array(mont3_signal), labels)
# Sicherstellen, dass die Labels für alle Signale gleich sind, da sie das gleiche Set von Beispielen repräsentieren sollten
assert np.array_equal(labels_resampled_1, labels_resampled_2)
assert np.array_equal(labels_resampled_1, labels_resampled_3)
labels_resampled = labels_resampled_1
whole_mont_resampled = [mont1_signal_resampled,mont2_signal_resampled,mont3_signal_resampled]
whole_mont_resampled_np = np.array(whole_mont_resampled)
"""
labels_resampled = labels
whole_mont_resampled_np = np.array(whole_mont)
"""
---------------------------------------------------------------------
PyTorch Dataset class for the EEG signals and labels
---------------------------------------------------------------------
"""
# Dataset class
class EEGDataset(Dataset):
    def __init__(self, whole_mont_resampled_np, labels_resampled):
        self.data = torch.from_numpy(whole_mont_resampled_np).float()
        self.labels = torch.from_numpy(np.array(labels_resampled)).long()

    def __len__(self):
        return self.data.shape[1]  # The number of examples is the second dimension: [montages, examples, samples]

    def __getitem__(self, idx):
        # Fetch the idx-th Epoch across all montage channels
        sample = self.data[:, idx, :]  # Keeps the shape [channels, length]
        label = self.labels[idx]
        return sample, label

# Initialize the EEGDataset with the prepared data and labels
eeg_dataset = EEGDataset(whole_mont_resampled_np, labels_resampled)
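# Quick shape check (an optional addition): each item should come out as one Epoch of
# shape [3 montage channels, N_samples] with a scalar class label.
_sample0, _label0 = eeg_dataset[0]
assert _sample0.shape == (3, N_samples)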
"""
---------------------------------------------------------------------------------------------------------------------------------
CNN architecture
Convolutional Layers: Two stacked 1D convolutions capture temporal patterns in the EEG signals and increase feature depth.
Batch Normalization: Applied after each convolution to stabilize activations, enabling faster and more stable training.
ReLU Activation: Adds non-linearity after each convolution and batch normalization so complex patterns can be captured.
Max Pooling: Halves the temporal dimension after each convolution block, emphasizing dominant features and adding robustness to signal variation.
Fully Connected Layers: The flattened features are refined through three dense layers for the final classification.
Output Layer: Maps the refined features onto the two classes (seizure / no seizure Epoch).
----------------------------------------------------------------------------------------------------------------------------------
"""
# CNN model
class CNN(nn.Module):
    def __init__(self, num_classes, seq_length):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Conv1d(in_channels=3, out_channels=6, kernel_size=5),
            nn.BatchNorm1d(num_features=6),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.Conv1d(6, 16, 5),
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),
        )
        # Determine the input size of the first linear layer from the conv output
        linear_input_size = self._get_conv_output(seq_length)
        self.fc = nn.Sequential(
            nn.Linear(linear_input_size, 120),
            nn.BatchNorm1d(120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.BatchNorm1d(84),
            nn.ReLU(),
            nn.Linear(84, num_classes),
        )

    def _get_conv_output(self, shape):
        with torch.no_grad():
            dummy_input = torch.zeros(1, 3, shape)
            output = self.classifier(dummy_input)
            return output.numel()

    def forward(self, x):
        x = self.classifier(x)
        x = torch.flatten(x, start_dim=1)
        x = self.fc(x)
        return x
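# Optional smoke test (a minimal sketch, not part of the training pipeline): a dummy batch of
# 4 Epochs with 3 montage channels and N_samples time steps should yield one logit per class.
with torch.no_grad():
    _smoke_out = CNN(num_classes=2, seq_length=N_samples)(torch.zeros(4, 3, N_samples))
    assert _smoke_out.shape == (4, 2)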
"""
---------------------------------------------------------------------
Divide the dataset into Training and Validation
---------------------------------------------------------------------
"""
dataset_size = len(eeg_dataset)
train_size = int(dataset_size * 0.8)
val_size = dataset_size - train_size
train_dataset, val_dataset = random_split(eeg_dataset, [train_size, val_size])
train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32)
"""
---------------------------------------------------------------------
Define an Early Stopping class that stops the training
if the validation F1 score doesn't improve for a given number of epochs (patience)
---------------------------------------------------------------------
"""
class EarlyStopping:
    def __init__(self, patience=5, verbose=True, delta=0.0, path='checkpoint2.pt', trace_func=print):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.f1_score_max = -np.inf
        self.delta = delta
        self.path = path
        self.trace_func = trace_func

    def __call__(self, f1_score, model):
        score = f1_score
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(f1_score, model)
        elif score < self.best_score + self.delta:
            self.counter += 1
            self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(f1_score, model)
            self.counter = 0

    def save_checkpoint(self, f1_score, model):
        if self.verbose:
            self.trace_func(f'Validation F1 Score increased ({self.f1_score_max:.6f} --> {f1_score:.6f}). Saving model...')
        torch.save(model.state_dict(), self.path)
        self.f1_score_max = f1_score
# Set the device for model training to GPU if available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Initialize the CNN with the number of classes and the Epoch length, and move it to the configured device
model = CNN(num_classes=2, seq_length=N_samples).to(device)
# Define the loss function; class weights could counteract class imbalance, but are currently uniform (no reweighting)
weights = torch.tensor([1.0, 1.0], device=device)
criterion = nn.CrossEntropyLoss(weight=weights)
"""
--------------------------------------------------------------------------------------------------------------------
Set up the optimizer for training the model. We're using Stochastic Gradient Descent (SGD).
It adjusts the model's weights to minimize errors, with an initial learning rate of 0.05.
Momentum helps speed up training in the right direction, and weight decay prevents overfitting.
After setting up the optimizer, we use a scheduler to adjust the learning rate over time.
The scheduler lowers the rate along a cosine curve, helping to fine-tune the model as training progresses.
It is stepped once per epoch and completes one annealing cycle every T_max=100 epochs.
---------------------------------------------------------------------------------------------------------------------
"""
optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
# Initialize early stopping mechanism to prevent overfitting.
early_stopping = EarlyStopping(patience=50, verbose=True, delta=0.01, path='checkpoint2.pt')
"""
--------------------------------------------------------------------------------------------------------------------
Defining the Training Function to Train the CNN on EEG Data
Training: For each epoch, it trains the model with batches of EEG data, updates the model's weights to reduce errors,
and calculates training accuracy metrics.
Validation: After training, it checks the model's performance on a separate set of data (validation) to ensure it's
learning correctly and not just memorizing.
Metrics Calculation: It computes F1 score, precision, and recall to understand how well the model predicts across
different classes.
Early Stopping: Stops training early if the model doesn't improve, saving time and preventing overfitting.
Learning Rate Adjustment: Changes the learning rate over time to help the model learn better.
---------------------------------------------------------------------------------------------------------------------
"""
def train(model, train_dataloader, val_dataloader, criterion, optimizer, scheduler, epochs, device):
    for epoch in range(epochs):
        # ---- Training phase ----
        model.train()
        train_losses = []
        train_predictions = []
        train_targets = []
        for inputs, labels in train_dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
            _, predictions = torch.max(outputs, 1)
            train_predictions.extend(predictions.cpu().numpy())
            train_targets.extend(labels.cpu().numpy())
        train_f1 = f1_score(train_targets, train_predictions, average='macro')
        train_precision = precision_score(train_targets, train_predictions, average='macro')
        train_recall = recall_score(train_targets, train_predictions, average='macro')
        # ---- Validation phase ----
        val_losses = []
        val_predictions = []
        val_targets = []
        model.eval()
        with torch.no_grad():
            for inputs, labels in val_dataloader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_losses.append(loss.item())
                _, predictions = torch.max(outputs, 1)
                val_predictions.extend(predictions.cpu().numpy())
                val_targets.extend(labels.cpu().numpy())
        val_f1 = f1_score(val_targets, val_predictions, average='macro')
        val_precision = precision_score(val_targets, val_predictions, average='macro')
        val_recall = recall_score(val_targets, val_predictions, average='macro')
        print(f'Epoch {epoch+1}: Train F1: {train_f1:.4f}, Precision: {train_precision:.4f}, Recall: {train_recall:.4f}')
        print(f'Epoch {epoch+1}: Val F1: {val_f1:.4f}, Precision: {val_precision:.4f}, Recall: {val_recall:.4f}')
        # Early stopping on the validation F1 score; the best model is saved as a checkpoint
        early_stopping(val_f1, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
        scheduler.step()
train(model, train_dataloader, val_dataloader, criterion, optimizer, scheduler, epochs=300, device=device)
# After training, load the best (checkpointed) model
model.load_state_dict(torch.load('checkpoint2.pt'))
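# Minimal inference sketch (an added example, not part of the original script): classify a
# single pre-processed Epoch with the restored model; class 1 = Epoch contains a seizure.
model.eval()
with torch.no_grad():
    example_epoch, _ = eeg_dataset[0]                       # shape [3, N_samples]
    logits = model(example_epoch.unsqueeze(0).to(device))   # add batch dimension -> [1, 2]
    predicted_class = int(torch.argmax(logits, dim=1))
    print(f'Predicted class for the first Epoch: {predicted_class}')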