Commit fb7cb10e authored by Ard Kastrati

Implemented Logging

parent d6a08108
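
In short, this commit routes the project's console output through Python's standard logging module: config.py gains info_log and batches_log paths inside the run's model_dir, the main() entry point calls logging.basicConfig with the info_log file, print calls become logging.info, and the Keras classifiers (DeepEye, EEGNet, InceptionTime) attach a CSVLogger callback that appends per-epoch metrics to batches_log. A minimal sketch of the resulting setup (the hard-coded paths are placeholders; the real values come from config.py further down):

    import logging
    from keras.callbacks import CSVLogger

    # In the project the filenames come from config['info_log'] and config['batches_log'].
    logging.basicConfig(filename='runs/info.log', level=logging.INFO)
    logging.info('Started the Logging')

    # Passed to model.fit(..., callbacks=[csv_logger]); one row per epoch is appended.
    csv_logger = CSVLogger('runs/batches.log', append=True, separator=';')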

@@ -5,3 +5,4 @@
 /data/*
 all_EEGprocuesan.mat
 all_trialinfoprosan.mat
+/runs/*

@@ -3,7 +3,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
 import torch.utils.data
+import logging
 from CNN.split_cnn import *
 class Net(nn.Module):

@@ -68,11 +68,11 @@ def train(trainloader, net, optimizer, criterion,cluster_index, epoch=50):
         loss.backward()
         optimizer.step()
-        # print statistics
+        # log statistics
         running_loss += loss.item()
-        if i % 200 == 0:    # print every 2000 mini-batches
-            print('[%d, %5d] loss: %.3f' %
+        if i % 200 == 0:    # log every 200 mini-batches
+            logging.info('[%d, %5d] loss: %.3f' %
                   (epoch + 1, i + 1, running_loss / 200))
             running_loss = 0.0
-    print('Finished Training')
+    logging.info('Finished Training')

@@ -7,6 +7,7 @@
 from utils.utils import *
 from config import config
+import logging
 class Net(nn.Module):

@@ -69,15 +70,15 @@ def train(trainloader, net, optimizer, criterion, epoch=50):
         loss.backward()
         optimizer.step()
-        # print statistics
+        # log statistics
         run_loss = loss.item()
        running_loss+=run_loss
        loss_values.append(run_loss)
-        if i % 200 == 0:    # print every 200 mini-batches
-            print('[%d, %5d] loss: %.3f' %
+        if i % 200 == 0:    # log every 200 mini-batches
+            logging.info('[%d, %5d] loss: %.3f' %
                   (epoch + 1, i + 1, running_loss / 200))
             running_loss = 0.0
    los=np.mean(loss_values)
    loss.append(los)
    return loss
-    print('Finished Training')
+    logging.info('Finished Training')

@@ -5,6 +5,7 @@ from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
 from tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D
 from tensorflow.keras.layers import BatchNormalization
 from tensorflow.keras.constraints import max_norm
+from keras.callbacks import CSVLogger
 from config import config
 from utils.utils import *

@@ -170,6 +171,7 @@ class Classifier_DEEPEYE:
     def fit(self, deepeye_x, y):
         self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
-        hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=10)
+        csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
+        hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=10, callbacks=[csv_logger])
         return hist
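
CSVLogger streams one row per epoch (the epoch index plus the compiled metrics, here loss, accuracy, val_loss and val_accuracy) to the file given as its first argument, and append=True keeps rows from successive fit() calls in the same file. A small sketch of reading the log back, assuming pandas (already used elsewhere in this repo) and the ';' separator chosen here; the path is a placeholder for whatever config['batches_log'] points to:

    import pandas as pd

    log = pd.read_csv('batches.log', sep=';')   # placeholder path
    print(log[['epoch', 'loss', 'val_loss']].tail())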

@@ -11,6 +11,8 @@ from tensorflow.keras.layers import BatchNormalization
 from tensorflow.keras.layers import SpatialDropout2D
 from tensorflow.keras.layers import Input, Flatten
 from tensorflow.keras.constraints import max_norm
+from keras.callbacks import CSVLogger
+import logging
 def run(trainX, trainY):
     classifier = Classifier_EEGNet(output_directory=config['root_dir'])

@@ -41,7 +43,7 @@ class Classifier_EEGNet:
         if build:
             self.model = self.build_model()
             if self.verbose:
-                self.model.summary()
+                self.model.summary(print_fn=logging.info)
             # self.model.save_weights(self.output_directory + 'model_init.hdf5')
     def build_model(self):

@@ -85,5 +87,6 @@ class Classifier_EEGNet:
     def fit(self, eegnet_x, y):
         self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
-        hist = self.model.fit(eegnet_x, y, verbose=1, validation_split=0.2, epochs=10)
+        csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
+        hist = self.model.fit(eegnet_x, y, verbose=1, validation_split=0.2, epochs=2, callbacks=[csv_logger])
         return hist
\ No newline at end of file

@@ -2,10 +2,11 @@ import tensorflow as tf
 import tensorflow.keras as keras
 from config import config
 from utils.utils import *
+import logging
+from keras.callbacks import CSVLogger
 def run(trainX, trainY):
-    print("Starting InceptionTime.")
+    logging.info("Starting InceptionTime.")
     classifier = Classifier_INCEPTION(output_directory=config['root_dir'], input_shape=(500, 129))
     hist = classifier.fit(trainX, trainY)
     plot_loss(hist, config['model_dir'], config['model'], True)

@@ -95,5 +96,6 @@ class Classifier_INCEPTION:
         return model
     def fit(self, inception_x, y):
-        hist = self.model.fit(inception_x, y, verbose=1, validation_split=0.2, epochs=1)
+        csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
+        hist = self.model.fit(inception_x, y, verbose=1, validation_split=0.2, epochs=1, callbacks=[csv_logger])
         return hist
\ No newline at end of file

@@ -2,6 +2,7 @@
 # let's keep it here to have a clean code on other methods that we try
 import time
+import logging
 import os
 config = dict()

@@ -77,4 +78,7 @@ timestamp = str(int(time.time()))
 model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + config['model']
 config['model_dir'] = os.path.abspath(os.path.join(config['log_dir'], model_folder_name))
 if not os.path.exists(config['model_dir']):
     os.makedirs(config['model_dir'])
\ No newline at end of file
+config['info_log'] = config['model_dir'] + '/' + 'info.log'
+config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
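
The two new log paths are built by string concatenation with '/'; since os is already imported in config.py, os.path.join would be the more portable equivalent (a sketch of that alternative, not what the commit does):

    config['info_log'] = os.path.join(config['model_dir'], 'info.log')
    config['batches_log'] = os.path.join(config['model_dir'], 'batches.log')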

@@ -6,8 +6,12 @@ from DeepEye import deepEye
 from InceptionTime import inception
 from EEGNet import eegNet
 import numpy as np
+import logging
 def main():
+    logging.basicConfig(filename=config['info_log'], level=logging.INFO)
+    logging.info('Started the Logging')
     start_time = time.time()
     try:
         trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)

@@ -17,27 +21,27 @@ def main():
         return
     if config['model'] == 'cnn':
-        print("Started running CNN-1. If you want to run other methods please choose another model in the config.py file.")
+        logging.info("Started running CNN-1. If you want to run other methods please choose another model in the config.py file.")
         CNN.run(trainX, trainY)
     elif config['model'] == 'inception':
-        print("Started running InceptionTime. If you want to run other methods please choose another model in the config.py file.")
+        logging.info("Started running InceptionTime. If you want to run other methods please choose another model in the config.py file.")
         inception.run(trainX=trainX, trainY=trainY)
     elif config['model'] == 'eegnet':
-        print("Started running EEGNet. If you want to run other methods please choose another model in the config.py file.")
+        logging.info("Started running EEGNet. If you want to run other methods please choose another model in the config.py file.")
         eegnet_x = np.transpose(trainX, (0, 2, 1))
-        print(eegnet_x.shape)
+        logging.info(eegnet_x.shape)
         eegNet.run(trainX=eegnet_x, trainY=trainY)
     elif config['model'] == 'deepeye':
-        print("Started running DeepEye. If you want to run other methods please choose another model in the config.py file.")
+        logging.info("Started running DeepEye. If you want to run other methods please choose another model in the config.py file.")
         deepEye.run(trainX=trainX, trainY=trainY)
     else:
-        print('Cannot start the program. Please choose one model in the config.py file')
-    print("--- Runtime: %s seconds ---" % (time.time() - start_time))
+        logging.info('Cannot start the program. Please choose one model in the config.py file')
+    logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))
+    logging.info('Finished Logging')
 if __name__=='__main__':
     main()
\ No newline at end of file
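
With basicConfig as called in main(), the default record format ('%(levelname)s:%(name)s:%(message)s') writes the level and logger name but no timestamp to info.log. If timestamps are wanted, a format string can be passed; this is an optional refinement, not part of the commit:

    logging.basicConfig(
        filename=config['info_log'],
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
    )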

@@ -4,26 +4,27 @@ import scipy.io as sio
 import os
 import pickle
 import h5py
+import logging
 def get_mat_data(data_dir, verbose=True):
     with h5py.File(data_dir + 'all_EEGprocuesan.mat', 'r') as f:
         X = f['all_EEGprocuesan'][:]
     if verbose:
-        print("X training loaded.")
-        print(X.shape)
+        logging.info("X training loaded.")
+        logging.info(X.shape)
     with h5py.File(data_dir + 'all_trialinfoprosan.mat', 'r') as f:
         y = f['all_trialinfoprosan'][:]
     if verbose:
-        print("y training loaded.")
-        print(y.shape)
-    if verbose: print("Setting the shapes")
+        logging.info("y training loaded.")
+        logging.info(y.shape)
+    if verbose: logging.info("Setting the shapes")
     X = np.transpose(X, (2, 1, 0))
     y = np.transpose(y, (1, 0))
     if verbose:
-        print(X.shape)
-        print(y.shape)
+        logging.info(X.shape)
+        logging.info(y.shape)
     return X, y
 def get_pickle_data(data_dir, verbose=True):

@@ -31,15 +32,15 @@ def get_pickle_data(data_dir, verbose=True):
     x = pickle.load(pkl_file_x)
     pkl_file_x.close()
     if verbose:
-        print("X training loaded.")
-        print(x.shape)
+        logging.info("X training loaded.")
+        logging.info(x.shape)
     pkl_file_y = open(data_dir + 'y.pkl', 'rb')
     y = pickle.load(pkl_file_y)
     pkl_file_y.close()
     if verbose:
-        print("Y training loaded.")
-        print(y.shape)
+        logging.info("Y training loaded.")
+        logging.info(y.shape)
     return x, y
 def store(x, y, clip=True):

@@ -74,19 +75,19 @@ def collect_trial_data(data_path, filename, variable1, variable2, verbose=True,
     """
     Extract data from the file.
     :param data_path: name of the file to open.
-    :param verbose: boolean; if true, it prints information about
+    :param verbose: boolean; if true, it logs information about
         the status of the program.
     :return: a numpy array of shape ...?0
     """
-    if verbose: print("Loading data... ")
-    if verbose: print("Extracting trials...")
+    if verbose: logging.info("Loading data... ")
+    if verbose: logging.info("Extracting trials...")
     trials = extract_trials()
-    if verbose: print(len(trials), " trials found.")
+    if verbose: logging.info("%d trials found.", len(trials))
     full_data = np.array([])
     for i in range(20):
-        if detailed_verbose: print("Trying trial", trials[i])
+        if detailed_verbose: logging.info("Trying trial %s", trials[i])
         try:
             next_trial = load_matlab_trial(datapath=data_path, trial=trials[i], filename=filename, variable1=variable1,
                                            variable2=variable2)

@@ -94,11 +95,11 @@ def collect_trial_data(data_path, filename, variable1, variable2, verbose=True,
                 full_data = next_trial
             else:
                 full_data = np.concatenate((full_data, next_trial))
-            if detailed_verbose: print(np.shape(full_data))
+            if detailed_verbose: logging.info(np.shape(full_data))
         except:
-            print("Trying other trials...")
-    if verbose: print("Data loaded.")
+            logging.info("Trying other trials...")
+    if verbose: logging.info("Data loaded.")
     return full_data

@@ -113,7 +114,7 @@ def load_matlab_trial(datapath, trial, filename, variable1, variable2):
     try:
         data = sio.loadmat(datapath + trial + "/" + filename)[variable1][variable2][0][0]
     except:
-        print("Trial " + trial + " could not be opened. ")
+        logging.info("Trial " + trial + " could not be opened. ")
         raise Exception
     if len(np.shape(data)) == 3:

@@ -131,7 +132,7 @@ def extract_trials():
     try:
         my_list = os.listdir(config['data_dir'])
     except:
-        print("Server unreachable. Cannot list the directories. Did you (maybe) forget to connect to the server by VPN? Is your root directory set correctly in config.py file? :)")
+        logging.info("Server unreachable. Cannot list the directories. Did you (maybe) forget to connect to the server by VPN? Is your root directory set correctly in config.py file? :)")
         raise Exception
     trials = [name for name in my_list if len(name) == 3]
     return trials
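
Note that logging.info does not accept multiple values the way print does: extra positional arguments are treated as %-style formatting arguments for the first one, so a literal swap such as logging.info(len(trials), " trials found.") triggers a formatting error inside the logging machinery and the message is lost. The logging idiom, used in the + lines above, passes the values as format arguments and defers the string formatting until the record is actually emitted:

    logging.info("%d trials found.", len(trials))
    logging.info("Trying trial %s", trials[i])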

@@ -8,39 +8,42 @@ import torch
 import pandas as pd
 import os
 sns.set_style('darkgrid')
+import logging
 def plot_acc(hist, output_directory, model,val=False):
     '''
     plot the accuracy against the epochs during training
     '''
-    epochs=len(hist.history['accuracy'])
-    epochs=np.arange(epochs)
+    epochs = len(hist.history['accuracy'])
+    epochs = np.arange(epochs)
     plt.figure()
     plt.title(model + ' accuracy')
-    plt.plot(epochs,hist.history['accuracy'],'b-',label='training')
+    plt.plot(epochs, hist.history['accuracy'],'b-',label='training')
     if val:
-        plt.plot(epochs,hist.history['val_accuracy'],'g-',label='validation')
+        plt.plot(epochs, hist.history['val_accuracy'],'g-',label='validation')
     plt.legend()
     plt.xlabel('epochs')
     plt.ylabel('Accuracy')
     plt.savefig(output_directory + '/' + model + '_accuracy.png')
     # plt.show()
-    print(10*'*'+'\n')
+    logging.info(10*'*'+'\n')
 def plot_loss(hist, output_directory, model,val=False):
-    epochs=len(hist.history['accuracy'])
-    epochs=np.arange(epochs)
+    epochs = len(hist.history['accuracy'])
+    epochs = np.arange(epochs)
     plt.figure()
     plt.title(model + ' loss')
-    plt.plot(epochs,hist.history['loss'],'b-',label='training')
+    plt.plot(epochs,hist.history['loss'], 'b-', label='training')
     if val:
-        plt.plot(epochs,hist.history['val_loss'],'g-',label='validation')
+        plt.plot(epochs,hist.history['val_loss'], 'g-', label='validation')
     plt.legend()
     plt.xlabel('epochs')
     plt.ylabel('Binary Cross Entropy')
     plt.savefig(output_directory + '/' + model + '_loss.png')
     # plt.show()
 def plot_loss_torch(loss, output_directory, model):
     epochs=np.arange(len(loss))
     plt.figure()

@@ -87,6 +90,7 @@ def save_logs(hist, output_directory, model, pytorch=False):
     except:
         return
 # Save the model parameters (newly added without debugging)
 def save_model_param(classifier, output_directory, model, pytorch=False):
     try: