To receive notifications about scheduled maintenance, please subscribe to the gitlab-operations@sympa.ethz.ch mailing list at https://sympa.ethz.ch.

Commit 03dc4195 authored by flerch's avatar flerch
Browse files

Cleaned up code

parent de3c1ebd
......@@ -60,17 +60,15 @@ def try_sklearn_classifiers(X, y, subjectID):
logging.info("Training the simple classifiers: kNN, Linear SVM, Random Forest and Naive Bayes.")
names = [# "Nearest Neighbors",
# "Linear SVM",
# "Random Forest",
# "Naive Bayes",
"Linear SVM"
"Random Forest",
# "Naive Bayes"
]
classifiers = [
# KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, n_jobs=-1),
# LinearSVC(tol=1e-5, C=1, random_state=42, max_iter=500),
# RandomForestClassifier(n_estimators=main.args.n_estimators, max_depth=main.args.max_depth, max_features='auto', random_state=42, n_jobs=-1),
# LinearSVC(tol=1e-4, C=main.args.C, random_state=42, max_iter=500)
RandomForestClassifier(n_estimators=main.args.n_estimators, max_depth=main.args.max_depth, max_features='auto', random_state=42, n_jobs=-1),
# GaussianNB(),
LinearSVC(tol=1e-4, C=main.args.C, random_state=42, max_iter=500)
]
X = X.reshape((36109, 500 * 129))
......
......@@ -3,6 +3,9 @@ import numpy as np
import math
from scipy.io import savemat
from sklearn.model_selection import train_test_split
# A file to try out random stuff
'''
def get_mat_data():
with h5py.File('/Users/florianlerch/Documents/PycharmProjects/DL_data/all_trialinfoprosan_old.mat', 'r') as f:
......@@ -114,10 +117,30 @@ print(a)
print(d)
'''
dict_data = np.load('/Volumes/methlab/ETH_AS/NIPS/Data/all_EEG.npz')
'''dict_data = np.load('/Volumes/methlab/ETH_AS/NIPS/Data/all_EEG.npz')
data = dict_data['EEG']
labels = dict_data['labels']
print(data.shape)
print(labels.shape)
print(data)
print(labels)
print(labels)'''
# Summary statistics over per-subject eye-event counts.
# Each row of counters_max.csv is one subject; columns are assumed to be
# [saccades_a, fixations_a, blinks_a, saccades_b, fixations_b, blinks_b]
# (two conditions per event type) -- TODO confirm the column layout against
# the script that produced the CSV.
counters = np.loadtxt('/Users/florianlerch/Documents/PycharmProjects/DL_data/counters_max.csv', delimiter=',')
print(counters.shape)
# Column totals across all subjects. Renamed from `all`, which shadowed the
# builtin of the same name.
totals = counters.sum(axis=0)
print(totals)
# For each event type, combine its two condition columns per subject and
# report shape, mean and standard deviation (same output order as before:
# saccades, then fixations, then blinks).
for event_name, col_a, col_b in (('saccades', 0, 3), ('fixations', 1, 4), ('blinks', 2, 5)):
    per_subject = counters[:, col_a] + counters[:, col_b]
    print(np.shape(per_subject))
    print(np.mean(per_subject))
    print(np.std(per_subject))
......@@ -14,9 +14,8 @@ config = dict()
config['log_dir'] = './runs/'
# Path to training, validation and test data folders.
# PS: Note that we have to upload the data to the server!!!
# config['data_dir'] = '/cluster/home/your_username/data/'
# config['data_dir'] = '/Users/florianlerch/Documents/PycharmProjects/DL_data/'
config['data_dir'] = '/itet-stor/flerch/net_scratch/DL_data/'
config['data_dir'] = '/Users/florianlerch/Documents/PycharmProjects/DL_data/'
# config['data_dir'] = '/itet-stor/flerch/net_scratch/DL_data/' # ETH server
# Path of root
config['root_dir'] = '.'
......
import numpy as np
import os
import h5py
import csv
import re
from tqdm import tqdm
......
import numpy as np
import os
import h5py
import csv
import re
from tqdm import tqdm
import filters
......@@ -9,7 +8,7 @@ import filters
class Segment:
def __init__(self, possible_blocks=['20 ', '30 ', '30 ', '30 ', '20 '], desired_blocks=[True, False, False, False, True],
min_latency=50, min_amplitude=1.5, length=1, padding=None, *event_config):
min_latency=50, min_amplitude=1.5, length=500, padding=None, *event_config):
self.possible_blocks = possible_blocks
self.desired_blocks = desired_blocks
self.min_latency = min_latency
......
......@@ -3,7 +3,6 @@ import h5py
import numpy as np
data_dir = '/Volumes/methlab/ETH_AS/NIPS/antisaccade_task_data/ANTI/'
# data_dir = '\\130.60.169.45\methlab\ETH_AS\NIPS\antisaccade_task_data\ANTI\'
subjects = os.listdir(data_dir)
all_EEGprocuesan = []
......
......@@ -48,7 +48,7 @@ class EnsembleNet:
depth=main.args.depth, bottleneck_size=main.args.bottleneck_size, epochs=50)
elif config['model'] == 'xception':
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=50)
kernel_size=40, nb_filters=64, depth=18, epochs=50)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
else:
......@@ -77,6 +77,6 @@ class EnsembleNet:
hist.history['val_loss'] = loss
hist.history['val_accuracy'] = accuracy
plot_loss(hist, config['model_dir'], model, val = True)
plot_acc(hist, config['model_dir'], model, val = True)
save_logs(hist, config['model_dir'], model, pytorch = False)
plot_loss(hist, config['model_dir'], model, val=True)
plot_acc(hist, config['model_dir'], model, val=True)
save_logs(hist, config['model_dir'], model, pytorch=False)
from config import config
import ensemble
import numpy as np
import scipy
from utils.utils import select_best_model, comparison_plot_accuracy, comparison_plot_loss
from utils import IOHelper
from scipy import io
import h5py
import logging
import time
import argparse
from SimpleClassifiers.sklearnclassifier import try_sklearn_classifiers
from kerasTuner import tune
from utils import IOHelper
from SimpleClassifiers.sklearnclassifier import try_sklearn_classifiers, cross_validate_kNN, cross_validate_RFC, \
cross_validate_SVC
from utils.utils import select_best_model, comparison_plot_accuracy, comparison_plot_loss
# This section allows running multiple configurations on the cluster simultaneously through the script.sh files
parser = argparse.ArgumentParser()
if config['within_subjects']:
# parser.add_argument('subjectID', type=int)
# parser.add_argument('n_estimators', type=int)
# parser.add_argument('max_depth', type=int)
# parser.add_argument('C', type=float)
# parser.add_argument('kernLength', type=int)
# parser.add_argument('F1', type=int)
# parser.add_argument('D', type=int)
# parser.add_argument('F2', type=int)
parser.add_argument('kernel_size', type=int)
parser.add_argument('nb_filters', type=int)
parser.add_argument('depth', type=int)
if config['model'] == 'eegnet':
parser.add_argument('kernLength', type=int)
parser.add_argument('F1', type=int)
parser.add_argument('D', type=int)
parser.add_argument('F2', type=int)
elif config['model'] == 'pyramidal_cnn':
parser.add_argument('kernel_size', type=int)
parser.add_argument('nb_filters', type=int)
parser.add_argument('depth', type=int)
elif config['model'] == 'simple_classifier':
# RandomForestClassifier
parser.add_argument('n_estimators', type=int)
parser.add_argument('max_depth', type=int)
# LinearSVC
# parser.add_argument('C', type=float)
else:
parser.add_argument('percentage', type=float)
parser.add_argument('kernel_size', type=int)
......@@ -36,41 +36,53 @@ else:
parser.add_argument('bottleneck_size', type=int)
args = parser.parse_args()
def main():
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
start_time = time.time()
# trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
dict_data = np.load(config['data_dir'] + 'all_EEG.npz')
trainX = dict_data['EEG']
trainY = dict_data['labels']
trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
# Load the data instead from the .npz file created with merging.py in data_preparation
# dict_data = np.load(config['data_dir'] + 'all_EEG.npz')
# trainX = dict_data['EEG']
# trainY = dict_data['labels']
logging.info(trainX.shape)
logging.info(trainY.shape)
if config['model'] == 'eegnet' or config['model'] == 'eegnet_cluster':
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
# Use Keras Tuner
# tune(trainX, trainY)
if config['within_subjects']:
# logging.info('n_estimators: %d', args.n_estimators)
# logging.info('max_depth: %d', args.max_depth)
# logging.info('C: %f', args.C)
# scores = []
for i in range(1, 358): # [1, 5, 12, 42, 99, 124, 178, 255, 300, 357]:
if i == 344:
continue # needs to be skipped because it contains only one trial
logging.info('Using within subjects: subject ID %d', i)
model = ensemble.EnsembleNet
model.run(trainX, trainY, i)
# scores.append(try_sklearn_classifiers(trainX, trainY, i))
# cross_validate_kNN(trainX, trainY)
# cross_validate_SVC(trainX, trainY)
# cross_validate_RFC(trainX, trainY)
# logging.info('Mean of all scores: %f', np.mean(scores))
# logging.info('Standard deviation of all scores: %f', np.std(scores))
if config['model'] == 'simple_classifier':
# RandomForestClassifier
logging.info('n_estimators: %d', args.n_estimators)
logging.info('max_depth: %d', args.max_depth)
# LinearSVC
# logging.info('C: %f', args.C)
scores = []
for i in range(1, 358):
if i == 344:
continue # needs to be skipped because it contains only one trial
logging.info('Using within subjects: subject ID %d', i)
scores.append(try_sklearn_classifiers(trainX, trainY, i))
logging.info('Mean of all scores: %f', np.mean(scores))
logging.info('Standard deviation of all scores: %f', np.std(scores))
else:
for i in range(1, 358):
if i == 344:
continue # needs to be skipped because it contains only one trial
logging.info('Using within subjects: subject ID %d', i)
model = ensemble.EnsembleNet
model.run(trainX, trainY, i)
else:
logging.info('Using across subjects version 2: %s', config['across_subjects_v2'])
logging.info('Percentage of training data to use: %f', args.percentage)
......@@ -84,5 +96,6 @@ def main():
logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))
logging.info('Finished Logging')
if __name__=='__main__':
if __name__ == '__main__':
main()
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the single positional arg (10) maps onto an argparse parameter
# declared in main.py -- confirm which one against the current parser.
python ./main.py 10
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the five positional args presumably cover the model
# hyperparameters declared via argparse in main.py (e.g. EEGNet's
# kernLength/F1/D/F2 plus one more) -- confirm against the current parser.
python ./main.py 1 32 8 1 8
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): positional args map onto main.py's argparse hyperparameters
# (same 5-arg pattern as the sibling scripts) -- confirm against the parser.
python ./main.py 1 16 4 1 16
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): positional args map onto main.py's argparse hyperparameters
# (same 5-arg pattern as the sibling scripts) -- confirm against the parser.
python ./main.py 1 16 4 2 8
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): positional args map onto main.py's argparse hyperparameters
# (same 5-arg pattern as the sibling scripts) -- confirm against the parser.
python ./main.py 1 16 8 1 8
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): positional args map onto main.py's argparse hyperparameters
# (same 5-arg pattern as the sibling scripts) -- confirm against the parser.
python ./main.py 1 32 4 1 8
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): positional args map onto main.py's argparse hyperparameters
# (same 5-arg pattern as the sibling scripts) -- confirm against the parser.
python ./main.py 1 16 4 1 8
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# two-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:2
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the three positional args presumably correspond to main.py's
# kernel_size / nb_filters / depth argparse parameters -- confirm against the
# current parser.
python ./main.py 8 2 4
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# two-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:2
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the three positional args presumably correspond to main.py's
# kernel_size / nb_filters / depth argparse parameters -- confirm against the
# current parser.
python ./main.py 4 2 4
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the two positional args presumably correspond to the
# RandomForest n_estimators / max_depth argparse parameters in main.py --
# confirm against the current parser. A GPU is likely unnecessary for that
# sklearn run; consider dropping --gres.
python ./main.py 60 20
echo "Finished at: $(date)"
exit 0
#!/bin/bash
# SLURM batch script (ETH ITET cluster template): submits main.py as a
# single-GPU job. Requires a log/ subdirectory to exist next to the script.
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=200G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
# NOTE(review): the two positional args presumably correspond to the
# RandomForest n_estimators / max_depth argparse parameters in main.py --
# confirm against the current parser. A GPU is likely unnecessary for that
# sklearn run; consider dropping --gres.
python ./main.py 30 40
echo "Finished at: $(date)"
exit 0