Commit fe2699fc authored by Ard Kastrati's avatar Ard Kastrati
Browse files

Added a new variant where we use different types of convolutions

parent ee8c2ef1
import tensorflow as tf
import numpy as np
import tensorflow.keras as keras
from config import config
from utils.utils import *
import logging
from keras.callbacks import CSVLogger
def run(trainX, trainY):
    """Train the DeepEye3 classifier on (trainX, trainY) and persist
    loss/accuracy plots, training logs, and model parameters to the
    configured model directory."""
    logging.info("Starting DeepEye3.")
    clf = Classifier_DEEPEYE3(input_shape=config['deepeye3']['input_shape'])
    history = clf.fit(trainX, trainY)
    # Persist diagnostics and weights under config['model_dir'].
    plot_loss(history, config['model_dir'], config['model'], True)
    plot_acc(history, config['model_dir'], config['model'], True)
    save_logs(history, config['model_dir'], config['model'], pytorch=False)
    save_model_param(clf.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_DEEPEYE3:
    """Inception-style 1D CNN for EEG time-series classification (DeepEye3).

    Stacks inception modules (parallel Conv1D branches with different kernel
    sizes, plus max-pool and separable-conv branches) with optional residual
    shortcuts, followed by global average pooling and a sigmoid output for
    binary classification.
    """

    def __init__(self, input_shape, verbose=True, build=True, batch_size=64, nb_filters=32,
                 use_residual=True, use_bottleneck=True, depth=6, kernel_size=40, nb_epochs=1500):
        """Store hyper-parameters and optionally build the Keras model.

        Args:
            input_shape: shape of one sample, e.g. (timesteps, channels).
            verbose: if True, print a model summary after building.
            build: if True, construct the model immediately.
            batch_size: intended training batch size.
                NOTE(review): currently not forwarded to model.fit() — confirm.
            nb_filters: filters per inception branch.
            use_residual / use_bottleneck / depth / kernel_size:
                NOTE(review): stored but _build_model/_inception_module use
                their own defaults instead of these attributes — confirm intent.
            nb_epochs: NOTE(review): stored but fit() hard-codes epochs=1.
        """
        self.nb_filters = nb_filters
        self.use_residual = use_residual
        self.use_bottleneck = use_bottleneck
        self.depth = depth
        self.kernel_size = kernel_size
        self.callbacks = None
        self.batch_size = batch_size
        self.bottleneck_size = 32
        self.nb_epochs = nb_epochs
        self.verbose = verbose
        if build:
            self.model = self._build_model(input_shape)
            if self.verbose:
                self.model.summary()

    def _inception_module(self, input_tensor, nb_filters=32, use_bottleneck=True, kernel_size=40, bottleneck_size=32,
                          stride=1, activation='linear'):
        """One inception block: bottleneck, parallel convs of several kernel
        sizes, two max-pool branches, and a separable-conv branch, concatenated
        along the channel axis, then batch-norm + ReLU."""
        # 1x1 bottleneck to reduce channel count before the wide convolutions.
        if use_bottleneck and int(input_tensor.shape[-1]) > 1:
            input_inception = tf.keras.layers.Conv1D(filters=bottleneck_size, kernel_size=1, padding='same',
                                                     activation=activation, use_bias=False)(input_tensor)
        else:
            input_inception = input_tensor

        # Parallel convolutions with halving kernel sizes: [40, 20] by default.
        kernel_size_s = [kernel_size // (2 ** i) for i in range(2)]
        conv_list = []
        for ks in kernel_size_s:
            conv_list.append(
                tf.keras.layers.Conv1D(filters=nb_filters, kernel_size=ks, strides=stride, padding='same',
                                       activation=activation, use_bias=False)(input_inception))

        # Max-pool branch followed by a 1x1 conv.
        max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=10, strides=stride, padding='same')(input_tensor)
        conv_6 = tf.keras.layers.Conv1D(filters=nb_filters, kernel_size=1, padding='same',
                                        activation=activation, use_bias=False)(max_pool_1)

        # Second max-pool branch with a wider conv and fewer filters.
        # BUG FIX: nb_filters / 8 yields a float; Conv1D requires an integer
        # filter count (newer Keras raises a TypeError). Use // and clamp to >= 1.
        max_pool_2 = tf.keras.layers.MaxPool1D(pool_size=10, strides=stride, padding='same')(input_tensor)
        conv_7 = tf.keras.layers.Conv1D(filters=max(1, nb_filters // 8), kernel_size=16, padding='same',
                                        activation=activation, use_bias=False)(max_pool_2)

        # Depthwise-separable convolution branch on the raw input.
        conv_8 = tf.keras.layers.SeparableConv1D(filters=nb_filters, kernel_size=32, padding='same',
                                                 activation=activation, use_bias=False,
                                                 depth_multiplier=1)(input_tensor)

        conv_list.append(conv_6)
        conv_list.append(conv_7)
        conv_list.append(conv_8)

        x = tf.keras.layers.Concatenate(axis=2)(conv_list)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(activation='relu')(x)
        return x

    def _shortcut_layer(self, input_tensor, out_tensor):
        """Residual shortcut: project input to out_tensor's channel count via a
        1x1 conv + batch-norm, add, then ReLU."""
        shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1, padding='same',
                                            use_bias=False)(input_tensor)
        shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
        # Consistency: use tf.keras.layers like the rest of the class
        # (keras is imported as tensorflow.keras, so behavior is identical).
        x = tf.keras.layers.Add()([shortcut_y, out_tensor])
        x = tf.keras.layers.Activation('relu')(x)
        return x

    def _build_model(self, input_shape, use_residual=True, depth=9):
        """Assemble the full network: `depth` inception modules with a residual
        shortcut every third module, then global average pooling and a sigmoid
        unit. Compiled with binary cross-entropy + Adam.

        NOTE(review): uses its own depth=9 / use_residual=True defaults rather
        than self.depth / self.use_residual — confirm this is intentional.
        """
        input_layer = tf.keras.layers.Input(input_shape)
        x = input_layer
        input_res = input_layer
        for d in range(depth):
            x = self._inception_module(x)
            # Add a shortcut connection after every third inception module.
            if use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x
        gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
        output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
        model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        return model

    def fit(self, inception_x, y):
        """Train the model with a 20% validation split, logging per-batch
        metrics to config['batches_log'].

        Returns the Keras History object.
        """
        csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
        # NOTE(review): epochs is hard-coded to 1 (self.nb_epochs and
        # self.batch_size are ignored) — presumably a debug setting; confirm.
        hist = self.model.fit(inception_x, y, verbose=1, validation_split=0.2, epochs=1, callbacks=[csv_logger])
        return hist
\ No newline at end of file
......@@ -35,7 +35,7 @@ deepeye: Our method
"""
# Choosing model
config['model'] = 'xception'
config['model'] = 'deepeye3'
config['downsampled'] = False
config['split'] = False
config['cluster'] = clustering()
......@@ -53,6 +53,8 @@ config['inception'] = {}
config['deepeye'] = {}
# DeepEye2
config['deepeye2'] = {}
# DeepEye3
config['deepeye3'] = {}
# Xception
config['xception'] = {}
# EEGNet
......@@ -62,6 +64,7 @@ config['deepeye-lstm'] = {}
config['inception']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['deepeye2']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['deepeye3']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['xception']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['eegnet']['channels'] = 129
......
......@@ -2,7 +2,7 @@ from config import config
import time
from CNN import CNN
from utils import IOHelper
from DeepEye import deepEye, RNNdeep, deepEye2, Xception
from DeepEye import deepEye, RNNdeep, deepEye2, deepEye3, Xception
from InceptionTime import inception
from EEGNet import eegNet
import numpy as np
......@@ -40,6 +40,10 @@ def main():
logging.info("Started running DeepEye2. If you want to run other methods please choose another model in the config.py file.")
deepEye2.run(trainX=trainX, trainY=trainY)
elif config['model'] == 'deepeye3':
logging.info("Started running DeepEye3. If you want to run other methods please choose another model in the config.py file.")
deepEye3.run(trainX=trainX, trainY=trainY)
elif config['model'] == 'xception':
logging.info("Started running XceptionTime. If you want to run other methods please choose another model in the config.py file.")
Xception.run(trainX=trainX, trainY=trainY)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment