# deepEye.py — DeepEye EEG classifier: EEGNet-style preprocessing + Inception modules.
import keras
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation, Permute, Dropout
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import SeparableConv2D, DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.constraints import max_norm

10
11
from config import general_params
from utils.utils import *
import seaborn as sns
sns.set_style('darkgrid')

from sklearn.model_selection import train_test_split
import os


def run(trainX, trainY):
    """Train the DeepEye classifier on (trainX, trainY) and plot loss/accuracy curves.

    Args:
        trainX: training inputs; fed straight to Classifier_DEEPEYE.fit, which
                expects data matching input_shape (129, 500) — confirm with caller.
        trainY: binary training labels (the model head is a single sigmoid unit).
    """
    # nb_classes is a required __init__ parameter with no default; the original
    # call omitted it and raised TypeError. Pass it explicitly (binary task).
    classifier = Classifier_DEEPEYE(output_directory=general_params['root_dir'],
                                    input_shape=(129, 500), nb_classes=2)
    hist = classifier.fit(deepeye_x=trainX, y=trainY)
    plot_loss(hist, 'DeepEye', True)
    plot_acc(hist, 'DeepEye', True)
class Classifier_DEEPEYE:
    """DeepEye EEG classifier.

    EEGNet-style temporal/spatial filtering (Conv2D + DepthwiseConv2D) followed
    by a stack of 1D Inception modules with optional residual shortcuts and a
    single-sigmoid binary head.

    Inputs:

      nb_classes      : int, number of classes; the head is a single sigmoid
                        unit, so only the binary case (nb_classes=2) is wired up
      input_shape     : shape of the input tensor, in our case: 129 * 500 * 1
      use_bottleneck  : use a 1x1 bottleneck conv to select the most informative channels
      use_residual    : use a shortcut layer to try to avoid vanishing gradients
      kernel_size     : 41
      batch_size      : 64
      epochs          : 1500
      output_directory: directory where plot weights and results are stored
      depth           : 6, number of repetitions of the inception module

      Outputs:

      y_pred          : class (left/right for nb_classes=2) of the given input tensor
    """

    def __init__(self, output_directory, input_shape, nb_classes=2, verbose=False, build=True,
                 batch_size=64, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6,
                 kernel_size=41, nb_epochs=1500):
        # nb_classes now defaults to 2 (backward compatible: previously it was
        # required but never stored, which broke callers that omitted it).
        self.output_directory = output_directory
        self.nb_classes = nb_classes
        self.nb_filters = nb_filters
        self.use_residual = use_residual
        self.use_bottleneck = use_bottleneck
        self.depth = depth
        # An even kernel size halves cleanly in the inception branches (// 2**i).
        self.kernel_size = kernel_size - 1
        self.callbacks = None
        self.batch_size = batch_size
        self.bottleneck_size = 32
        self.nb_epochs = nb_epochs
        # Assigned unconditionally so the attribute exists even when build=False.
        self.verbose = verbose

        if build:
            # build model
            self.model = self.build_model(input_shape)
            if verbose:
                self.model.summary()
            # self.model.save_weights(self.output_directory + 'model_init.hdf5')

    @staticmethod
    def _eeg_preprocessing(input_tensor, F1=8, D=2, kernLength=125):
        """EEGNet-style feature extraction front end.

        Static method since this function does not use any instance state.
        (Fixed: the original kept a `self` parameter under @staticmethod, which
        shifted every argument by one at the call site — input_tensor was bound
        to `self`, F1 to `input_tensor`, and so on.)

        Args:
            input_tensor: Keras tensor of shape (batch, Chans, Samples, 1)
                          — presumably (batch, 129, 500, 1); confirm with caller.
            F1          : number of temporal filters.
            D           : depth multiplier for the depthwise (spatial) conv.
            kernLength  : length of the temporal convolution kernel.

        Returns:
            A (batch, Samples, features) tensor ready for the Conv1D inception modules.
        """
        Chans = input_tensor.shape[1]
        Samples = input_tensor.shape[2]

        # Temporal filtering: the (1, kernLength) filter slides horizontally.
        horizontal_tensor = Conv2D(F1, (1, kernLength), padding='same',
                                   input_shape=(Chans, Samples, 1),
                                   use_bias=False)(input_tensor)
        horizontal_tensor = BatchNormalization()(horizontal_tensor)

        # Spatial filtering: the (Chans, 1) depthwise filter slides vertically,
        # collapsing the channel axis; max_norm keeps the spatial weights bounded.
        vertical_tensor = DepthwiseConv2D((Chans, 1), use_bias=False,
                                          depth_multiplier=D,
                                          depthwise_constraint=max_norm(1.))(horizontal_tensor)
        vertical_tensor = BatchNormalization()(vertical_tensor)
        eeg_tensor = Activation('elu')(vertical_tensor)

        # Drop the collapsed (size-1) channel axis, then transpose to
        # (batch, time, features) — the layout the inception Conv1Ds expect.
        output_tensor = eeg_tensor[:, 0, :, :]
        output_tensor = tf.transpose(output_tensor, perm=[0, 2, 1])
        return output_tensor

    def _inception_module(self, input_tensor, stride=1, activation='linear'):
        """One Inception module over a (batch, time, features) tensor.

        Branches: optional 1x1 bottleneck feeding three Conv1Ds with
        geometrically shrinking kernels, plus a max-pool -> 1x1 conv branch;
        all branch outputs are concatenated on the feature axis, then
        BatchNorm + ReLU.

        Args:
            input_tensor: tensor from _eeg_preprocessing or a previous module.
            stride      : convolution/pool stride (1).
            activation  : activation of the branch convolutions ('linear';
                          the non-linearity is applied after the concat).

        Returns:
            The concatenated, normalized, ReLU-activated module output.
        """
        if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
            # 1x1 conv selects the most informative feature channels.
            input_inception = keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,
                                                  padding='same', activation=activation,
                                                  use_bias=False)(input_tensor)
        else:
            input_inception = input_tensor

        # Kernels: kernel_size, kernel_size/2, kernel_size/4 (e.g. 40, 20, 10).
        kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]

        conv_list = [keras.layers.Conv1D(filters=self.nb_filters, kernel_size=k,
                                         strides=stride, padding='same', activation=activation,
                                         use_bias=False)(input_inception)
                     for k in kernel_size_s]

        # Pooling branch taken from the raw module input (not the bottleneck).
        max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride,
                                            padding='same')(input_tensor)
        conv_6 = keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same',
                                     activation=activation, use_bias=False)(max_pool_1)
        conv_list.append(conv_6)

        x = keras.layers.Concatenate(axis=2)(conv_list)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation(activation='relu')(x)
        return x

    @staticmethod
    def _shortcut_layer(input_tensor, out_tensor):
        """Residual shortcut inspired by ResNet.

        Projects input_tensor to out_tensor's feature width with a 1x1 conv,
        adds the two, and applies ReLU.

        (Fixed: the original kept `self` under @staticmethod, so the call
        `self._shortcut_layer(input_res, x)` raised a missing-argument
        TypeError; also `keras.layers.normalization.BatchNormalization` is not
        a public path in current Keras — use keras.layers.BatchNormalization.)
        """
        shortcut_y = keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
                                         padding='same', use_bias=False)(input_tensor)
        shortcut_y = keras.layers.BatchNormalization()(shortcut_y)

        x = keras.layers.Add()([shortcut_y, out_tensor])
        x = keras.layers.Activation('relu')(x)
        return x

    def build_model(self, input_shape, nb_filters=32, use_residual=True, use_bottleneck=True,
                    depth=6, kernel_size=40, F1=8, D=2, kernLength=125):
        """Assemble the full Keras model: EEG front end -> depth x inception
        (+ shortcut every third module) -> global average pool -> sigmoid.

        NOTE(review): the depth/use_residual/... parameters here shadow the
        constructor's self.* settings; the constructor values are only used
        inside _inception_module. Kept as-is for interface compatibility.

        Returns:
            An uncompiled tf.keras Model with a single sigmoid output (binary).
        """
        input_layer = keras.layers.Input((input_shape[0], input_shape[1], 1))
        eeg_tensor = self._eeg_preprocessing(input_layer, F1, D, kernLength)
        x = eeg_tensor
        input_res = eeg_tensor

        for d in range(depth):
            x = self._inception_module(x)

            # Insert a residual connection after every third inception module.
            if use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x

        gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
        output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
        model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)

        return model

    def fit(self, deepeye_x, y):
        """Compile and train the model; returns the Keras History object.

        Uses the batch_size and nb_epochs configured in __init__ (the original
        hard-coded epochs=10 and ignored both constructor settings, despite the
        class docstring advertising 1500 epochs).
        """
        self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(),
                           metrics=['accuracy'])
        hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2,
                              batch_size=self.batch_size, epochs=self.nb_epochs)
        return hist