Commit 276ea993 authored by okiss's avatar okiss
Browse files

Update RNNdeep.py

parent a9c52171
......@@ -17,7 +17,9 @@ sns.set_style('darkgrid')
def run(trainX, trainY):
classifier = Classifier_DEEPEYE_LSTM(output_directory=config['root_dir'],
if config['split']:
config['model']=config['model']+'_cluster'
classifier = Classifier_DEEPEYE_LSTM(output_directory=config['root_dir'],
input_shape=config['deepeye-lstm']['input_shape'])
hist = classifier.fit(trainX, trainY)
plot_loss(config['model_dir'], hist, config['model'], True)
......@@ -63,10 +65,13 @@ class Classifier_DEEPEYE_LSTM:
self.momentum = 0.9
self.lr = 0.001
self.feature_nb = 75
if build:
# build model
self.model = self.build_model(input_shape)
# build model
if config['split']:
self.model = self.split_model(input_shape)
else:
self.model = self.build_model(input_shape)
if verbose:
self.model.summary()
print(20*'*')
......@@ -74,11 +79,31 @@ class Classifier_DEEPEYE_LSTM:
"Learning rate:", self.lr, "# of features:", self.feature_nb, "# of filters:", self.nb_filters)
print(20*'*')
self.verbose = verbose
# self.model.save_weights(self.output_directory + 'model_init.hdf5')
def split_model(self, input_shape):
    """Assemble the cluster-split variant of the network.

    The raw input is sliced into groups of rows according to
    ``config['cluster']`` (each value is a list of indices into the first
    data axis — presumably electrode channels; TODO confirm against the
    data loader).  The inception backbone (``build_model`` in split mode,
    which returns its global-average-pooling tensor) is applied to every
    slice independently, the per-cluster embeddings are concatenated, and
    a small dense head produces the final sigmoid prediction.

    Parameters
    ----------
    input_shape : tuple
        ``(input_shape[0], input_shape[1])`` — the two data dimensions of
        the shared input layer.

    Returns
    -------
    tf.keras.models.Model
        Model mapping the full input tensor to a single sigmoid unit.
    """
    input_layer = keras.layers.Input((input_shape[0], input_shape[1]))

    # Run the backbone once per cluster, collecting each embedding.
    cluster_embeddings = []
    for cluster_key in config['cluster'].keys():
        # Move the first data axis to the front so embedding_lookup can
        # gather the cluster's rows, then restore the original axis order.
        axis_swapped = tf.transpose(input_layer, (1, 0, 2))
        gathered = tf.nn.embedding_lookup(axis_swapped, config['cluster'][cluster_key])
        cluster_slice = tf.transpose(gathered, (1, 0, 2))
        cluster_embeddings.append(self.build_model(input_shape=None, X=cluster_slice))

    # Merge the embeddings, add one hidden dense layer, then classify.
    merged = tf.keras.layers.Concatenate(axis=1)(cluster_embeddings)
    hidden = tf.keras.layers.Dense(32, activation='relu')(merged)
    output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(hidden)
    return tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
def _LSTM_preprocessing(self, input_tensor, output_feature_nb):
lstm = Sequential()
lstm.add(LSTM(output_feature_nb, return_sequences=True))
lstm.add(Dropout(self.dropoutRate))
......@@ -119,7 +144,7 @@ class Classifier_DEEPEYE_LSTM:
strides=stride, padding='same', activation=activation,
use_bias=False)(input_inception))
max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)
max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)
conv_6 = keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', activation=activation,
use_bias=False)(max_pool_1)
......@@ -131,7 +156,7 @@ class Classifier_DEEPEYE_LSTM:
x = keras.layers.Activation(activation='relu')(x)
return x
def _shortcut_layer(self, input_tensor, out_tensor):
'''
implementation of a shortcut layer inspired by the Residual NN
......@@ -144,8 +169,12 @@ class Classifier_DEEPEYE_LSTM:
x = keras.layers.Activation('relu')(x)
return x
def build_model(self, input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
def build_model(self, input_shape, X = None):
if config['split']:
input_layer = X
else:
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
lstm_tensor = self._LSTM_preprocessing(input_layer, self.feature_nb)
x = lstm_tensor
input_res = lstm_tensor
......@@ -161,6 +190,8 @@ class Classifier_DEEPEYE_LSTM:
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
# Add Dropout layer
gap_layer = tf.keras.layers.Dropout(self.dropoutRate)(gap_layer)
if config['split']:
return gap_layer
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
......@@ -177,11 +208,11 @@ class Classifier_DEEPEYE_LSTM:
else:
mini_batch_size = self.batch_size
#self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=self.lr), metrics=['accuracy'])
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.SGD(learning_rate=self.lr,
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.SGD(learning_rate=self.lr,
momentum=self.momentum, nesterov=True), metrics=['accuracy'])
#self.model.compile(loss='binary_crossentropy',
#self.model.compile(loss='binary_crossentropy',
#optimizer=keras.optimizers.RMSprop(learning_rate=self.lr, momentum=self.momentum), metrics=['accuracy'])
hist = self.model.fit(lstm_x, y, batch_size=mini_batch_size, verbose=1, validation_split=0.25,
epochs=self.nb_epochs, shuffle=True, callbacks=[early_stop, csv_logger])
return hist
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment