Commit dcea5128 authored by zigeng

RNNdeep.py remains to be fixed. @Oriel, I don't understand the code in the build_model function...

parent 0c27f129
@@ -17,7 +17,8 @@ sns.set_style('darkgrid')
 def run(trainX, trainY):
-    if config['split']: config['model']=config['model']+'_cluster'
+    if config['split']:
+        config['model']=config['model']+'_cluster'
     classifier = Classifier_DEEPEYE_LSTM(output_directory=config['root_dir'], input_shape=config['deepeye-lstm']['input_shape'])
     hist = classifier.fit(trainX, trainY)
     plot_loss(hist, config['model_dir'], config['model'], True)
@@ -146,8 +147,14 @@ class Classifier_DEEPEYE_LSTM:
         x = keras.layers.Activation('relu')(x)
         return x

-    def build_model(self, input_shape):
-        input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
+    def build_model(self, input_shape, X=None):
+        """
+        @Oriel, could you please fix this function so that the split runs?
+        """
+        if config['split']:
+            input_layer = X
+        else:
+            input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
         lstm_tensor = self._LSTM_preprocessing(input_layer, self.feature_nb)
         x = lstm_tensor
         input_res = lstm_tensor
@@ -167,26 +174,23 @@ class Classifier_DEEPEYE_LSTM:
         model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
         return model

     def split_model(self, input_shape):
         input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
-        # output=[]
+        output=[]
         # run inception over each cluster
-        # for c in config['cluster'].keys():
-        #     output.append(self.build_model(input_shape = None, X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(
-        #         tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2)),axis=-1), c = c))
+        for c in config['cluster'].keys():
+            output.append(self.build_model(input_shape = None, X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(
+                tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2)),axis=-1), c = c))
         # concatenate the cluster results, then one Dense layer with last_channel dimension and the output layer
-        # x = tf.keras.layers.Concatenate(axis=1)(output)
-        # dense=tf.keras.layers.Dense(32, activation='relu')(x)
-        # output_layer=tf.keras.layers.Dense(1, activation='sigmoid')(dense)
-        # model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
-        # return model
+        x = tf.keras.layers.Concatenate(axis=1)(output)
+        dense=tf.keras.layers.Dense(32, activation='relu')(x)
+        output_layer=tf.keras.layers.Dense(1, activation='sigmoid')(dense)
+        model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
+        return model

     def fit(self, lstm_x, y):
         # Add early stopping and a reduced learning rate to mitigate overfitting
@@ -195,7 +199,7 @@ class Classifier_DEEPEYE_LSTM:
         early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
         csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
         if self.batch_size is None:
-            mini_batch_size = int(min(lstm_x.shape[0] / 10, 16))
+            mini_batch_size = int(min(deepeye_x.shape[0] / 10, 16))
         else:
             mini_batch_size = self.batch_size
         self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=self.lr), metrics=['accuracy'])
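On the open question above: one way to make the split path work is for build_model to accept the cluster key and, when it is handed a pre-sliced tensor X, return that cluster's output tensor instead of a full keras.Model, so that split_model can concatenate the branches. The sketch below is only an illustration of that idea under assumed names: the extra c argument, the stand-in Flatten/Dense branch, and the example cluster dictionary are not part of this repository, and tf.gather(..., axis=1) stands in for the diff's transpose/embedding_lookup/transpose.

import tensorflow as tf
from tensorflow import keras


def build_model(input_shape=None, X=None, c=None):
    """Sketch: if X is given (split path), return the cluster's output tensor
    so split_model can concatenate it; otherwise build a standalone model."""
    if X is not None:
        # Stand-in for the real per-cluster inception/LSTM preprocessing.
        x = keras.layers.Flatten()(X)
        return keras.layers.Dense(8, activation='relu', name=f'cluster_{c}')(x)
    input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
    x = keras.layers.Flatten()(input_layer)
    output_layer = keras.layers.Dense(1, activation='sigmoid')(x)
    return keras.models.Model(inputs=input_layer, outputs=output_layer)


def split_model(input_shape, clusters):
    """clusters maps a cluster name to the list of channel indices it contains."""
    input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
    outputs = []
    for c, idx in clusters.items():
        # Same effect as the diff's transpose/embedding_lookup/transpose:
        # pick the cluster's rows along the first non-batch axis.
        sub = tf.gather(input_layer, idx, axis=1)
        outputs.append(build_model(X=tf.expand_dims(sub, axis=-1), c=c))
    x = keras.layers.Concatenate(axis=1)(outputs)
    dense = keras.layers.Dense(32, activation='relu')(x)
    output_layer = keras.layers.Dense(1, activation='sigmoid')(dense)
    return keras.models.Model(inputs=input_layer, outputs=output_layer)


# Hypothetical usage: 129 rows x 500 columns, two made-up channel clusters.
model = split_model((129, 500), {'frontal': [0, 1, 2], 'occipital': [10, 11]})
model.summary()

Returning tensors (rather than Model objects) from the per-cluster call is what lets the final Concatenate/Dense head live in a single graph rooted at input_layer.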