Commit ebbcce36 authored by Ard Kastrati's avatar Ard Kastrati
Browse files

Commented some parts so that old methods still work. Please make sure that the...

Commented some parts so that old methods still work. Please make sure that the code works before pushing new methods
parent b07cd8f0
......@@ -17,8 +17,7 @@ sns.set_style('darkgrid')
def run(trainX, trainY):
if config['split']:
config['model']=config['model']+'_cluster'
if config['split']: config['model']=config['model']+'_cluster'
classifier = Classifier_DEEPEYE_LSTM(output_directory=config['root_dir'], input_shape=config['deepeye-lstm']['input_shape'])
hist = classifier.fit(trainX, trainY)
plot_loss(hist, config['model_dir'], config['model'], True)
......@@ -168,27 +167,26 @@ class Classifier_DEEPEYE_LSTM:
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
def split_model(self, input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
output=[]
def split_model(self, input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
# output=[]
# run inception over the cluster
for c in config['cluster'].keys():
# for c in config['cluster'].keys():
output.append(self.build_model(input_shape = None, X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(
tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2)),axis=-1), c = c))
# output.append(self.build_model(input_shape = None, X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(
# tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2)),axis=-1), c = c))
# append the results and perform 1 dense layer with last_channel dimension and the output layer
x = tf.keras.layers.Concatenate(axis=1)(output)
dense=tf.keras.layers.Dense(32, activation='relu')(x)
output_layer=tf.keras.layers.Dense(1, activation='sigmoid')(dense)
# x = tf.keras.layers.Concatenate(axis=1)(output)
# dense=tf.keras.layers.Dense(32, activation='relu')(x)
# output_layer=tf.keras.layers.Dense(1, activation='sigmoid')(dense)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
# model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
# return model
def fit(self, lstm_x, y):
# Add early stopping and reduced learning rate to mitigate overfitting
......@@ -197,7 +195,7 @@ class Classifier_DEEPEYE_LSTM:
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
if self.batch_size is None:
mini_batch_size = int(min(deepeye_x.shape[0] / 10, 16))
mini_batch_size = int(min(lstm_x.shape[0] / 10, 16))
else:
mini_batch_size = self.batch_size
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=self.lr), metrics=['accuracy'])
......
......@@ -108,7 +108,7 @@ class Classifier_INCEPTION:
x = keras.layers.Activation('relu')(x)
return x
def _build_model(self, input_shape, X, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40):
def _build_model(self, input_shape, X=[], nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40):
if config['split']:
input_layer = X
......
......@@ -35,8 +35,8 @@ deepeye: Our method
"""
# Choosing model
config['model'] = 'deepeye'
config['downsampled'] = True
config['model'] = 'inception'
config['downsampled'] = False
config['split'] = False
config['cluster'] = clustering()
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment