Commit d459de8e authored by Lukas Wolf's avatar Lukas Wolf
Browse files

Added functionality for filter plots and a sanity check

parent 1e86f6cc
......@@ -6,6 +6,7 @@ import tensorflow.keras as keras
from config import config
from tensorflow.keras.callbacks import CSVLogger
import logging
from utils.analysis import sanity_check
class prediction_history(tf.keras.callbacks.Callback):
......@@ -138,4 +139,10 @@ class Regression_ConvNet(ABC):
# Fit model
hist = self.model.fit(X_train, y_train, verbose=verbose, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, prediction_ensemble])
# Log how good predictions in x and y directions are
if config['sanity_check'] and not config['data_mode'] == 'fix_sacc_fix':
x_mean_err, y_mean_err = sanity_check(self.model, X_val, y_val)
logging.info("x mean coordinate error: {:.2f}, y mean coordinate error: {:.2f}".format(x_mean_err, y_mean_err))
return hist , prediction_ensemble
\ No newline at end of file
......@@ -46,12 +46,12 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4
config['regularization'] = 1e-1 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1e-1, fix_sac_fix: 5
config['epochs'] = 100
config['epochs'] = 5
config['batch_size'] = 64
# Choose experiment
config['gaze-reg'] = True # Set to False if you want to run the saccade classification task
config['data-fraction'] = 1.0 # Set to 1.0 if you want to use the whole dataset, experimental feature only for regression task \
config['data-fraction'] = 0.1 # Set to 1.0 if you want to use the whole dataset, experimental feature only for regression task \
# Choose which dataset to run the gaze regression on
#config['data_mode'] = 'fix_only'
......@@ -73,6 +73,10 @@ config['model'] = 'cnn'
config['run'] = 'ensemble'
config['ensemble'] = 5 #number of models in the ensemble method
# Other functions that can be chosen optionally
config['sanity_check'] = True
config['plot_filters'] = True
# Set loss automatically depending on the dataset/task to run
if config['data_mode'] == 'fix_sacc_fix':
from utils.losses import angle_loss
......
This source diff could not be displayed because it is too large. You can view the blob instead.
import tensorflow as tf
from config import config
from utils.utils import *
from utils.losses import angle_loss
from utils.plot import plot_filters
import logging
from CNN.Regression_CNN import Regression_CNN
......@@ -31,6 +31,7 @@ def run(trainX, trainY):
mse = tf.keras.losses.MeanSquaredError()
hist = None
reg = None
loss = []
accuracy = []
......@@ -83,6 +84,9 @@ def run(trainX, trainY):
if i == 0:
pred = pred_ensemble.predhis
# Plot the filters of the conv modules only once for the first model
if config['plot_filters']:
plot_filters(reg.model, config['model_dir'])
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
......@@ -117,7 +121,8 @@ def run(trainX, trainY):
#if config['split']:
#config['model'] = config['model'] + '_cluster'
logging.info("Done with training and plotting.")
#TODO: rewrite the function below to properly store stats and results
#save_logs(hist, config['model_dir'], config['model'], pytorch = False)
logging.info("Done with training and plotting.")
import numpy as np
def sanity_check(model, X_val, y_val):
    """
    Predict gaze coordinates on the validation data and report accuracy.

    Runs model.predict on X_val and compares the predicted (x, y)
    coordinates against the ground truth in y_val.

    Returns
    -------
    tuple
        (mean absolute error in x, mean absolute error in y).
    """
    predictions = model.predict(X_val)
    # Column 0 holds the x coordinate, column 1 the y coordinate;
    # average the absolute error per coordinate over all samples.
    per_coord_error = np.mean(np.abs(predictions[:, :2] - y_val[:, :2]), axis=0)
    return per_coord_error[0], per_coord_error[1]
\ No newline at end of file
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow import keras
from matplotlib.ticker import FormatStrFormatter
import os
#path = '../runs/1615920751_xception_gaze-reg/batches.log'
def plot_batches_log_loss(model_name):
"""
......@@ -27,3 +28,37 @@ def plot_batches_log_loss(model_name):
plt.savefig(fname=save_path+model_name)
def plot_filters(model, model_dir):
    """
    Create a plot for every filter in every convolutional module and save
    it in the model's directory under filterplots/.

    Parameters
    ----------
    model : keras model
        Model whose layers are inspected; only layers whose name contains
        'conv' are plotted.
    model_dir : str
        Directory of the current run; plots are written to a 'filterplots'
        subdirectory which is created if necessary.
    """
    path = os.path.join(model_dir, 'filterplots')
    # exist_ok avoids a FileExistsError when this is called again for the
    # same run directory (the original os.makedirs(path) crashed on re-run).
    os.makedirs(path, exist_ok=True)
    # Create the plots
    for i, layer in enumerate(model.layers):
        # check for convolutional layers
        if 'conv' not in layer.name:
            continue
        # get_weights()[0] holds the kernel tensor (the bias is at index 1)
        filters = layer.get_weights()[0]
        # normalize filter values to 0-1 such that we can visualize them;
        # guard against constant filters, where f_max == f_min would
        # otherwise produce a division by zero (NaNs in the plot).
        f_min, f_max = filters.min(), filters.max()
        if f_max > f_min:
            filters = (filters - f_min) / (f_max - f_min)
        # Create the plot for each of the filter kernels
        kernel_size, input_dim, num_filters = filters.shape
        for filternum in range(num_filters):
            filter_channel = filters[:, :, filternum]
            fig, ax = plt.subplots()
            ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
            title = "Conv Layer {} with shape: {} \n Filter number: {}".format(i, filters.shape, filternum+1)
            ax.set_title(title)
            ax.set_ylabel("Normalized activation")
            ax.set_xlabel("Coefficients of kernel")
            # NOTE(review): filter_channel.T has shape (input_dim, kernel_size),
            # so indexing it with filternum raises IndexError whenever a layer
            # has more filters than input channels — verify the intended row.
            ax.plot(filter_channel.T[filternum])
            fname = "layer_{}_filter_{}".format(i, filternum + 1)
            fig.savefig(os.path.join(path, fname), facecolor='white', edgecolor='none')
            # Close this specific figure so long loops do not accumulate
            # open figures in matplotlib's global state.
            plt.close(fig)
......@@ -50,10 +50,6 @@ def plot_loss(hist, output_directory, model, val=False, savefig=True):
epochs = len(hist.history[metric])
epochs = np.arange(epochs)
logging.info("Length of hist.history[loss]: {}".format(len(hist.history['loss'])))
logging.info("Length of hist.history[val_loss]: {}".format(len(hist.history['val_loss'])))
logging.info("Length of epochs: {}".format(len(epochs)))
plt.figure()
plt.title(model + ' loss')
# plot the training curve
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment