Commit 103169b4 authored by Lukas Wolf

cleaned explain functionality - next step: build torch/tf

parent de90b261
@@ -21,4 +21,5 @@ preds.txt
angle_comparison.ipynb
retrain_model.ipynb
conv_analysis.ipynb
./scripts/*
\ No newline at end of file
scripts/*
src/*
\ No newline at end of file
@@ -6,8 +6,10 @@ import tensorflow.keras as keras
from config import config
from tensorflow.keras.callbacks import CSVLogger
import logging
import os
import numpy as np
from tf_explain.callbacks.integrated_gradients import IntegratedGradientsCallback
from tf_explain.callbacks.vanilla_gradients import VanillaGradientsCallback
class prediction_history(tf.keras.callbacks.Callback):
def __init__(self, validation_data):
@@ -15,7 +17,6 @@ class prediction_history(tf.keras.callbacks.Callback):
self.predhis = []
self.targets = validation_data[1]
def on_epoch_end(self, epoch, logs=None):
y_pred = self.model.predict(self.validation_data[0])
self.predhis.append(y_pred)
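# Note: predhis keeps a full copy of the validation predictions for every
# epoch, so its memory footprint grows linearly with the number of epochs.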
@@ -123,33 +124,40 @@ class ConvNet(ABC):
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
prediction_ensemble = prediction_history((X_val,y_val))
# Create a callback for tensorboard
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=config['tensorboard_log_dir'], histogram_freq=1)
"""
# Define callbacks for model interpretability experiments
# For integrated gradients, create a validation set with 0 and 1 labels and treat them separately
validation_class_zero = (np.array([el for el, label in zip(X_val, y_val) if np.all(np.argmax(label) == 0)][0:5]))
validation_class_one = (np.array([el for el, label in zip(X_val, y_val) if np.all(np.argmax(label) == 1)][0:5]))
logging.info(validation_class_zero)
logging.info(validation_class_one)
# Define the integrated gradient callbacks for the two classes
#integrated_grad_zero = IntegratedGradientsCallback(validation_class_zero, class_index=0, n_steps=20, output_dir=config['model_dir'] + '/integrated_grad_zero/')
#integrated_grad_one = IntegratedGradientsCallback(validation_class_one, class_index=1, n_steps=20, output_dir=config['model_dir'] + '/integrated_grad_one/')
X_zero, y_zero, X_one, y_one = split_valid_data(X_val[:100], y_val[:100])
if not os.path.exists(config['tensorboard_log_dir'] + '/integrated_grad_zero/'):
os.makedirs(config['tensorboard_log_dir'] + '/integrated_grad_zero/')
if not os.path.exists(config['tensorboard_log_dir'] + '/integrated_grad_one/'):
os.makedirs(config['tensorboard_log_dir'] + '/integrated_grad_one/')
integrated_grad_zero = IntegratedGradientsCallback(validation_data=(X_zero, y_zero), class_index=0, n_steps=20,
output_dir=config['tensorboard_log_dir'] + '/integrated_grad_zero/')
integrated_grad_one = IntegratedGradientsCallback(validation_data=(X_one, y_one), class_index=1, n_steps=20,
output_dir=config['tensorboard_log_dir'] + '/integrated_grad_one/')
"""
# Fit the model on the training data
hist = self.model.fit(X_train, y_train, verbose=2, batch_size=self.batch_size,
validation_data=(X_val,y_val), epochs=self.epochs,
callbacks=[csv_logger, ckpt, prediction_ensemble, early_stop, integrated_grad_one,
integrated_grad_zero, tensorboard_callback])
hist = self.model.fit(X_train, y_train, verbose=2, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, prediction_ensemble, early_stop]) # re-add integrated_grad_one, integrated_grad_zero to enable the explain callbacks
return hist, prediction_ensemble
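# Usage sketch (hypothetical caller; assumes one-hot validation labels):
#   hist, prediction_ensemble = net.fit(x, y)
#   final_preds = prediction_ensemble.predhis[-1]  # predictions after the last epoch
#   val_acc = np.mean(np.argmax(final_preds, axis=1) ==
#                     np.argmax(prediction_ensemble.targets, axis=1))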
def get_val_sets(X, y, label, num_elements):
"""
Disabled draft: return a small validation set of `label`-class samples for the integrated gradients callback.
X_val = []
y_val = []
for i in range(len(X)):
if len(X_val) > num_elements: # collected enough elements
return np.array(X_val), np.array(y_val)
if y[i][0] != label:
continue
X_val.append(X[i])
y_val.append(y[i])
return np.array(X_val), np.array(y_val)
"""
def split_valid_data(X, y):
X_zero = []
y_zero = []
X_one = []
y_one = []
for el, label in zip(X, y):
if label[0] == 0:
X_zero.append(el)
y_zero.append(label)
else:
X_one.append(el)
y_one.append(label)
return np.array(X_zero), np.array(y_zero), np.array(X_one), np.array(y_one)
\ No newline at end of file
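split_valid_data is duplicated in several files of this commit; a vectorized equivalent using boolean masks (a sketch with the same semantics, assuming y is a 2-D label array) would avoid the Python-level loop:
```
import numpy as np

def split_valid_data(X, y):
    # Split samples on the first label column: 0 -> class zero, anything else -> class one
    X, y = np.asarray(X), np.asarray(y)
    zero_mask = y[:, 0] == 0
    return X[zero_mask], y[zero_mask], X[~zero_mask], y[~zero_mask]
```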
"""
Class for ensemble training
TODO: implement
"""
\ No newline at end of file
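A minimal sketch of what the ensemble-training class could look like (interface and names are assumptions, not the repository's API; it mirrors config['ensemble'] as the number of models):
```
import numpy as np

class Ensemble:
    # Hypothetical wrapper: train n independent models and average their predictions
    def __init__(self, build_model, n_models=5):
        self.models = [build_model() for _ in range(n_models)]

    def fit(self, X, y, **kwargs):
        return [m.fit(X, y, **kwargs) for m in self.models]

    def predict(self, X):
        return np.mean([m.predict(X) for m in self.models], axis=0)
```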
from tensorflow.keras.models import Model
from deepexplain.tensorflow import DeepExplain
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
def split_valid_data(X, y):
"""
Split the validation data into two sets of zero- and one-labeled samples
"""
X_zero = []
y_zero = []
X_one = []
y_one = []
for el, label in zip(X, y):
if label[0] == 0:
X_zero.append(el)
y_zero.append(label)
else:
X_one.append(el)
y_one.append(label)
return np.array(X_zero), np.array(y_zero), np.array(X_one), np.array(y_one)
if __name__ == '__main__':
# Turn off eager execution to be able to use deep_explain
tf.compat.v1.disable_eager_execution()
# Load data
X = np.load("./data/precomputed/prosaccade/part_prosaccade_X.npy")
y = np.load("./data/precomputed/prosaccade/part_prosaccade_y.npy")
# Split the data
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# Create left and right samples
X_zero, y_zero, X_one, y_one = split_valid_data(X_val, y_val)
# Load the model
model = keras.models.load_model("./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/inception_best_model.h5")
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
# Apply deepexplain
with DeepExplain(session=K.get_session()) as de:
input_tensor = model.layers[0].input
fModel = Model(inputs=input_tensor, outputs=model.layers[-1].output)
target_tensor = fModel(input_tensor)
# epsilon-LRP ('elrp') can be used as well, see the commented line below
attributions = de.explain('deeplift', target_tensor, input_tensor, X_val)
# attributions = de.explain('elrp', target_tensor * Y_test, input_tensor, X_test)
print("Attributions shape: {}".format(attributions.shape))
\ No newline at end of file
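The script above only prints the attribution shape; a sketch for persisting the result (assuming matplotlib is available and attributions has shape (n_samples, 500, 129)):
```
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless rendering on the cluster
import matplotlib.pyplot as plt

np.save("deeplift_attributions.npy", attributions)
# Average absolute attribution over samples -> (time step, channel) saliency map
saliency = np.mean(np.abs(attributions), axis=0)
plt.imshow(saliency.T, aspect="auto", cmap="viridis")
plt.xlabel("time step")
plt.ylabel("EEG channel")
plt.colorbar(label="mean |attribution|")
plt.savefig("deeplift_saliency.png", dpi=150)
```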
#!/bin/bash
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=60G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
python deep_explain.py
echo "Finished at: $(date)"
exit 0
%% Cell type:markdown id: tags:
# In this notebook we use tf-explain on pretrained models
Reference: https://tf-explain.readthedocs.io/en/latest/usage.html
%% Cell type:code id: tags:
```
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
from tf_explain.core.integrated_gradients import IntegratedGradients
```
%% Cell type:code id: tags:
```
X = np.load("./data/precomputed/prosaccade/part_prosaccade_X.npy")
y = np.load("./data/precomputed/prosaccade/part_prosaccade_y.npy")
```
%% Cell type:code id: tags:
```
X_zero = X[0]
y_zero = y[0]
X_one = X[1]
y_one = y[1]
```
%% Cell type:code id: tags:
```
model = keras.models.load_model("./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/inception_best_model.h5")
```
%% Cell type:code id: tags:
```
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
```
%% Cell type:code id: tags:
```
#model.summary()
```
%% Cell type:code id: tags:
```
explainer = IntegratedGradients()
```
%% Cell type:code id: tags:
```
X_zero.shape
```
%%%% Output: execute_result
(500, 129)
%% Cell type:code id: tags:
```
grid = explainer.explain(validation_data=(X_zero, y_zero), model=model, class_index=0)
```
%%%% Output: error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-14-9017db136594> in <module>
----> 1 grid = explainer.explain(validation_data=(X_zero, y_zero), model=model, class_index=0)
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tf_explain/core/integrated_gradients.py in explain(self, validation_data, model, class_index, n_steps)
38 )
39
---> 40 integrated_gradients = IntegratedGradients.get_integrated_gradients(
41 interpolated_images, model, class_index, n_steps
42 )
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
860 # In this case we have not created variables on the first call. So we can
861 # run the first trace but we should fail if variables are created.
--> 862 results = self._stateful_fn(*args, **kwds)
863 if self._created_variables:
864 raise ValueError("Creating variables on a non-first call to a function"
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
2939 with self._lock:
2940 (graph_function,
-> 2941 filtered_flat_args) = self._maybe_define_function(args, kwargs)
2942 return graph_function._call_flat(
2943 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3359
3360 self._function_cache.missed.add(call_context_key)
-> 3361 graph_function = self._create_graph_function(args, kwargs)
3362 self._function_cache.primary[cache_key] = graph_function
3363
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3194 arg_names = base_arg_names + missing_arg_names
3195 graph_function = ConcreteFunction(
-> 3196 func_graph_module.func_graph_from_py_func(
3197 self._name,
3198 self._python_function,
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
988 _, original_func = tf_decorator.unwrap(python_func)
989
--> 990 func_outputs = python_func(*func_args, **func_kwargs)
991
992 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
632 xla_context.Exit()
633 else:
--> 634 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
635 return out
636
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
975 except Exception as e: # pylint:disable=broad-except
976 if hasattr(e, "ag_error_metadata"):
--> 977 raise e.ag_error_metadata.to_exception(e)
978 else:
979 raise
ValueError: in user code:
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tf_explain/core/integrated_gradients.py:70 get_integrated_gradients *
predictions = model(inputs)
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:998 __call__ **
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
/itet-stor/wolflu/net_scratch/conda_envs/tencu10/lib/python3.8/site-packages/tensorflow/python/keras/engine/input_spec.py:271 assert_input_compatibility
raise ValueError('Input ' + str(input_index) +
ValueError: Input 0 is incompatible with layer model_4: expected shape=(None, 500, 129), found shape=(5000, 129)
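%% Cell type:markdown id: tags:
The `ValueError` above comes from passing a single sample of shape `(500, 129)` where the model expects batched input of shape `(None, 500, 129)`: tf-explain interpolates along the first axis, so the 500 time steps get treated as the batch dimension (hence `(5000, 129)` after the default 10 interpolation steps). A likely fix, assuming the first axis of `X` indexes samples, is to keep the batch dimension when slicing:
%% Cell type:code id: tags:
```
# Keep the leading batch dimension so each input has shape (1, 500, 129)
X_zero, y_zero = X[0:1], y[0:1]
X_one, y_one = X[1:2], y[1:2]
grid = explainer.explain(validation_data=(X_zero, y_zero), model=model, class_index=0)
```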
%% Cell type:code id: tags:
```
explainer.save(grid, '.', 'int_grad_zero.png')
```
%% Cell type:code id: tags:
```
```
"""
This script is for the analysis of pretrained networks with tf-explain
"""
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
from tf_explain.callbacks.integrated_gradients import IntegratedGradientsCallback
from sklearn.model_selection import train_test_split
import time
def split_valid_data(X, y):
"""
Split the validation data into two sets of zero- and one-labeled samples
"""
X_zero = []
y_zero = []
X_one = []
y_one = []
for el, label in zip(X, y):
if label[0] == 0:
X_zero.append(el)
y_zero.append(label)
else:
X_one.append(el)
y_one.append(label)
return np.array(X_zero), np.array(y_zero), np.array(X_one), np.array(y_one)
def explain():
model_dir = "./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/"
# Load some data
X = np.load("./data/precomputed/prosaccade/prosaccade_X.npy")
y = np.load("./data/precomputed/prosaccade/prosaccade_y.npy")
# Split the data
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# Create left and right samples
# Define callbacks for model interpretability experiments
X_zero, y_zero, X_one, y_one = split_valid_data(X_val, y_val) # for integrated gradients, build separate validation sets for the 0- and 1-labeled classes
integrated_grad_zero = IntegratedGradientsCallback(validation_data=(X_zero, y_zero), class_index=0, n_steps=20,
output_dir=model_dir + "logs/fit/integrated_grad_zero/")
integrated_grad_one = IntegratedGradientsCallback(validation_data=(X_one, y_one), class_index=1, n_steps=20,
output_dir=model_dir + "logs/fit/integrated_grad_one/")
# Create callback for tensorboard
timestamp = str(int(time.time()))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=model_dir + "logs/fit/" + timestamp, histogram_freq=1)
# Load and compile model
model = keras.models.load_model("./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/inception_best_model.h5", compile=False)
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
# Fit again with the new callbacks (a single epoch is enough to trigger them)
model.fit(X_train, y_train, batch_size=16, epochs=1, callbacks=[integrated_grad_zero, integrated_grad_one, tensorboard_callback], verbose=2)
if __name__ == "__main__":
explain()
\ No newline at end of file
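Note that model.fit updates the pretrained weights even for a single epoch; if the goal is only to trigger the explain callbacks, one workaround (an assumption, not tested here) is to compile with a zero learning rate first:
```
# Keep the pretrained weights effectively frozen while the callbacks run
model.compile(loss='binary_crossentropy',
              optimizer=keras.optimizers.Adam(learning_rate=0.0),
              metrics=['accuracy'])
```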
#!/bin/bash
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=60G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
echo "Starting on: $(date)"
echo "SLURM_JOB_ID: ${SLURM_JOB_ID}"
# Exit on errors
set -o errexit
# Set a directory for temporary files unique to the job with automatic removal at job termination
TMPDIR=$(mktemp -d)
if [[ ! -d ${TMPDIR} ]]; then
echo 'Failed to create temp directory' >&2
exit 1
fi
trap "exit 1" HUP INT TERM
trap 'rm -rf "${TMPDIR}"' EXIT
export TMPDIR
# Binary or script to execute
python explain.py
echo "Finished at: $(date)"
exit 0
import tensorflow as tf
import numpy as np
import tensorflow.keras as keras
from tf_explain.core.integrated_gradients import IntegratedGradients
if __name__ == '__main__':
X = np.load("./data/precomputed/prosaccade/part_prosaccade_X.npy")
y = np.load("./data/precomputed/prosaccade/part_prosaccade_y.npy")
# Keep the leading batch dimension: the model expects inputs of shape (None, 500, 129)
X_zero = X[0:1]
y_zero = y[0:1]
X_one = X[1:2]
y_one = y[1:2]
model = keras.models.load_model("./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/inception_best_model.h5")
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
explainer = IntegratedGradients()
grid = explainer.explain(validation_data=(X_zero, y_zero), model=model, class_index=0, n_steps=20)
explainer.save(grid, '.', 'int_grad_zero.png')
\ No newline at end of file
#!/bin/bash
#SBATCH --ntasks=1
#SBATCH -t 04:00:00 # max runtime is 4 hours
#SBATCH -J tensorboard_server # name
#SBATCH -o /work/thpaul/tf_tools/tensorflow/im2txt/tb-%J.out #TODO: Where to save your output
# To run as an array job, use the following command:
# sbatch --partition=beards --array=0-0 tensorboardHam.sh
# squeue --user thpaul
source /home/thpaul/.bash_profile #TODO: Your profile
MODEL_DIR=/work/thpaul/tf_tools/tensorflow/im2txt/im2txt/model #TODO: Your TF model directory
let ipnport=($UID-6025)%65274
echo ipnport=$ipnport
ipnip=$(hostname -i)
echo ipnip=$ipnip
module load cuda/8.0 #TODO: Your Cuda Module if required
tensorboard --logdir="${MODEL_DIR}" --port=$ipnport
\ No newline at end of file
@@ -137,21 +137,21 @@ class Regression_ConvNet(ABC):
def fit(self, x, y, verbose=2):
# Split data
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
# Define callbacks
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
prediction_ensemble = prediction_history((X_val,y_val))
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
# Create a callback for tensorboard
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=config['tensorboard_log_dir'], histogram_freq=1)
# Fit model
hist = self.model.fit(X_train, y_train, verbose=verbose, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, prediction_ensemble])
epochs=self.epochs, callbacks=[csv_logger, ckpt, prediction_ensemble, tensorboard_callback])
# Log how good predictions in x and y directions are
if config['sanity_check'] and config['data_mode'] != 'fix_sacc_fix':
x_mean_err, y_mean_err = sanity_check(self.model, X_val, y_val)
logging.info("x mean coordinate error: {:.2f}, y mean coordinate error: {:.2f}".format(x_mean_err, y_mean_err))
return hist, prediction_ensemble
\ No newline at end of file
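For reference, a plausible implementation of the per-coordinate error check logged above (the repository's actual sanity_check may differ; the coordinate layout of y_val is an assumption):
```
import numpy as np

def sanity_check(model, X_val, y_val):
    # Mean absolute error per coordinate, assuming y_val columns are (x, y)
    pred = model.predict(X_val)
    x_mean_err = np.mean(np.abs(pred[:, 0] - y_val[:, 0]))
    y_mean_err = np.mean(np.abs(pred[:, 1] - y_val[:, 1]))
    return x_mean_err, y_mean_err
```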
@@ -29,12 +29,11 @@ TODO: write a proper description of how to set the fields in the config
"""
# Choose which task to run
config['task'] = 'prosaccade-clf'
#config['task'] = 'prosaccade-clf'
#config['task'] = 'gaze-reg'
#config['task'] = 'angle-reg'
config['task'] = 'angle-reg'
# Choose which experiment's dataset to load. This can only be chosen for angle-pred and gaze-reg
# TODO: also make calibration task data available for gaze-reg
if config['task'] != 'prosaccade-clf':
#config['dataset'] = 'processing_speed_task'
config['dataset'] = 'calibration_task'
@@ -69,8 +68,8 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
config['pretrained'] = False
# Choose model
#config['model'] = 'cnn'
config['model'] = 'inception'
config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'eegnet'
#config['model'] = 'deepeye'
#config['model'] = 'xception'
@@ -79,8 +78,8 @@ config['model'] = 'inception'
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, for inception on angle 1e-5
config['regularization'] = 1 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 100
config['regularization'] = 0 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 5
config['batch_size'] = 64
# Choose the kerastuner or an ensemble of models
@@ -90,7 +89,7 @@ config['ensemble'] = 1 #number of models in the ensemble method
# Other functions that can be chosen optionally
config['sanity_check'] = False
config['plot_model'] = True
config['plot_model'] = False
config['plot_filters'] = False # TODO: make this work; TF raises a ValueError because it gets shape (1,w,h) where (None,w,h) is expected
# Set loss automatically depending on the dataset/task to run
@@ -206,6 +205,11 @@ if not os.path.exists(config['model_dir']):
config['info_log'] = config['model_dir'] + '/' + 'info.log'
config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
# Create a directory to store logs for tensorboard
config['tensorboard_log_dir'] = config['model_dir'] + "/logs/fit/" + timestamp
if not os.path.exists(config['tensorboard_log_dir']):
os.makedirs(config['tensorboard_log_dir'])
# Save config to model dir
import pickle
config_path = config['model_dir'] + "/config.p"
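# Usage sketch (hypothetical later analysis): reload the saved run configuration
#   with open(config['model_dir'] + "/config.p", "rb") as f:
#       saved_config = pickle.load(f)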
@@ -110,7 +110,7 @@ def run(trainX, trainY):
loss_fname = config['model_dir'] + "/" + "ensemble_loss.txt"
np.savetxt(loss_fname, loss, delimiter=',')
# save the validation predictions to the model directory
# save the validation set predictions to the model directory
#pred_val_fname = config['model_dir'] + "/" + "pred_val.txt"
#logging.info("prediction val shape: {}".format(pred[-1]))
#np.savetxt(pred_val_fname, pred[-1], delimiter=',') # this is the prediction in the last epoch
@@ -124,10 +124,6 @@ def run(trainX, trainY):
hist.history['val_loss'] = np.array(loss)
# plot ensemble loss
plot_loss(hist, config['model_dir'], config['model'], val = True)
#if config['task'] == 'prosaccade-clf':
# Plot also accuracy for the saccade classification task
# hist.history['val_accuracy'] = accuracy
# plot_acc(hist, config['model_dir'], config['model'], val = True)
#if config['split']:
#config['model'] = config['model'] + '_cluster'
This diff is collapsed.
@@ -7,9 +7,18 @@ from scipy import io
import h5py
import logging
import time
import os
# Create debugging log for tensorboard
import tensorflow as tf
debug_dir = config['tensorboard_log_dir'] + '/tfdbg'
if not os.path.exists(debug_dir):
os.makedirs(debug_dir)
tf.debugging.experimental.enable_dump_debug_info(debug_dir, tensor_debug_mode="FULL_HEALTH", circular_buffer_size=-1)
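# The dump written above can be inspected in TensorBoard's "Debugger V2" tab by
# pointing --logdir at debug_dir.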
# Import the correct functions depending on the task
if config['task'] == 'gaze-reg' or config['task'] == 'angle-reg':
from ensemble_regression import run # gaze regression task
from ensemble_regression import run # Regression tasks
from kerasTuner_regression import tune
elif config['task'] == 'prosaccade-clf':
from ensemble import run # (pro-)saccade task
......
@@ -4,7 +4,7 @@
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=100G
#SBATCH --mem=80G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
deepexplain @ 87fb43a1