Commit 053f8caf authored by Lukas Wolf

calib task for all models works

parent f821ffd0
......@@ -14,4 +14,10 @@ models_scratch.py
/log/*
/images/*
/archive_runs/*
run.sh
\ No newline at end of file
run.sh
retrain.sh
labels.txt
preds.txt
angle_comparison.ipynb
retrain_model.ipynb
conv_analysis.ipynb
\ No newline at end of file
......@@ -36,8 +36,8 @@ config['task'] = 'angle-reg'
# Choose which experiment's dataset to load. This can only be set for angle-pred and gaze-reg
# TODO: also make calibration task data available for gaze-reg
if config['task'] != 'prosaccade-clf':
config['dataset'] = 'processing_speed_task'
#config['dataset'] = 'calibration_task'
#config['dataset'] = 'processing_speed_task'
config['dataset'] = 'calibration_task'
# Choose which data to use for gaze-reg
if config['task'] == 'gaze-reg':
......@@ -66,8 +66,8 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
"""
# Choose model
config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'cnn'
config['model'] = 'inception'
#config['model'] = 'eegnet'
#config['model'] = 'deepeye'
#config['model'] = 'xception'
......@@ -75,9 +75,9 @@ config['model'] = 'cnn'
#config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this model
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-4 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, calib_task
config['regularization'] = 1 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5
config['epochs'] = 150
config['learning_rate'] = 1e-5 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3, fix_sac_fix: 1e-4, inception on angle: 1e-5
config['regularization'] = 0 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, inception on angle: 0
config['epochs'] = 100
config['batch_size'] = 64
# Choose the kerastuner or an ensemble of models
......@@ -87,7 +87,8 @@ config['ensemble'] = 1 #number of models in the ensemble method
# Other optional functionality
config['sanity_check'] = False
config['plot_model'] = False
config['plot_model'] = False
config['plot_filters'] = False # TODO: make this work; TF raises a ValueError because the tensor has shape (1, w, h) where (None, w, h) is expected
# Set loss automatically depending on the dataset/task to run
if config['task'] == 'angle-reg':
......@@ -105,8 +106,10 @@ if config['task'] != 'prosaccade-clf':
config['padding'] = 'repeat' # options: zero, repeat
config['min_fixation'] = 50 # min number of samples for the gaze fixation
config['max_fixation'] = 150 # max number of samples for the gaze fixation
if config['dataset'] == 'calibration_task' or config['dataset'] == 'processing_speed_task':
config['fixation_padlength'] = config['max_fixation'] # for the proc speed task
if config['dataset'] == 'calibration_task':
config['max_fixation'] = 1000
config['fixation_padlength'] = 300 # cut off the fixation at this length
config['min_saccade'] = 10 # minimum number of samples for a saccade that we want to use
config['max_saccade'] = 30 # maximum number of samples for a saccade that we want to use
config['x_screen'] = 600
......@@ -150,18 +153,17 @@ if config['task'] != 'prosaccade-clf':
config['xception']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
# Choose the shapes for angle pred depending on dataset
elif config['data_mode'] == 'fix_sacc_fix' and config['dataset'] == 'processing_speed_task':
config['cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['cnn']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
elif config['data_mode'] == 'fix_sacc_fix' and config['dataset'] == 'calibration_task':
config['cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['cnn']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + 2 * config['fixation_padlength'], 129)
# These models are not yet implemented for regression
#config['deepeye-rnn']['input_shape'] = (int(config['max_duration']), 129)
#config['eegnet']['channels'] = 129
......
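The input shapes above follow directly from the padding lengths: a fix_sacc_fix sample is one saccade padded to max_saccade plus two fixations padded to fixation_padlength, over 129 EEG channels. A quick check with the values set in this commit:

```
# Shape arithmetic for the fix_sacc_fix input (129 EEG channels).
max_saccade = 30
# processing_speed_task: fixation_padlength = max_fixation = 150
print((max_saccade + 2 * 150, 129))  # -> (330, 129)
# calibration_task: fixations are cut off at fixation_padlength = 300
print((max_saccade + 2 * 300, 129))  # -> (630, 129)
```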
......@@ -4,6 +4,7 @@ import numpy as np
from config import config
from utils.utils import *
from utils.plot import plot_model
from utils.plot import plot_filters
import logging
from CNN.CNN import Classifier_CNN
......@@ -59,8 +60,12 @@ def run(trainX, trainY):
for j, pred_epoch in enumerate(pred_ensemble.predhis):
pred[j] = (np.array(pred[j])+np.array(pred_epoch))
if config['plot_model']:
# Plot the model as a graph with keras
if config['plot_model'] and i == 0: # only for the first model
plot_model(classifier.model)
# Plot the patterns that the filters look for
if config['plot_filters'] and i == 0:
plot_filters(classifier.model, config['model_dir'])
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch/config['ensemble']).tolist()
......@@ -73,6 +78,8 @@ def run(trainX, trainY):
if config['split']:
config['model'] = config['model'] + '_cluster'
hist.history['val_loss'] = loss
hist.history['val_accuracy'] = accuracy
plot_loss(hist, config['model_dir'], config['model'], val = True)
......
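The plot_filters helper imported above is not part of this diff. A minimal sketch of what such a function might look like, assuming it takes the trained Keras model and the model directory; the normalization, file name, and choice of layer are illustrative, not the project's actual implementation:

```
import os
import matplotlib.pyplot as plt

def plot_filters(model, model_dir):
    # Hypothetical sketch: render the kernels of the first Conv1D layer as a
    # heatmap (filter index x kernel position), averaged over input channels.
    for layer in model.layers:
        if hasattr(layer, 'kernel_size') and layer.get_weights():
            kernels = layer.get_weights()[0]  # (kernel_size, channels, filters)
            plt.figure()
            plt.imshow(kernels.mean(axis=1).T, aspect='auto')
            plt.xlabel('kernel position')
            plt.ylabel('filter')
            plt.title(layer.name)
            plt.savefig(os.path.join(model_dir, layer.name + '_filters.png'))
            plt.close()
            break  # only the first conv layer in this sketch
```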
......@@ -3,6 +3,7 @@ from config import config
from utils.utils import *
from utils.losses import angle_loss
from utils.plot import plot_model
from utils.plot import plot_filters
import logging
from CNN.Regression_CNN import Regression_CNN
......@@ -35,38 +36,31 @@ def run(trainX, trainY):
for i in range(config['ensemble']):
print('Beginning model number {}/{} ...'.format(i+1, config['ensemble']))
if config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = config['epochs'],
nb_filters=16, verbose=True, batch_size=config['batch_size'], use_residual=True, depth=10,
learning_rate=config['learning_rate'])
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=64, depth=12, bottleneck_size=32, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'], batch_size=config['batch_size'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=32, depth=10, epochs=config['epochs'], preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True, bottleneck_size=16,
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=config['epochs'], depth=8,
batch_size=config['batch_size'], learning_rate=config['learning_rate'],
regularization=config['regularization'])
elif config['model'] == 'siamese':
reg = Siamese_ConvNet(input_shape=None, use_residual=True, batch_size=config['batch_size'],
kernel_size=40, nb_filters=64, depth=12, epochs=config['epochs'], # nb_filters=64 was default from inception
learning_rate=config['learning_rate'], regularization=config['regularization'])
else:
logging.info('Cannot start the program. Please choose a valid model in config.py')
......@@ -80,7 +74,6 @@ def run(trainX, trainY):
"""
hist, pred_ensemble = reg.fit(trainX, trainY)
if i == 0:
# store the first model's predictions on the validation set in pred
pred = pred_ensemble.predhis
......@@ -88,23 +81,25 @@ def run(trainX, trainY):
#targets_val_fname = config['model_dir'] + "/" + "targets_val.txt"
#logging.info("targets shape: {}".format(pred_ensemble.targets.shape))
#np.savetxt(targets_val_fname, pred_ensemble.targets, delimiter=',')
# Plot the model
if config['plot_model']:
plot_model(reg.model)
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
# add up the results of the different models for each epoch into pred, which is a list of lists (one per epoch)
pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
# Plot the model as a graph with keras
if config['plot_model'] and i == 0: # only for the first model
plot_model(reg.model)
# Plot the patterns that the filters look for
if config['plot_filters'] and i == 0:
plot_filters(reg.model, config['model_dir'])
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch / config['ensemble']).tolist() # divide by the ensemble size to get the mean prediction
# Compute the loss
if config['data_mode'] == 'fix_sacc_fix':
loss.append(angle_loss(pred_ensemble.targets, pred_epoch).numpy())
else:
loss.append(mse(pred_ensemble.targets, pred_epoch).numpy())
pred_epoch = np.round(pred_epoch, 0) # round to the nearest integer
# save the ensemble loss to the model directory
......@@ -116,7 +111,6 @@ def run(trainX, trainY):
#logging.info("prediction val shape: {}".format(pred[-1]))
#np.savetxt(pred_val_fname, pred[-1], delimiter=',') # this is the prediction in the last epoch
if(config['ensemble'] == 1):
# Only one model, just plot the loss
plot_loss(hist, config['model_dir'], config['model'], val = True)
......@@ -133,9 +127,6 @@ def run(trainX, trainY):
#if config['split']:
#config['model'] = config['model'] + '_cluster'
#TODO: rewrite the function below to properly store stats and results
#save_logs(hist, config['model_dir'], config['model'], pytorch = False)
logging.info("Done with training and plotting.")
logging.info("Done with training and plotting.")
\ No newline at end of file
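The angle_loss imported from utils.losses is applied to fix_sacc_fix targets above but its body is not shown in this diff. A common formulation for a periodic angle regression loss, given here only as a plausible sketch and not necessarily the project's definition, wraps the error to [-pi, pi] before squaring:

```
import tensorflow as tf

def angle_loss_sketch(y_true, y_pred):
    # Wrap the angular difference to [-pi, pi] via atan2(sin, cos),
    # then take the mean of the squared wrapped error.
    diff = y_true - y_pred
    wrapped = tf.atan2(tf.sin(diff), tf.cos(diff))
    return tf.reduce_mean(tf.square(wrapped))
```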
%% Cell type:code id: tags:
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
```
%% Cell type:code id: tags:
```
model_path = "./archive_runs/prosaccade/1618069872_inception_left-right-pred_ensemble/inception_best_model.h5"
```
%% Cell type:code id: tags:
```
model = keras.models.load_model(model_path)
```
%% Cell type:code id: tags:
```
for layer in model.layers:
if hasattr(layer, 'kernel_size'):
print(layer.name, layer.kernel_size)
```
%%%% Output: stream
conv1d_256 (1,)
conv1d_257 (64,)
conv1d_258 (32,)
conv1d_259 (16,)
conv1d_260 (1,)
conv1d_261 (1,)
conv1d_262 (64,)
conv1d_263 (32,)
conv1d_264 (16,)
conv1d_265 (1,)
conv1d_266 (1,)
conv1d_267 (64,)
conv1d_268 (32,)
conv1d_269 (16,)
conv1d_270 (1,)
conv1d_271 (1,)
conv1d_272 (1,)
conv1d_273 (64,)
conv1d_274 (32,)
conv1d_275 (16,)
conv1d_276 (1,)
conv1d_277 (1,)
conv1d_278 (64,)
conv1d_279 (32,)
conv1d_280 (16,)
conv1d_281 (1,)
conv1d_282 (1,)
conv1d_283 (64,)
conv1d_284 (32,)
conv1d_285 (16,)
conv1d_286 (1,)
conv1d_287 (1,)
conv1d_288 (1,)
conv1d_289 (64,)
conv1d_290 (32,)
conv1d_291 (16,)
conv1d_292 (1,)
conv1d_293 (1,)
conv1d_294 (64,)
conv1d_295 (32,)
conv1d_296 (16,)
conv1d_297 (1,)
conv1d_298 (1,)
conv1d_299 (64,)
conv1d_300 (32,)
conv1d_301 (16,)
conv1d_302 (1,)
conv1d_303 (1,)
conv1d_304 (1,)
conv1d_305 (64,)
conv1d_306 (32,)
conv1d_307 (16,)
conv1d_308 (1,)
conv1d_309 (1,)
conv1d_310 (64,)
conv1d_311 (32,)
conv1d_312 (16,)
conv1d_313 (1,)
conv1d_314 (1,)
conv1d_315 (64,)
conv1d_316 (32,)
conv1d_317 (16,)
conv1d_318 (1,)
conv1d_319 (1,)
%% Cell type:code id: tags:
```
layer = model.layers[1]
layer.name
```
%%%% Output: execute_result
'conv1d_256'
%% Cell type:code id: tags:
```
weights = layer.get_weights()
```
%% Cell type:code id: tags:
```
weights[0].shape
```
%%%% Output: execute_result
(1, 129, 16)
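The (1, 129, 16) shape matches Keras' Conv1D weight layout of (kernel_size, input_channels, filters): this layer is the 1x1 bottleneck convolution that maps the 129 EEG channels down to 16 filters.

```
# Unpack the Conv1D weight layout: (kernel_size, input_channels, filters)
kernel_size, in_channels, n_filters = weights[0].shape
print(kernel_size, in_channels, n_filters)  # 1 129 16
```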
%% Cell type:code id: tags:
```
layer_names = [layer.name for layer in model.layers]
#layer_names
```
%% Cell type:code id: tags:
```
layer_outputs = [layer.output for layer in model.layers]
#layer_output
```
%% Cell type:code id: tags:
```
feature_map_model = tf.keras.Model(inputs=model.input, outputs=layer_outputs)
```
%% Cell type:code id: tags:
```
from utils.IOHelper import get_mat_data
from config import config
```
%% Cell type:code id: tags:
```
X = np.load("./data/precomputed/prosaccade/part_prosaccade_X.npy")
sample = X[0]  # avoid shadowing the built-in input()
sample.shape
```
%%%% Output: execute_result
(500, 129)
%% Cell type:code id: tags:
```
sample = sample.reshape((1,) + sample.shape)  # add the batch dimension
sample.shape
```
%%%% Output: execute_result
(1, 500, 129)
%% Cell type:code id: tags:
```
feature_maps = feature_map_model.predict(sample)
```
%% Cell type:code id: tags:
```
for layer_name, feature_map in zip(layer_names, feature_maps):
print(f"The shape of the {layer_name} is =======>> {feature_map.shape}")
```
%%%% Output: stream
The shape of the input_5 is =======>> (1, 500, 129)
The shape of the conv1d_256 is =======>> (1, 500, 16)
The shape of the max_pooling1d_48 is =======>> (1, 500, 129)
The shape of the conv1d_257 is =======>> (1, 500, 16)
The shape of the conv1d_258 is =======>> (1, 500, 16)
The shape of the conv1d_259 is =======>> (1, 500, 16)
The shape of the conv1d_260 is =======>> (1, 500, 16)
The shape of the concatenate_48 is =======>> (1, 500, 64)
The shape of the batch_normalization_64 is =======>> (1, 500, 64)
The shape of the activation_64 is =======>> (1, 500, 64)
The shape of the conv1d_261 is =======>> (1, 500, 16)
The shape of the max_pooling1d_49 is =======>> (1, 500, 64)
The shape of the conv1d_262 is =======>> (1, 500, 16)
The shape of the conv1d_263 is =======>> (1, 500, 16)
The shape of the conv1d_264 is =======>> (1, 500, 16)
The shape of the conv1d_265 is =======>> (1, 500, 16)
The shape of the concatenate_49 is =======>> (1, 500, 64)
The shape of the batch_normalization_65 is =======>> (1, 500, 64)
The shape of the activation_65 is =======>> (1, 500, 64)
The shape of the conv1d_266 is =======>> (1, 500, 16)
The shape of the max_pooling1d_50 is =======>> (1, 500, 64)
The shape of the conv1d_267 is =======>> (1, 500, 16)
The shape of the conv1d_268 is =======>> (1, 500, 16)
The shape of the conv1d_269 is =======>> (1, 500, 16)
The shape of the conv1d_270 is =======>> (1, 500, 16)
The shape of the concatenate_50 is =======>> (1, 500, 64)
The shape of the conv1d_271 is =======>> (1, 500, 64)
The shape of the batch_normalization_66 is =======>> (1, 500, 64)
The shape of the batch_normalization_67 is =======>> (1, 500, 64)
The shape of the activation_66 is =======>> (1, 500, 64)
The shape of the add_16 is =======>> (1, 500, 64)
The shape of the activation_67 is =======>> (1, 500, 64)
The shape of the conv1d_272 is =======>> (1, 500, 16)
The shape of the max_pooling1d_51 is =======>> (1, 500, 64)
The shape of the conv1d_273 is =======>> (1, 500, 16)
The shape of the conv1d_274 is =======>> (1, 500, 16)
The shape of the conv1d_275 is =======>> (1, 500, 16)
The shape of the conv1d_276 is =======>> (1, 500, 16)
The shape of the concatenate_51 is =======>> (1, 500, 64)
The shape of the batch_normalization_68 is =======>> (1, 500, 64)
The shape of the activation_68 is =======>> (1, 500, 64)
The shape of the conv1d_277 is =======>> (1, 500, 16)
The shape of the max_pooling1d_52 is =======>> (1, 500, 64)
The shape of the conv1d_278 is =======>> (1, 500, 16)
The shape of the conv1d_279 is =======>> (1, 500, 16)
The shape of the conv1d_280 is =======>> (1, 500, 16)
The shape of the conv1d_281 is =======>> (1, 500, 16)
The shape of the concatenate_52 is =======>> (1, 500, 64)
The shape of the batch_normalization_69 is =======>> (1, 500, 64)
The shape of the activation_69 is =======>> (1, 500, 64)
The shape of the conv1d_282 is =======>> (1, 500, 16)
The shape of the max_pooling1d_53 is =======>> (1, 500, 64)
The shape of the conv1d_283 is =======>> (1, 500, 16)
The shape of the conv1d_284 is =======>> (1, 500, 16)
The shape of the conv1d_285 is =======>> (1, 500, 16)
The shape of the conv1d_286 is =======>> (1, 500, 16)
The shape of the concatenate_53 is =======>> (1, 500, 64)
The shape of the conv1d_287 is =======>> (1, 500, 64)
The shape of the batch_normalization_70 is =======>> (1, 500, 64)
The shape of the batch_normalization_71 is =======>> (1, 500, 64)
The shape of the activation_70 is =======>> (1, 500, 64)
The shape of the add_17 is =======>> (1, 500, 64)
The shape of the activation_71 is =======>> (1, 500, 64)
The shape of the conv1d_288 is =======>> (1, 500, 16)
The shape of the max_pooling1d_54 is =======>> (1, 500, 64)
The shape of the conv1d_289 is =======>> (1, 500, 16)
The shape of the conv1d_290 is =======>> (1, 500, 16)
The shape of the conv1d_291 is =======>> (1, 500, 16)
The shape of the conv1d_292 is =======>> (1, 500, 16)
The shape of the concatenate_54 is =======>> (1, 500, 64)
The shape of the batch_normalization_72 is =======>> (1, 500, 64)
The shape of the activation_72 is =======>> (1, 500, 64)
The shape of the conv1d_293 is =======>> (1, 500, 16)
The shape of the max_pooling1d_55 is =======>> (1, 500, 64)
The shape of the conv1d_294 is =======>> (1, 500, 16)
The shape of the conv1d_295 is =======>> (1, 500, 16)
The shape of the conv1d_296 is =======>> (1, 500, 16)
The shape of the conv1d_297 is =======>> (1, 500, 16)
The shape of the concatenate_55 is =======>> (1, 500, 64)
The shape of the batch_normalization_73 is =======>> (1, 500, 64)
The shape of the activation_73 is =======>> (1, 500, 64)
The shape of the conv1d_298 is =======>> (1, 500, 16)
The shape of the max_pooling1d_56 is =======>> (1, 500, 64)
The shape of the conv1d_299 is =======>> (1, 500, 16)
The shape of the conv1d_300 is =======>> (1, 500, 16)
The shape of the conv1d_301 is =======>> (1, 500, 16)
The shape of the conv1d_302 is =======>> (1, 500, 16)
The shape of the concatenate_56 is =======>> (1, 500, 64)
The shape of the conv1d_303 is =======>> (1, 500, 64)
The shape of the batch_normalization_74 is =======>> (1, 500, 64)
The shape of the batch_normalization_75 is =======>> (1, 500, 64)
The shape of the activation_74 is =======>> (1, 500, 64)
The shape of the add_18 is =======>> (1, 500, 64)
The shape of the activation_75 is =======>> (1, 500, 64)
The shape of the conv1d_304 is =======>> (1, 500, 16)
The shape of the max_pooling1d_57 is =======>> (1, 500, 64)
The shape of the conv1d_305 is =======>> (1, 500, 16)
The shape of the conv1d_306 is =======>> (1, 500, 16)
The shape of the conv1d_307 is =======>> (1, 500, 16)
The shape of the conv1d_308 is =======>> (1, 500, 16)
The shape of the concatenate_57 is =======>> (1, 500, 64)
The shape of the batch_normalization_76 is =======>> (1, 500, 64)
The shape of the activation_76 is =======>> (1, 500, 64)
The shape of the conv1d_309 is =======>> (1, 500, 16)
The shape of the max_pooling1d_58 is =======>> (1, 500, 64)
The shape of the conv1d_310 is =======>> (1, 500, 16)
The shape of the conv1d_311 is =======>> (1, 500, 16)
The shape of the conv1d_312 is =======>> (1, 500, 16)
The shape of the conv1d_313 is =======>> (1, 500, 16)
The shape of the concatenate_58 is =======>> (1, 500, 64)
The shape of the batch_normalization_77 is =======>> (1, 500, 64)
The shape of the activation_77 is =======>> (1, 500, 64)
The shape of the conv1d_314 is =======>> (1, 500, 16)
The shape of the max_pooling1d_59 is =======>> (1, 500, 64)
The shape of the conv1d_315 is =======>> (1, 500, 16)
The shape of the conv1d_316 is =======>> (1, 500, 16)
The shape of the conv1d_317 is =======>> (1, 500, 16)
The shape of the conv1d_318 is =======>> (1, 500, 16)
The shape of the concatenate_59 is =======>> (1, 500, 64)
The shape of the conv1d_319 is =======>> (1, 500, 64)
The shape of the batch_normalization_78 is =======>> (1, 500, 64)
The shape of the batch_normalization_79 is =======>> (1, 500, 64)
The shape of the activation_78 is =======>> (1, 500, 64)
The shape of the add_19 is =======>> (1, 500, 64)
The shape of the activation_79 is =======>> (1, 500, 64)
The shape of the global_average_pooling1d_4 is =======>> (1, 64)
The shape of the dense_4 is =======>> (1, 1)
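Each feature map is a (1, time, channels) array, so a single filter response can be inspected directly; a minimal sketch, assuming matplotlib is available in this environment:

```
import matplotlib.pyplot as plt

# Plot the first filter of the first conv layer (feature_maps[0] is the input).
plt.plot(feature_maps[1][0, :, 0])
plt.xlabel('sample')
plt.ylabel('activation')
plt.title(layer_names[1])
plt.show()
```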
%% Cell type:code id: tags:
```
import matplotlib.pyplot as plt  # needed once the disabled block below is re-enabled

for layer_name, feature_map in zip(layer_names, feature_maps):
    print(feature_map.shape)
    # The block below is kept disabled; it sketches how each 1D feature map
    # could be normalized to uint8 and stacked channel-wise into an image belt.
    """
    if len(feature_map.shape) == 3:
        k = feature_map.shape[-1]    # number of filters/channels
        size = feature_map.shape[1]  # number of time samples
        image_belt = np.zeros((k, size), dtype='uint8')
        for i in range(k):
            feature_image = feature_map[0, :, i].astype('float64')
            feature_image -= feature_image.mean()
            feature_image /= feature_image.std()
            feature_image *= 64
            feature_image += 128
            image_belt[i, :] = np.clip(feature_image, 0, 255).astype('uint8')
        scale = 20. / k
        plt.figure(figsize=(scale * k, scale))
        plt.title(layer_name)
        plt.grid(False)
        plt.imshow(image_belt, aspect='auto')
    """
```
%%%% Output: stream
(1, 500, 129)
(1, 500, 16)
(1, 500, 129)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 16)
(1, 500, 64)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 16)
(1, 500, 64)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 16)
(1, 500, 64)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 16)
(1, 500, 64)
(1, 500, 64)
(1, 500, 64)
(1, 500, 16)
(1, 500, 64)
(1, 500, 16)
(1, 500, 16)