import matplotlib.pyplot as plt 
import numpy as np
import pandas as pd
from tensorflow import keras
import tensorflow as tf 
from matplotlib.ticker import FormatStrFormatter
import os
from config import config 


def plot_batches_log_loss(model_name):
    """
    Create loss and validation-loss plots from a run's batches.log file and save the figure under ../images/
    """
    run_dir = './runs/'  # must be correct relative to caller
    path = run_dir + model_name + 'batches.log'
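    # Assumed layout of batches.log (inferred from the column indices used below):
    # ';'-separated, with the epoch in column 0, the loss in column 1 and the validation loss in column 3.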
    df = pd.read_csv(path, sep=';')
    nparr = df.to_numpy()
    epochs = nparr[:, 0]
    loss = nparr[:, 1]
    val_loss = nparr[:, 3]

    plt.figure()  # start a fresh figure so repeated calls do not draw into the same axes
    plt.plot(epochs, loss, label='loss')
    plt.plot(epochs, val_loss, label='val_loss')
    plt.xlabel("epoch")
    plt.ylabel("mse loss")
    plt.title("mse")
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)

    save_path = '../images/'
    plt.savefig(fname=save_path + model_name, bbox_inches='tight')  # 'tight' keeps the legend outside the axes in the saved file

#TODO: create a class FilterPlot for better structuring 
def plot_filters(model, model_dir):
    """
    Create a plot for every filter in every convolutional layer and save it in the model's directory under filterplots/
    """
    #dir = './runs/' # must be correct relative to caller
    path = model_dir + '/filterplots/'
    # create a dir for the plots
    os.makedirs(path, exist_ok=True)  # do not fail if the directory already exists
    
    print(model.input_shape)
    # Unpack the input shape; the gradient-ascent image below is created as
    # (1, img_width, img_height), so unpack in the same order
    # (assumption: inputs are laid out as (batch, time samples, channels)).
    (_, img_width, img_height) = model.input_shape

    # Run the gradient ascent algorithm for all convolutional layers
    for layer in model.layers:
        if hasattr(layer, 'kernel_size'):
            # Set up a model that returns the activation values for our target layer
            feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
            # Run it for all filters in the conv layer
            weights = layer.get_weights()
            nb_filters = weights[0].shape[2]
            for i in range(nb_filters):
                loss, img = visualize_filter(i, img_width, img_height, feature_extractor)
                # Plot and save it
                plt.imshow(img.T, interpolation='none', cmap='Blues')
                plt.title(config['model'] + ", " + layer.name + ", Filter # {}".format(i))
                plt.ylabel("Channels")
                plt.xlabel("Time samples")
                plt.margins(0, 0)
                plt.axvline(x=64)
                plt.xlim(0, 70)
                plt.xticks(np.arange(0, 65, step=16))
                #plt.gca().xaxis.set_major_locator(plt.NullLocator())
                #plt.gca().yaxis.set_major_locator(plt.NullLocator())
                plt.savefig(path + layer.name + "_filternum_{}".format(i))
                plt.clf()  # clear the figure so the next filter plot starts clean

def plot_model(model, dir=config['model_dir'], show_shapes=True):
    """
    Plot the model as a graph and save it
    """
    pathname = dir + "/model_plot.png"
    keras.utils.plot_model(model, to_file=pathname, show_shapes=show_shapes)

def compute_loss(input_image, filter_index, feature_extractor):
    """
    Part of the gradient ascent algorithm to maximize filter activation
    """ 
    activation = feature_extractor(input_image)
    # Maximize the mean activation of the selected filter; the filters sit on the
    # last axis of the conv layer output, so index that axis rather than axis 1.
    filter_activation = activation[..., filter_index]
    return tf.reduce_mean(filter_activation)

@tf.function
def gradient_ascent_step(img, filter_index, learning_rate, feature_extractor):
    with tf.GradientTape() as tape:
        tape.watch(img)
        loss = compute_loss(img, filter_index, feature_extractor)
    # Compute gradients.
    grads = tape.gradient(loss, img)
    # Normalize gradients.
    grads = tf.math.l2_normalize(grads)
    img += learning_rate * grads
    return loss, img

def initialize_image(img_width, img_height):
    # We start from a gray image with some random noise 
    img = tf.random.uniform((1, img_width, img_height))
    # Scale the random values from [0, 1] to [-0.125, +0.125] so the starting point is close to neutral
    return (img - 0.5) * 0.25

def visualize_filter(filter_index, img_width, img_height, feature_extractor):
    # We run gradient ascent for 30 steps
    iterations = 30
    learning_rate = 10.0
    img = initialize_image(img_width, img_height)
    for iteration in range(iterations):
        loss, img = gradient_ascent_step(img, filter_index, learning_rate, feature_extractor)

    # Decode the resulting input image
    img = deprocess_image(img[0].numpy())
    return loss, img

def deprocess_image(img):
    # Normalize array: center on 0., set the standard deviation to 0.15
    img -= img.mean()
    img /= img.std() + 1e-5
    img *= 0.15

    # Center on 0.5 and clip to [0, 1]
    img += 0.5
    img = np.clip(img, 0, 1)

    # Scale to [0, 255] and convert to uint8
    img *= 255
    img = np.clip(img, 0, 255).astype("uint8")
    return img
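
# Minimal usage sketch (an assumption for illustration, not part of the original training
# pipeline): builds a toy Conv1D model just to show how visualize_filter() and
# deprocess_image() fit together. The input layout (500 time samples, 129 channels) and
# the layer name 'conv_demo' are made up for this demo; the real entry point is
# plot_filters(model, model_dir) with a trained model.
if __name__ == "__main__":
    toy_model = keras.Sequential([
        keras.layers.Conv1D(4, kernel_size=64, padding='same', name='conv_demo',
                            input_shape=(500, 129)),  # (time samples, channels) -- assumed layout
    ])
    conv_layer = toy_model.get_layer('conv_demo')
    extractor = keras.Model(inputs=toy_model.inputs, outputs=conv_layer.output)
    # Run gradient ascent for filter 0 and decode the resulting input pattern
    demo_loss, demo_img = visualize_filter(0, img_width=500, img_height=129,
                                           feature_extractor=extractor)
    print("activation:", float(demo_loss), "pattern shape:", demo_img.shape)  # expected (500, 129)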