Commit 45be1c8f authored by Lukas Wolf

torch cnn works. added more models

parent cf96b098
......@@ -41,8 +41,8 @@ config['root_dir'] = '.'
##################################################################
#config['task'] = 'prosaccade-clf'
config['task'] = 'gaze-reg'
#config['task'] = 'angle-reg'
#config['task'] = 'gaze-reg'
config['task'] = 'angle-reg'
if config['task'] != 'prosaccade-clf':
#config['dataset'] = 'processing_speed_task'
......@@ -59,7 +59,7 @@ config['framework'] = 'torch'
# Choose model
##################################################################
config['ensemble'] = 5 #number of models in the ensemble
config['ensemble'] = 3 #number of models in the ensemble
config['pretrained'] = False # We can use a model pretrained on processing speed task
config['model'] = 'cnn'
......@@ -76,7 +76,7 @@ config['model'] = 'cnn'
##################################################################
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, for inception on angle 1e-5
config['regularization'] = 0 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 2
config['epochs'] = 10
config['batch_size'] = 64
##################################################################
......@@ -90,7 +90,7 @@ config['run'] = 'ensemble'
##################################################################
config['tensorboard_on'] = False
config['sanity_check'] = False
config['plot_model'] = False
config['plot_model'] = True
##################################################################
# Options for prosaccade task, currently not used for regression
......@@ -107,9 +107,8 @@ if config['task'] != 'prosaccade-clf':
config['min_fixation'] = 50 # min number of samples for the gaze fixation
config['max_fixation'] = 150 # max number of samples for the gaze fixation
config['fixation_padlength'] = 300 # for the proc speed task to be
if config['dataset'] == 'calibration_task':
if config['dataset'] == 'calibration_task': # has much longer fixations
config['max_fixation'] = 1000
config['fixation_padlength'] = 300 # cut off the fixation at this length
config['min_saccade'] = 10 # minimum number of samples for a saccade that we want to use
config['max_saccade'] = 30 # maximum number of samples for a saccade that we want to use
config['x_screen'] = 600
......@@ -220,6 +219,10 @@ if config['tensorboard_on']:
if not os.path.exists(config['tensorboard_log_dir']):
os.makedirs(config['tensorboard_log_dir'])
config['best_models_dir'] = config['model_dir'] + "/best_models/"
if not os.path.exists(config['best_models_dir']):
os.makedirs(config['best_models_dir'])
# Save config to model dir
import pickle
config_path = config['model_dir'] + "/config.p"
......
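For reference, a minimal sketch of the save/load round trip for the config dict (the actual dump call is truncated in this diff; config_path is built as above):

import pickle

# Hypothetical round trip of the experiment config
with open(config_path, "wb") as f:
    pickle.dump(config, f)
with open(config_path, "rb") as f:
    restored = pickle.load(f)
assert restored['task'] == config['task']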
......@@ -7,6 +7,9 @@ from utils.utils import train_val_split
class prediction_history(tf.keras.callbacks.Callback):
"""
Prediction history for model ensembles
"""
def __init__(self, validation_data):
self.validation_data = validation_data
self.predhis = []
......
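The callback body is truncated here; a plausible epoch hook, mirroring the torch Prediction_history further down, would record the validation predictions after every epoch (a sketch, not the actual implementation):

def on_epoch_end(self, epoch, logs=None):
    # Assumes validation_data is the (X_val, y_val) tuple passed to __init__
    x_val, _ = self.validation_data
    self.predhis.append(self.model.predict(x_val))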
import tensorflow as tf
from config import config
from utils.utils import *
import logging
from Regression_ConvNet import Regression_ConvNet
from tensorflow.keras.constraints import max_norm
class Regression_DEEPEYE(Regression_ConvNet):
"""
Regression_DeepEye is an architecture that combines ideas from InceptionTime, Xception, and EEGNet.
It extends the class Regression_ConvNet, which is made of modules with a specific depth.
"""
def __init__(self, input_shape, kernel_size=40, nb_filters=32, verbose=True, batch_size=64, use_residual=True,
depth=6, bottleneck_size=32, preprocessing=True, preprocessing_F1 = 8, preprocessing_D = 2,
preprocessing_kernLength = 250, use_simple_convolution=True, use_separable_convolution=True,
epochs=1, learning_rate=0.01, regularization=0.01):
"""
The DeepEye architecture offers an optional preprocessing step inspired by EEGNet.
It is made of modules of a specific depth. Each module consists of an InceptionTime submodule, a separable
convolution, and a simple convolution with max pooling for stability.
"""
self.preprocessing_F1 = preprocessing_F1
self.preprocessing_D = preprocessing_D
self.preprocessing_kernLength = preprocessing_kernLength
self.bottleneck_size = bottleneck_size
self.use_simple_convolution = use_simple_convolution
self.use_separable_convolution = use_separable_convolution
self.regularization = regularization
if preprocessing: input_shape = input_shape + (1,)
super(Regression_DEEPEYE, self).__init__(input_shape=input_shape, kernel_size=kernel_size, nb_filters=nb_filters,
verbose=verbose, batch_size=batch_size, use_residual=use_residual,
depth=depth, preprocessing=preprocessing, epochs=epochs, learning_rate=learning_rate)
if preprocessing: logging.info('--------------- preprocessing_F1 : ' + str(self.preprocessing_F1))
if preprocessing: logging.info('--------------- preprocessing_D : ' + str(self.preprocessing_D))
if preprocessing: logging.info('--------------- preprocessing_kernLength : ' + str(self.preprocessing_kernLength))
logging.info('--------------- bottleneck_size : ' + str(self.bottleneck_size))
logging.info('--------------- use_simple_convolution : ' + str(self.use_simple_convolution))
logging.info('--------------- use_separable_convolution : ' + str(self.use_separable_convolution))
def _preprocessing(self, input_tensor):
"""
The preprocessing step of DeepEye. It is inspired by EEGNet, which filters the signal
into spatially specific band-pass frequencies.
"""
print(input_tensor.shape)
# Filter slides horizontally
horizontal_tensor = tf.keras.layers.Conv2D(self.preprocessing_F1, (self.preprocessing_kernLength, 1), padding='same',
input_shape=input_tensor.shape[1:], use_bias=False)(input_tensor)
horizontal_tensor = tf.keras.layers.BatchNormalization()(horizontal_tensor)
# Filter slides vertically
vertical_tensor = tf.keras.layers.DepthwiseConv2D((1, input_tensor.shape[2]), use_bias=False,
depth_multiplier=self.preprocessing_D,
depthwise_constraint=max_norm(1.))(horizontal_tensor)
vertical_tensor = tf.keras.layers.BatchNormalization()(vertical_tensor)
eeg_tensor = tf.keras.layers.Activation('elu')(vertical_tensor)
eeg_tensor = tf.keras.layers.Dropout(0.5)(eeg_tensor)
output_tensor = eeg_tensor[:, :, 0, :]
return output_tensor
def _module(self, input_tensor, current_depth):
"""
The module of DeepEye. It starts with the InceptionTime bottleneck, which is followed by parallel filters with
different kernel sizes; the defaults are [40, 20, 10]. In parallel it uses a simple convolution and a separable
convolution to exploit the 'extreme' convolutions described in the Xception paper.
"""
if int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
else:
input_inception = input_tensor
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
conv_list = []
for i in range(len(kernel_size_s)):
conv_list.append(
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=10, strides=1, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_1)
conv_list.append(conv_6)
if self.use_simple_convolution:
max_pool_2 = tf.keras.layers.MaxPool1D(pool_size=10, strides=1, padding='same')(input_tensor)
conv_7 = tf.keras.layers.Conv1D(filters=self.nb_filters // 8, kernel_size=16, padding='same', use_bias=False, # integer division: filters must be an int
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_2)
conv_list.append(conv_7)
if self.use_separable_convolution:
conv_8 = tf.keras.layers.SeparableConv1D(filters=self.nb_filters, kernel_size=32, padding='same', use_bias=False, depth_multiplier=1,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
conv_list.append(conv_8)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
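For intuition, a standalone sketch (shapes and defaults are assumptions, not taken from this diff) of how the parallel branches of one module add up to 164 output channels with nb_filters=32, matching the num_features used for 'deepeye' in the torch BaseNet below:

import tensorflow as tf

x = tf.random.normal((8, 150, 129))  # (batch, timesamples, channels); assumed shapes
bottleneck = tf.keras.layers.Conv1D(32, 1, padding='same', use_bias=False)(x)
# Three inception branches with kernel sizes 40 // 2**i: 3 * 32 channels
branches = [tf.keras.layers.Conv1D(32, k, padding='same', use_bias=False)(bottleneck)
            for k in (40, 20, 10)]
pooled = tf.keras.layers.MaxPool1D(10, strides=1, padding='same')(x)
branches.append(tf.keras.layers.Conv1D(32, 1, padding='same', use_bias=False)(pooled))       # +32
branches.append(tf.keras.layers.Conv1D(4, 16, padding='same', use_bias=False)(pooled))       # +4 = 32 // 8
branches.append(tf.keras.layers.SeparableConv1D(32, 32, padding='same', use_bias=False)(x))  # +32
out = tf.keras.layers.Concatenate(axis=2)(branches)
print(out.shape)  # (8, 150, 164)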
......@@ -12,6 +12,9 @@ from tf_models.Xception.Xception import XCEPTION
from tf_models.InceptionTime.Inception import INCEPTION
from tf_models.EEGNet.eegNet import EEGNet
#TODO: Create a BaseEnsemble class that both tensorflow and torch ensembles inherit
# The trainer then has the same interface methods (run, predict, load, etc.) to interact with ensembles
class Ensemble_tf:
"""
The Ensemble is a model itself, which contains a number of models that are averaged on prediction.
......@@ -102,43 +105,13 @@ class Ensemble_tf:
config['model'] = config['model'] + '_cluster'
hist.history['val_loss'] = loss
np.savetxt('ensemble_loss.csv', loss, delimiter=',')
plot_loss(hist, config['model_dir'], config['model'], val = True)
if config['task'] == 'prosaccade-clf':
hist.history['val_accuracy'] = accuracy
plot_acc(hist, config['model_dir'], config['model'], val = True)
save_logs(hist, config['model_dir'], config['model'], pytorch = False)
"""
# Compute averaged metrics on the validation set
for j, pred_epoch in enumerate(pred):
print(f"pred epoch {j} with loss {self.loss_fn(pred_ensemble.targets,pred_epoch).numpy()} and accuracy {np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets).reshape(-1)-1)**2)}")
pred_epoch = (pred_epoch/config['ensemble']).tolist()
loss.append(self.loss_fn(pred_ensemble.targets,pred_epoch).numpy())
pred_epoch = np.round(pred_epoch,0)
if config['task'] == 'prosaccade_clf':
accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets).reshape(-1)-1)**2))
# save the ensemble loss to the model directory
loss_fname = config['model_dir'] + "/" + "ensemble_loss.txt"
np.savetxt(loss_fname, loss, delimiter=',')
logging.info("loss len {}".format(len(loss)))
# Plot
logging.info("Creating plots")
if(self.nb_models == 1):
plot_loss(hist, config['model_dir'], config['model'], val = True)
elif self.nb_models > 1:
config['model'] += '_ensemble'
hist.history['val_loss'] = np.array(loss)
plot_loss(hist, config['model_dir'], config['model'], val = True)
if config['task'] == 'prosaccade_clf':
hist.history['val_accuracy'] = accuracy
plot_acc(hist, config['model_dir'], config['model'], val = True)
save_logs(hist, config['model_dir'], config['model'], pytorch = False)
logging.info("Done with training and plotting.")
"""
def predict(self, X):
"""
Predict with all models on the dataset X
......
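The averaging behind "averaged on prediction" is simple; a minimal sketch with illustrative names (not the actual Ensemble_tf internals):

import numpy as np

def ensemble_predict(models, X):
    # Stack each model's predictions, then average over the ensemble axis
    preds = np.stack([m.predict(X) for m in models])  # (n_models, n_samples, n_outputs)
    return preds.mean(axis=0)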
import tensorflow as tf
from tensorflow import keras
from config import config
#from utils.utils import *
import logging
from tf_models.ConvNet import ConvNet
class Pretrained_Model(ConvNet):
"""
Load a pretrained model and refit it with new data.
We need to keep track of the best saved models for a particular task that should be loaded.
Update the paths if necessary.
"""
def __init__(self, input_shape, epochs = 50, verbose=True, batch_size=64, learning_rate=0.01):
......@@ -41,5 +42,4 @@ class Pretrained_Model(ConvNet):
# Load the model
model = keras.models.load_model(model_dir + name + "_best_model.h5", compile=False)
#TODO: log the config of the pretrained model, not the values as in config.py, since they may not be valid
return model
\ No newline at end of file
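One common way to refit such a loaded model, sketched under assumptions (the freezing policy and optimizer are not specified in this diff):

from tensorflow import keras

model = keras.models.load_model("path/to/best_model.h5", compile=False)
for layer in model.layers[:-1]:
    layer.trainable = False  # assumption: freeze everything except the output head
model.compile(optimizer=keras.optimizers.Adam(1e-4), loss='mse')
# model.fit(X_new, y_new, epochs=10, batch_size=64)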
import tensorflow as tf
from config import config
from utils.utils import *
import logging
from Regression_ConvNet import Regression_ConvNet
class Regression_XCEPTION(Regression_ConvNet):
"""
The Xception architecture. It is inspired by the Xception paper, which describes how 'extreme' convolutions can be represented
as separable convolutions and can achieve better accuracy than the Inception architecture. It is made of modules of a specific depth.
Each module, in our implementation, consists of a separable convolution followed by batch normalization and a ReLU activation layer.
"""
def __init__(self, input_shape, kernel_size=40, nb_filters=128, verbose=True, epochs=1,
batch_size=64, use_residual=True, depth=6, learning_rate=0.01, regularization=0.01):
self.regularization = regularization
super(Regression_XCEPTION, self).__init__( input_shape,
kernel_size=kernel_size,
nb_filters=nb_filters,
verbose=verbose,
epochs=epochs,
batch_size=batch_size,
use_residual=use_residual,
depth=depth,
preprocessing=False,
learning_rate=learning_rate)
def _module(self, input_tensor, current_depth):
"""
The module of Xception. Consists of a separable convolution followed by batch normalization and a ReLU activation function.
"""
x = tf.keras.layers.SeparableConv1D(filters=self.nb_filters, kernel_size=self.kernel_size, padding='same', use_bias=False, depth_multiplier=1,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
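The parameter savings behind the 'extreme' convolution idea can be checked directly; a sketch assuming a 64-channel input:

import tensorflow as tf

x = tf.random.normal((8, 150, 64))
full = tf.keras.layers.Conv1D(128, 40, padding='same', use_bias=False)
sep = tf.keras.layers.SeparableConv1D(128, 40, padding='same', use_bias=False)
full(x); sep(x)              # build the layers so the weights exist
print(full.count_params())   # 40 * 64 * 128 = 327,680
print(sep.count_params())    # 40 * 64 + 64 * 128 = 10,752 (depthwise + pointwise)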
......@@ -4,26 +4,72 @@ import numpy as np
from config import config
import logging
from sklearn.model_selection import train_test_split
from torch_models.utils.dataloader import create_dataloader
from torch_models.utils.training import train_loop, test_loop
from torch_models.torch_utils.dataloader import create_dataloader
from torch_models.torch_utils.training import train_loop, test_loop
class Prediction_history():
"""
Prediction history for pytorch model ensembles
"""
def __init__(self, X_val, y_val) -> None:
# Create tensor
self.X_val = torch.tensor(X_val)
self.y_val = torch.tensor(y_val)
# .cuda() returns a copy instead of moving the tensor in place, so reassign
if torch.cuda.is_available():
    self.X_val = self.X_val.cuda()
    self.y_val = self.y_val.cuda()
self.predhis = []
def on_epoch_end(self, model):
    # No gradients are needed to log predictions
    with torch.no_grad():
        y_pred = model(self.X_val.float())
    # Transform back to a numpy array (on CPU) because the ensemble handles it that way
    self.predhis.append(y_pred.cpu().numpy())
class BaseNet(nn.Module):
"""
BaseNet class from which ConvNet and EEGNet inherit common functionality
"""
def __init__(self, input_shape, epochs=50, verbose=True, model_number=0, batch_size=64):
def __init__(self, epochs=50, verbose=True, model_number=0):
super().__init__()
self.input_shape = input_shape
self.epochs = epochs
self.verbose = verbose
self.model_number = model_number
self.batch_size = batch_size
self.nb_channels = self.input_shape[1]
self.timesamples = self.input_shape[0]
if self.verbose:
print(self) # works for torch
# Set the number of features that are passed through the internal network (except input layer)
if config['model'] == 'cnn':
self.num_features = 16
elif config['model'] == 'deepeye':
self.num_features = 164
else: # all other current models have tensors of width 64
self.num_features = 64
# Compute the number of features for the output layer
eegNet_out = 4*2*7
convNet_out = self.num_features * self.timesamples
# Get cpu or gpu device for training.
self.device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Using {} device".format(self.device))
# Create the output layer depending on the task and model
if config['task'] == 'prosaccade-clf': # hyphenated to match the config keys above
self.loss_fn = nn.BCELoss()
self.output_layer = nn.Sequential(
nn.Linear(in_features=eegNet_out if config['model'] == 'eegnet' else convNet_out, out_features=1),
nn.Sigmoid() # BCELoss expects probabilities in [0, 1]
)
elif config['task'] == 'gaze-reg':
self.loss_fn = nn.MSELoss()
self.output_layer = nn.Sequential(
nn.Linear(in_features=eegNet_out if config['model'] == 'eegnet' else convNet_out, out_features=2)
)
else: #elif config['task'] == 'angle-reg':
from torch_models.torch_utils.custom_losses import angle_loss
self.loss_fn = angle_loss
self.output_layer = nn.Sequential(
nn.Linear(in_features=eegNet_out if config['model'] == 'eegnet' else convNet_out, out_features=1)
)
# abstract method
def forward(self, x):
......@@ -41,33 +87,29 @@ class BaseNet(nn.Module):
pass
def fit(self, x, y, subjectID=None):
logging.info(f"Fiting model {self.__name__}, model number {self.model_number}")
logging.info("------------------------------------------------------------------------------------")
logging.info(f"Fitting model number {self.model_number}")
# Create a split
x = np.transpose(x, (0, 2, 1)) # (batch_size, samples, channels) to (bs, ch, samples) as torch conv layers want it
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
# Create dataloaders
train_dataloader = create_dataloader(X_train, y_train, batch_size=config['batch_size'])
test_dataloader = create_dataloader(X_val, y_val, batch_size=config['batch_size'])
logging.info(f"Created train dataloader with x shape{x[0].shape} and y shape {y[0].shape}")
# Create the training depending on the task
if config['task'] == 'prosaccade-clf':
loss_fn = nn.BCELoss()
elif config['task'] == 'gaze-reg':
loss_fn = nn.MSELoss()
elif config['task'] == 'angle-reg':
from torch_models.utils.custom_losses import angle_loss
# Create the optimizer
optimizer = torch.optim.Adam(list(self.parameters()), lr=config['learning_rate'])
# Create history and log
prediction_ensemble = Prediction_history(X_val, y_val)
# Train the model
epochs = config['epochs']
epochs = config['epochs']
for t in range(epochs):
logging.info(f"Epoch {t+1}\n-------------------------------")
train_loop(train_dataloader, self, loss_fn, optimizer)
test_loop(test_dataloader, self, loss_fn)
logging.info(f"Finished model {self.__name__}, model number {self.model_number}")
train_loop(train_dataloader, self.float(), self.loss_fn, optimizer)
test_loop(test_dataloader, self.float(), self.loss_fn)
prediction_ensemble.on_epoch_end(model=self)
logging.info(f"Finished model number {self.model_number}")
# Save model
ckpt_dir = config['model_dir'] + '/best_models/' + self.__str__ + '_nb_{}_'.format(self.model_number) + 'best_model.h5'
ckpt_dir = config['model_dir'] + '/best_models/' + config['model'] + '_nb_{}_'.format(self.model_number) + 'best_model.h5'
torch.save(self, ckpt_dir)
return prediction_ensemble
\ No newline at end of file
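angle_loss is imported from torch_models.torch_utils.custom_losses, which is not part of this diff; a plausible wrap-around formulation would be:

import torch

def angle_loss(y_pred, y_true):
    # Hypothetical sketch: compare angles on the circle so that predictions just
    # across the -pi/pi boundary incur a small cost instead of a huge one
    diff = y_pred - y_true
    wrapped = torch.atan2(torch.sin(diff), torch.cos(diff))  # maps into (-pi, pi]
    return torch.mean(wrapped ** 2)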
from torch.nn.modules.batchnorm import BatchNorm1d
from config import config
from torch_models.ConvNetTorch import ConvNet
import torch
import torch.nn as nn
import math
from torch_models.Modules import Shortcut_layer
from torch_models.torch_utils.padding import pad_conv1d, pad_pool1d
class CNN(ConvNet):
"""
The CNN is one of the simplest classifiers. It extends the class ConvNet, which is made of modules with a specific depth.
It can be used for both classification and regression, based on the ConvNet class.
"""
def __init__(self, input_shape, kernel_size=64, epochs = 50, nb_filters=16, verbose=True, batch_size=64,
use_residual=True, depth=12, regularization=0.01, model_number=0):
self.regularization = regularization
self.__name__ = 'CNN'
super(CNN, self).__init__(input_shape, kernel_size=kernel_size, epochs=epochs,
super().__init__(input_shape, kernel_size=kernel_size, epochs=epochs,
nb_filters=nb_filters, verbose=verbose, batch_size=batch_size,
use_residual=use_residual, depth=depth, model_number=model_number)
self.regularization = regularization
self.nb_filters = nb_filters
def __str__(self):
return self.__class__.__name__
def _module(self, input_tensor, current_depth):
def _module(self, depth):
"""
The module of the CNN is a simple convolution with batch normalization and a ReLU activation, followed by max pooling.
"""
return nn.Sequential(
nn.Conv1d(in_channels=self.nb_filters, out_channels=self.nb_filters, kernel_size=self.kernel_size, groups=self.nb_filters),
nn.BatchNorm1d(),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
)
nn.Conv1d(in_channels=self.nb_channels if depth==0 else self.num_features,
out_channels=self.num_features, kernel_size=self.kernel_size),
nn.BatchNorm1d(num_features=self.num_features),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, stride=1)
)
"""
Tensorflow code:
x = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=self.kernel_size,
......@@ -37,4 +37,28 @@ class CNN(ConvNet):
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
x = tf.keras.layers.MaxPool1D(pool_size=2, strides=1, padding='same')(x)
"""
\ No newline at end of file
"""
def _run_conv(self, module, tensor):
"""
Run the tensor x through the CNN module such that its shape stays the same,
similar to tf.keras padding='same'.
"""
# Dereference the parts of the module
conv = module[0]
batchnorm = module[1]
activation = module[2]
maxpool = module[3]
# Pad for convolution
x = pad_conv1d(tensor, kernel_size=self.kernel_size, value=0)
#print(f"x after pad conv {x.size()}")
x = conv(x)
#print(f"x after conv {x.size()}")
x = batchnorm(x)
x = activation(x)
# Pad for maxpooling
x = pad_pool1d(x, value=0)
#print(f"x after pad pool {x.size()}")
x = maxpool(x)
#print(f"x after maxpool {x.size()}")
return x
\ No newline at end of file
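pad_conv1d and pad_pool1d come from torch_models.torch_utils.padding, which is not part of this diff; a sketch of what they plausibly compute to emulate Keras padding='same' at stride 1:

import torch.nn.functional as F

def pad_conv1d(x, kernel_size, value=0):
    # A stride-1 convolution shrinks the time axis by kernel_size - 1;
    # pad half on each side so the output length equals the input length
    total = kernel_size - 1
    return F.pad(x, (total // 2, total - total // 2), value=value)

def pad_pool1d(x, value=0):
    # MaxPool1d(kernel_size=2, stride=1) shrinks the time axis by one sample
    return F.pad(x, (0, 1), value=value)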
from __future__ import print_function
import sys
from abc import ABC, abstractmethod
from re import X
from torch_models.BaseNetTorch import BaseNet
from torch_models.Modules import Shortcut_layer
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
import logging
from torch_models.torch_utils.padding import pad_conv1d, pad_pool1d
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class ConvNet(ABC, BaseNet):
"""
This class defines all the common functionality for more complex convolutional nets
......@@ -18,58 +26,22 @@ class ConvNet(ABC, BaseNet):
"""
We define the layers of the network in the __init__ function
"""
super().__init__(epochs=epochs, verbose=verbose, model_number=model_number)
super().__init__(input_shape=input_shape, epochs=epochs, verbose=verbose,
model_number=model_number, batch_size=batch_size)
self.use_residual = use_residual
self.depth = depth
self.callbacks = None
self.batch_size = batch_size
self.kernel_size = kernel_size
self.nb_filters = nb_filters
self.preprocessing = preprocessing
self.input_shape = input_shape
# Define the layers that we need in the forward pass
#self.module = self._module() #returns a nn.Sequential that can be used
self.input_layer = nn.Sequential(
nn.Conv1d(in_channels=self.input_shape[0], out_channels=16, kernel_size=3),
nn.Sigmoid(),
nn.MaxPool1d(kernel_size=1, stride=1) # do not reduce size
)
self.gap_layer = nn.AvgPool1d(kernel_size=1, stride=1)
# Create output layer depending on task
if config['task'] == 'prosaccade_clf':
self.output_layer = nn.Sequential(
nn.Linear(16, 1),
nn.Sigmoid()
)
elif config['task'] == 'gaze-reg':
self.output_layer = nn.Sequential(
nn.Linear(16, 2)
)
else: #elif config['task'] == 'angle-reg':
self.output_layer = nn.Sequential(
nn.Linear(16, 1)
)
"""
Working example network
self.conlayer1 = nn.Sequential(
nn.Conv1d(in_channels=180, out_channels=16, kernel_size=3, groups=4),
nn.Sigmoid(),
nn.MaxPool1d(2))
self.conlayer2 = nn.Sequential(
nn.Conv1d(in_channels=16, out_channels=16, kernel_size=3, groups=16),
nn.Sigmoid(),
nn.MaxPool1d(2))
self.fc = nn.Sequential(
nn.Linear(480,120),
nn.Linear(120,84),
nn.Linear(84,2))
"""
# Define all the convolutional and shortcut modules that we will need in the model
self.conv_blocks = nn.ModuleList([self._module(d) for d in range(self.depth)])
self.shortcuts = nn.ModuleList([self._shortcut(d) for d in range(int(self.depth / 3))])
self.gap_layer = nn.AvgPool1d(kernel_size=2, stride=1)
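The forward pass itself is outside this hunk; a hedged sketch of how the blocks built above could be chained, closing a residual shortcut after every third module:

def forward_sketch(net, x):
    # Assumed wiring, not the actual ConvNet.forward
    res = x
    for d in range(net.depth):
        x = net.conv_blocks[d](x)
        if net.use_residual and d % 3 == 2:
            x = net.shortcuts[d // 3](res, x)  # Shortcut_layer call signature is assumed
            res = x
    x = net.gap_layer(x)
    x = x.view(x.size(0), -1)  # flatten for the linear output layer
    return net.output_layer(x)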