Commit cb020edf authored by nstorni's avatar nstorni
Browse files

Some utils

parent cd7c4de2
import cv2
import numpy as np
import glob
# Assemble the JPEG frames in movingmnistdata/ into a single AVI video.
img_array = []
size = None
# sorted() gives a deterministic frame order; glob's order is filesystem-dependent.
for filename in sorted(glob.glob('movingmnistdata/*.jpg')):
    img = cv2.imread(filename)
    if img is None:
        # Unreadable/corrupt image: cv2.imread returns None; skip it instead of
        # crashing on img.shape below.
        continue
    height, width, layers = img.shape
    size = (width, height)
    img_array.append(img)

# Guard: with no frames the original raised NameError on `size`.
if img_array:
    out = cv2.VideoWriter('test2.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
    for frame in img_array:
        out.write(frame)
    out.release()
\ No newline at end of file
#!/bin/bash
# Launch a world-models training script on the cluster.
# Usage: script --modeldir <script.py> --modelconfigdir <config.json> [-interactive]

# The original script called `usage` without defining it; define it here so
# --help and unknown arguments print something instead of "command not found".
usage() {
    echo "Usage: $0 --modeldir <script.py> --modelconfigdir <config.json> [-interactive]"
}

echo "Loading modules"
module purge
module load gcc/4.8.5 python_gpu/3.7.1
cd "$HOME"
#pip install --user -r world-models/requirements.txt
#pip install --user --upgrade torch

while [ "$1" != "" ]; do
    case $1 in
        --modelconfigdir ) shift
                           CONFIGDIR=$1
                           ;;
        --modeldir )       shift
                           MODEL=$1
                           ;;
        -interactive )     interactive=1
                           ;;
        --help )           usage
                           exit
                           ;;
        * )                usage
                           exit 1
                           ;;
    esac
    shift
done

# python world-models/trainvae.py --modelconfig $HOME/world-models/template_config.json
# Quote the expansions so paths containing spaces do not word-split.
python "world-models/$MODEL" --modelconfig "$CONFIGDIR"
\ No newline at end of file
{
"exp_name" : "testing",
"vaesamples" : 8,
"interpolation":"True",
"interpolation_steps":6,
"interpolations_dir":"$HOME/data/interpolations/RGBA",
"trainFull":"True",
"TrainingLevel":5,
"replaceReparametrization":"False",
"learning_rate" : 0.0003,
"batch_size" : 20,
"reload" : "True",
"reload_dir": "$SCRATCH/experiments/mice_shvae/models/micetd_shvae_l5_continued_D20200110T063425",
"epochs" : 400,
"epochsamples": 1,
"loss_log_freq":2,
"img_log_freq":1,
"betavae":1,
"input_dim": 256,
"latent_dim": 64,
"early_stopping": "True",
"weight_decay": "True",
"normalize":"False",
"input_ch":4,
"logdir" : "$SCRATCH/experiments/testing",
"dataset_dir":"$SCRATCH/data/mice_tempdiff_medium",
"train_dataset_dir": "$SCRATCH/data/mice_tempdiff_medium/train",
"val_dataset_dir":"$SCRATCH/data/mice_tempdiff_medium/val",
"output_dir": ""
}
\ No newline at end of file
#!/bin/bash
# Make bash file executable chmod u+x filename.sh
# Delete the tensorboard logs and saved models of experiment "$1".
# Guard: with $1 empty the original `rm -r $SCRATCH/.../$1` expanded to the
# parent directory and would have wiped ALL experiments.
if [ -z "$1" ]; then
    echo "Usage: $0 <experiment_name>" >&2
    exit 1
fi
echo "Loading modules"
echo "$1"
rm -r "$SCRATCH/experiments/shvae_temporalmask/tensorboard_logs/$1"
rm -r "$SCRATCH/experiments/shvae_temporalmask/models/$1"
......@@ -4,4 +4,4 @@ echo "Loading modules"
module purge
module load gcc/4.8.5 python_cpu/3.7.1
echo "Starting tensorboard"
tensorboard --logdir $SCRATCH/$1/tensorboard_logs --port=6993
\ No newline at end of file
tensorboard --logdir $SCRATCH/$1/tensorboard_logs
\ No newline at end of file
import cv2
import glob
import os
import numpy as np

input_dir = os.path.expandvars("video")
output_dir = os.path.expandvars("train/nolabel")
vid_num = 1
# Pixel-intensity delta (0-255) above which a pixel counts as "changed".
threshold = 15
# Temporal offsets to avoid video parts with person ( large changes in image due to illumination changes and arm of experimenter)
start_offset = 110
stop_offset = 13000

# For every .mp4 in input_dir: threshold the frame-to-frame absolute difference
# into a binary change mask, keep only the changed pixels of the newer frame,
# and save the result as an RGBA PNG with the change mask in the alpha channel.
for v in glob.iglob(input_dir + "/*.mp4"):
    vidcap = cv2.VideoCapture(v)
    print("Processing" + v)
    success, image_old = vidcap.read()
    count = 0
    print("This video is labeled as No." + str(vid_num))
    while success:
        success, image_new = vidcap.read()
        if success:
            # Compute differences and mask
            difference = cv2.absdiff(image_old, image_new)
            difference_grayscale = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
            th, image_changes = cv2.threshold(difference_grayscale, threshold, 255, cv2.THRESH_BINARY)
            res = cv2.bitwise_and(image_new, image_new, mask=image_changes)
            # Generate RGBA channels: BGR from the masked frame, alpha = change mask.
            b_channel, g_channel, r_channel = cv2.split(res)
            img_BGRA = cv2.merge((b_channel, g_channel, r_channel, image_changes))
            if count > start_offset and count < stop_offset:
                # save frame as PNG file (the old comment said JPEG; the path is .png)
                saving_result = cv2.imwrite(output_dir + "/" + str(vid_num) + "_frame_" + str(count) + ".png", img_BGRA)
                print("Save result {} of image {}".format(saving_result, count))
            else:
                print(count)
        image_old = image_new
        count += 1
    # Release the capture so the OS file handle is not leaked across videos
    # (the original never released it).
    vidcap.release()
    vid_num += 1
import cv2
import glob
import os
import numpy as np

# Debug/scratch variant of the temporal-difference extractor: for each video it
# computes ONE frame-to-frame change mask, writes it as an RGBA PNG, and moves
# to the next video (the `break` below is deliberate).
# input_dir = os.path.expandvars("video")
input_dir = os.path.expandvars("/cluster/scratch/nstorni/data/temporal_differences_test/video")
# output_dir = os.path.expandvars("/cluster/home/nstorni/temporal_differences")
output_dir = os.path.expandvars("/cluster/home/nstorni/temporal_differences")
vid_num = 1
img_array = []
# out = cv2.VideoWriter(output_dir+'/test12.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 25, (928,576))
for v in glob.iglob(input_dir + "/*.mp4"):
    vidcap = cv2.VideoCapture(v)
    print("Processing" + v)
    success, image_old = vidcap.read()
    count = 0
    print("This video is labeled as No." + str(vid_num))
    while success:
        success, image_new = vidcap.read()
        if not success:
            # Guard: the original passed image_new=None to cv2.absdiff when a
            # read failed; exit the loop instead of crashing.
            break
        difference = cv2.absdiff(image_old, image_new)
        differencegs = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
        th, dframe = cv2.threshold(differencegs, 15, 255, cv2.THRESH_BINARY)
        res = cv2.bitwise_and(image_new, image_new, mask=dframe)
        b_channel, g_channel, r_channel = cv2.split(res)
        img_BGRA = cv2.merge((b_channel, g_channel, r_channel, dframe))
        # save frame as PNG file
        test = cv2.imwrite(output_dir + "/" + str(vid_num) + "_frame_" + str(count) + ".png", img_BGRA)
        # Only the first frame pair is processed; the code that followed this
        # break in the original was unreachable and has been removed.
        break
    # Release the capture handle (the original leaked it for every video).
    vidcap.release()
    vid_num += 1
#!/bin/bash
# Make bash file executable chmod u+x filename.sh
# Load the cluster CPU Python toolchain and run the temporal-difference
# dataset generator (gen_tempdiff_data.py) from the world-models repo.
echo "Loading modules"
module purge
module load gcc/4.8.5 python_cpu/3.7.1
python $HOME/world-models/video_frame/gen_tempdiff_data.py
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment