To receive notifications about scheduled maintenance, please subscribe to the mailing-list gitlab-operations@sympa.ethz.ch. You can subscribe to the mailing-list at https://sympa.ethz.ch

Commit caf4ed4c authored by widmerl's avatar widmerl
Browse files

added ros to camera

parent 8d439fc9
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
================================================================================
Finding Marker Balls, labeling them and calculating distance and angle estimates
......@@ -40,6 +42,18 @@ c to change color, then type "lower h +10" to the Terminal to increase the lower
Copyright 2019 Lars Widmer
'''
#ros stuff import
import roslib; roslib.load_manifest('dfall_pkg')
import rospy
from std_msgs.msg import Int32
from dfall_pkg.msg import ControlCommand
from dfall_pkg.msg import IntWithHeader
from dfall_pkg.srv import IntIntService
import rosbag
from rospkg import RosPack
from std_msgs.msg import Float32
from std_msgs.msg import String
......@@ -61,7 +75,7 @@ WIDTH = 640
BLURR = 11
ACT_SIZE = 100
FOV = 70
CAMERA = 0
CAMERA = 2
greenLower = (50, 50, 40)
......@@ -77,6 +91,20 @@ magentaUpper = (150,255,255)
upper = greenUpper
lower = greenLower
#Start ROS-node
global node_name
node_name = "CrazyCamera"
rospy.init_node(node_name, anonymous=True)
# Get the namespace of this node
global ros_namespace
ros_namespace = rospy.get_namespace()
# Initialise a publisher for the battery voltage
global cfcamera_pub
cfcamera_pub = rospy.Publisher(node_name, Float32, queue_size=10)
def angle(p1, p2):
......@@ -295,6 +323,7 @@ while True:
if similar_value(l,l_old,10) or similar_value(phi_ud,phi_ud_old,5) or similar_value(phi_lr,phi_lr_old,5):
output = 'dist: ' + str(int(l)) + ' phi_ud: ' + "{:5.1f}".format(phi_ud * 57.296) + ' phi_lr: ' + "{:5.1f}".format(phi_lr * 57.296)
cfcamera_pub.publish(l)
else:
mark_missing = True
......
......@@ -26,6 +26,15 @@
>
<rosparam command="load" file="$(find dfall_pkg)/param/BatteryMonitor.yaml" />
</node>
<!-- CRAZY Camera -->
<node
pkg = "dfall_pkg"
name = "CrazyCamera"
output = "screen"
type = "CrazyCamera.py"
>
</node>
<!-- FLYING AGENT CLIENT -->
<node
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
================================================================================
Finding Marker Balls, labeling them and calculating distance and angle estimates
================================================================================
Using HSV Color Masks this finds the four biggest green balls, detects their positions and
measures distance to the camera (dist) as well as the two angles that define the position
of the camera in the frame of the Marker. Zero is perpendicular to the Marker Plane.
phi_ud for up/down and phi_lr for left/right. The Output Values are filtered by a kalman filter.
Constants:
WIDTH should contain the Width of the Video Frame
BLURR defines the Size of the Gaussian Blurr filter. Should be between 5 and 30
ACT_SIZE defines the distance between the centres of the Markers in mm
FOV defines the field of view of the camera.
Marker Structure:
ACT_SIZE at least 100mm
diameter at least 30mm
G means coloured in Green
ACT_SIZE
|----------|
___ ___
/ G \ / G \
\___/ \___/ -
|
|ACT_SIZE
___ ___ |
/ G \ / G \ |
\___/ \___/ -
|---|
diameter
Usage:
q to quit
c to change color, then type "lower h +10" to the Terminal to increase the lower bound of the h value
Copyright 2019 Lars Widmer
'''
#ros stuff import
import roslib; roslib.load_manifest('dfall_pkg')
import rospy
from std_msgs.msg import Int32
from dfall_pkg.msg import ControlCommand
from dfall_pkg.msg import IntWithHeader
from dfall_pkg.srv import IntIntService
import rosbag
from rospkg import RosPack
from std_msgs.msg import Float32
from std_msgs.msg import String
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import math
import argparse
import cv2
import imutils
import time
import matplotlib.pyplot as plt
from operator import itemgetter
from pykalman import KalmanFilter
# --- Tunable constants -------------------------------------------------
WIDTH = 640        # width (px) the video frames are resized to
BLURR = 11         # Gaussian blur kernel size (should be between 5 and 30)
ACT_SIZE = 100     # distance between marker-ball centres, in mm
FOV = 70           # camera field of view, in degrees
CAMERA = 2         # index of the video device handed to VideoStream

# HSV colour bounds (H, S, V) for the candidate marker colours.
greenLower = (50, 50, 40)
greenUpper = (80, 255, 255)
blueLower = (90, 40, 40)
blueUpper = (110, 255, 255)
magentaLower = (130, 80, 30)
magentaUpper = (150, 255, 255)

# Active mask bounds: start out tracking green; tunable at runtime via 'c'.
upper = greenUpper
lower = greenLower

# Start the ROS node.  The original code used `global node_name` etc. at
# module level, which is a no-op in Python (names assigned at module level
# are already globals), so those statements were dropped.
node_name = "CrazyCamera"
rospy.init_node(node_name, anonymous=True)

# Namespace this node was launched into.
ros_namespace = rospy.get_namespace()

# Publisher for the measured marker distance (Float32).  The original
# comment said "battery voltage" -- a copy-paste leftover from the
# BatteryMonitor node.
cfcamera_pub = rospy.Publisher(node_name, Float32, queue_size=10)
def angle(p1, p2):
    """Return the view angle, in degrees, spanned by two pixel points.

    Scales the pixel distance between p1 and p2 by the frame width and
    the camera's field of view (module constants WIDTH and FOV).
    """
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    pixel_dist = math.sqrt(dx ** 2 + dy ** 2)
    return (pixel_dist / WIDTH) * FOV
def big_diff(v1, v2, mult):
    """Return True when v1/v2 falls outside the band [1/mult, mult]."""
    ratio = v1 / v2
    return not (1 / mult <= ratio <= mult)
def similar_value(v1, v2, max_diff):
    """Return True when v1 and v2 differ by strictly less than max_diff."""
    return abs(v1 - v2) < max_diff
# Font settings used for every cv2.putText overlay on the video frames.
fontface = cv2.FONT_HERSHEY_SIMPLEX
fontscale = 0.5
fontcolor = (255, 255, 255)
# #initialize Kalman Filter (left unfinished by the original author)
# kf = KalmanFilter(
#     transition_matrix
#     observation_matrix
#     initial_transition_covariance
#     initial_observation_covariance
#     transition_offsets
#     observation_offset
#     initial_state_mean
#     initial_state_covariance
#     random_state
# )
# Construct the argument parser: an optional video file path and the
# maximum length of the (currently unused) tracked-points deque.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())
# State for the measurement loop below.
points = []          # (x, y, radius) tuples of detected balls, per frame
t_meas_past = []     # history of the "top" angle measurement (for plotting)
t_meas_kalman = []   # placeholder for Kalman-filtered values (unused)
radius_avg = 0       # NOTE(review): never reset per frame -- see main loop
phi_ud = 0           # latest up/down angle estimate (radians)
phi_lr = 0           # latest left/right angle estimate (radians)
l = 0                # latest distance estimate
phi_ud_old = 0       # previous-frame values, used for plausibility checks
phi_lr_old = 0
l_old = 0
fig, ax = plt.subplots()
pts = deque(maxlen=args["buffer"])
# If a video path was not supplied, grab a reference to the webcam;
# otherwise open the given video file.
if not args.get("video", False):
    vs = VideoStream(src=CAMERA).start()
else:
    vs = cv2.VideoCapture(args["video"])
# Allow the camera or video file to warm up.
time.sleep(0.2)
# Main capture/measurement loop: one iteration per video frame.
while True:
    # Allow changing of the active HSV colour range at runtime: press 'c'
    # in the video window, then type e.g. "upper h +10" into the terminal.
    key = cv2.waitKey(1) & 0xFF
    if key == ord("c"):
        colorchange = input()
        space = colorchange[0:5]        # "upper" or "lower"
        field = colorchange[6]          # 'h', 's' or 'v'
        value_change = int(colorchange[7:])
        if space == 'upper':
            if field == 'h':
                upper = (upper[0]+value_change, upper[1], upper[2])
            elif field == 's':
                upper = (upper[0], upper[1]+value_change, upper[2])
            elif field == 'v':
                upper = (upper[0], upper[1], upper[2]+value_change)
            else:
                print('options are h, s or v')
        elif space == 'lower':
            if field == 'h':
                lower = (lower[0]+value_change, lower[1], lower[2])
            elif field == 's':
                lower = (lower[0], lower[1]+value_change, lower[2])
            elif field == 'v':
                lower = (lower[0], lower[1], lower[2]+value_change)
            else:
                print('options are h, s or v')
        else:
            print('usage: upper h +10')
        print('new upper: ', upper)
        print('new lower: ', lower)
    # Reset the per-frame ball list.
    points = []
    # Flag for improbable measurements (reset each frame).
    mark_missing = False
    # Grab the current frame.
    frame = vs.read()
    # Handle the different return shape of VideoCapture vs VideoStream.
    frame = frame[1] if args.get("video", False) else frame
    # If we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video.
    if frame is None:
        break
    # Resize the frame, blur it, and convert it to the HSV color space.
    frame = imutils.resize(frame, width=WIDTH)
    BLURRed = cv2.GaussianBlur(frame, (BLURR, BLURR), 0)
    cv2.imshow("BLURRed", BLURRed)
    hsv = cv2.cvtColor(BLURRed, cv2.COLOR_BGR2HSV)
    # Construct a mask for the active colour, then erode/dilate to remove
    # any small blobs left in the mask.
    mask = cv2.inRange(hsv, lower, upper)
    cv2.imshow("Masked", mask)
    mask = cv2.erode(mask, None, iterations=2)
    #cv2.imshow("Eroded", mask)
    mask = cv2.dilate(mask, None, iterations=2)
    cv2.imshow("Dilated", mask)
    # Find contours in the mask and initialize the current (x, y) center.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # Only proceed if at least one contour was found.
    if len(cnts) > 0:
        # Keep the four largest contours (the four marker balls) and
        # compute each one's minimum enclosing circle and centroid.
        cnts = sorted(cnts, key=cv2.contourArea)
        #print(cnts)
        cnts = cnts[-4:]
        for c in cnts:
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            #print('Green position: ', int(x), ',', int(y), 'radius: ', int(radius))
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # Draw the enclosing circle and centroid onto the frame.
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # Record the ball for the geometry computation below.
            points.append((x,y,radius))
            # NOTE(review): radius_avg is never reset to 0 between frames,
            # so values from previous frames leak into this average --
            # confirm whether that is intended.
            radius_avg += radius
    # Calculate a new measurement only when all four balls are visible.
    if len(points) == 4:
        radius_avg = radius_avg/4
        # Sort by y to split into upper/lower pairs, then by x within each
        # pair to label left/right.
        points.sort(key = itemgetter(1))
        down = sorted(points[-2:], key = itemgetter(0))
        up = sorted(points[:2], key = itemgetter(0))
        up_left = up[0]
        up_right = up[1]
        down_left = down[0]
        down_right = down[1]
        # Mark the measurement as improbable when any ball's radius is
        # more than a factor of 2 away from the average radius.
        for point in points:
            if big_diff(point[2], radius_avg, 2):
                mark_missing = True
        if not mark_missing:
            # Label the four balls on screen.
            cv2.putText(frame, 'up_left', (int(up_left[0]),int(up_left[1])), fontface, fontscale, fontcolor)
            cv2.putText(frame, 'up_right', (int(up_right[0]),int(up_right[1])), fontface, fontscale, fontcolor)
            cv2.putText(frame, 'down_left', (int(down_left[0]),int(down_left[1])), fontface, fontscale, fontcolor)
            cv2.putText(frame, 'down_right', (int(down_right[0]),int(down_right[1])), fontface, fontscale, fontcolor)
            # Angle (degrees) subtended by each edge of the marker square.
            t_meas = angle(up_left, up_right)
            b_meas = angle(down_left, down_right)
            l_meas = angle(up_left, down_left)
            r_meas = angle(up_right, down_right)
            t_meas_past.append(t_meas)
            # Distance estimate from each edge: ACT_SIZE / sin(angle),
            # 0.01745 ~= pi/180 converts degrees to radians.
            l_t = ACT_SIZE/math.sin(t_meas*0.01745)
            l_b = ACT_SIZE/math.sin(b_meas*0.01745)
            l_l = ACT_SIZE/math.sin(l_meas*0.01745)
            l_r = ACT_SIZE/math.sin(r_meas*0.01745)
            # Remember the previous estimates for the plausibility check.
            l_old = l
            phi_ud_old = phi_ud
            phi_lr_old = phi_lr
            # Average the four distance estimates; the tilt angles come
            # from the difference of opposite edge distances.
            l = (l_t+l_b+l_r+l_l)/4
            phi_ud = math.atan((l_t - l_b)/ACT_SIZE)
            phi_lr = math.atan((l_l - l_r)/ACT_SIZE)
            # Accept and publish only when the new value is close to the
            # previous one (57.296 ~= degrees per radian for display).
            if similar_value(l,l_old,10) or similar_value(phi_ud,phi_ud_old,5) or similar_value(phi_lr,phi_lr_old,5):
                output = 'dist: ' + str(int(l)) + ' phi_ud: ' + "{:5.1f}".format(phi_ud * 57.296) + ' phi_lr: ' + "{:5.1f}".format(phi_lr * 57.296)
                cfcamera_pub.publish(l)
            else:
                mark_missing = True
            # NOTE(review): if the else branch above runs before `output`
            # was ever assigned, this line raises NameError -- confirm.
            cv2.putText(frame, output, (0,20), fontface, fontscale, fontcolor)
            # t_meas_kalman.append( #???????????????????????????????????)
    else:
        mark_missing = True
    if mark_missing:
        cv2.putText(frame, 'No marker detected', (0,20), fontface, fontscale, fontcolor)
    # Show the annotated frame to our screen.
    cv2.imshow("Frame", frame)
    # If the 'q' key is pressed, stop the loop.
    if key == ord("q"):
        break
# If we are not using a video file, stop the camera video stream;
# otherwise, release the camera.
if not args.get("video", False):
    vs.stop()
else:
    vs.release()
# Close all windows.
#plt.plot(t_meas_past)
#plt.ylabel('t_measured')
#plt.show()
time.sleep(0.1)
# NOTE(review): if the main loop ended because the video ran out
# (frame is None) rather than via 'q', `key` never equals 'q' here and
# this loop never exits -- confirm intent.
while True:
    if key == ord("q"):
        break
cv2.destroyAllWindows()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
================================================================================
Finding Marker Balls, labeling them and calculating distance and angle estimates
================================================================================
Using HSV Color Masks this finds the four biggest green balls, detects their positions and
measures distance to the camera (dist) as well as the two angles that define the position
of the camera in the frame of the Marker. Zero is perpendicular to the Marker Plane.
phi_ud for up/down and phi_lr for left/right. The Output Values are filtered by a kalman filter.
Constants:
WIDTH should contain the Width of the Video Frame
BLURR defines the Size of the Gaussian Blurr filter. Should be between 5 and 30
ACT_SIZE defines the distance between the centres of the Markers in mm
FOV defines the field of view of the camera.
Marker Structure:
ACT_SIZE at least 100mm
diameter at least 30mm
G means coloured in Green
ACT_SIZE
|----------|
___ ___
/ G \ / G \
\___/ \___/ -
|
|ACT_SIZE
___ ___ |
/ G \ / G \ |
\___/ \___/ -
|---|
diameter
Usage:
q to quit
c to change color, then type "lower h +10" to the Terminal to increase the lower bound of the h value
Copyright 2019 Lars Widmer
'''
#ros stuff import
import roslib; roslib.load_manifest('dfall_pkg')
import rospy
from std_msgs.msg import Int32
from dfall_pkg.msg import ControlCommand
from dfall_pkg.msg import IntWithHeader
from dfall_pkg.srv import IntIntService
import rosbag
from rospkg import RosPack
from std_msgs.msg import Float32
from std_msgs.msg import String
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import math
import argparse
import cv2
import imutils
import time
import matplotlib.pyplot as plt
from operator import itemgetter
from pykalman import KalmanFilter
# --- Tunable constants -------------------------------------------------
WIDTH = 640        # width (px) the video frames are resized to
BLURR = 11         # Gaussian blur kernel size (should be between 5 and 30)
ACT_SIZE = 100     # distance between marker-ball centres, in mm
FOV = 70           # camera field of view, in degrees
CAMERA = 2         # index of the video device handed to VideoStream

# HSV colour bounds (H, S, V) for the candidate marker colours.
greenLower = (50, 50, 40)
greenUpper = (80, 255, 255)
blueLower = (90, 40, 40)
blueUpper = (110, 255, 255)
magentaLower = (130, 80, 30)
magentaUpper = (150, 255, 255)

# Active mask bounds: start out tracking green; tunable at runtime via 'c'.
upper = greenUpper
lower = greenLower

# Start the ROS node.  The original code used `global node_name` etc. at
# module level, which is a no-op in Python (names assigned at module level
# are already globals), so those statements were dropped.
node_name = "CrazyCamera"
rospy.init_node(node_name, anonymous=True)

# Namespace this node was launched into.
ros_namespace = rospy.get_namespace()

# Fetch the YAML parameters "agentID" and "coordID" from the
# FlyingAgentClient running in the same namespace.
m_agentID = rospy.get_param(ros_namespace + "/FlyingAgentClient/agentID")
coordID = rospy.get_param(ros_namespace + "/FlyingAgentClient/coordID")

# Convert the coordinator ID to a zero-padded three-digit string.
coordID_as_string = format(coordID, '03')

# Publisher for the measured marker distance (Float32).  The original
# comment said "battery voltage" -- a copy-paste leftover from the
# BatteryMonitor node.
cfcamera_pub = rospy.Publisher(node_name, Float32, queue_size=10)
def angle(p1, p2):
    """Degrees of view angle spanned by the segment from p1 to p2.

    Converts the pixel distance between the two points into an angle
    using the module-level frame width (WIDTH) and field of view (FOV).
    """
    separation = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
    return (separation / WIDTH) * FOV
def big_diff(v1, v2, mult):
    """Check whether v1 differs from v2 by more than a factor of mult."""
    quotient = v1 / v2
    if quotient < 1 / mult:
        return True
    return quotient > mult
def similar_value(v1, v2, max_diff):
    """Whether the absolute difference of v1 and v2 stays under max_diff."""
    difference = abs(v1 - v2)
    return difference < max_diff
# Font settings used for every cv2.putText overlay on the video frames.
fontface = cv2.FONT_HERSHEY_SIMPLEX
fontscale = 0.5
fontcolor = (255, 255, 255)
# #initialize Kalman Filter (left unfinished by the original author)
# kf = KalmanFilter(
#     transition_matrix
#     observation_matrix
#     initial_transition_covariance
#     initial_observation_covariance
#     transition_offsets
#     observation_offset
#     initial_state_mean
#     initial_state_covariance
#     random_state
# )
# Construct the argument parser: an optional video file path and the
# maximum length of the (currently unused) tracked-points deque.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())
# State for the measurement loop that follows in the file.
points = []          # (x, y, radius) tuples of detected balls, per frame
t_meas_past = []     # history of the "top" angle measurement (for plotting)
t_meas_kalman = []   # placeholder for Kalman-filtered values (unused)
radius_avg = 0       # NOTE(review): apparently never reset per frame
phi_ud = 0           # latest up/down angle estimate (radians)
phi_lr = 0           # latest left/right angle estimate (radians)
l = 0                # latest distance estimate
phi_ud_old = 0       # previous-frame values, used for plausibility checks
phi_lr_old = 0
l_old = 0
fig, ax = plt.subplots()
pts = deque(maxlen=args["buffer"])
# If a video path was not supplied, grab a reference to the webcam;
# otherwise open the given video file.
if not args.get("video", False):
    vs = VideoStream(src=CAMERA).start()
else:
    vs = cv2.VideoCapture(args["video"])