Commit b0990b2f authored by luroth

.

parent 37a23275
@@ -11,6 +11,8 @@ import csv
from urllib import request
import os
import warnings
import matplotlib.pyplot as plt
import shapefile
def frange(start, stop, step):
i = start
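# frange() continues past this hunk; a minimal sketch of a float-range generator
# with the signature shown above (the loop body is an assumption -- only
# "i = start" is visible in this diff):
def frange_sketch(start, stop, step):
    # yield floats from start (inclusive) up to stop (exclusive) in steps of step
    i = start
    while i < stop:
        yield i
        i += step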
@@ -108,12 +110,16 @@ def main():
project_dir = r'E:\UAV\_Workspace_\ethz_eschikon_FIP_50m_soja_only_20160526\output\\'
DEM_file = 'DEM_superrough.stl'
DEM_stl = mesh.Mesh.from_file(project_dir + DEM_file)
interpolation_size_x = 64
interpolation_size_y = 64
plane_normales = DEM_stl.normals
plane_points = DEM_stl.points
epsg = getWKT_PRJ("2056")
print("init camera parameters")
# Load DEM
DEM_stl = mesh.Mesh.from_file(project_dir + DEM_file)
DEM_normales = DEM_stl.normals
DEM_points = DEM_stl.points
## SONY-RX100II-VIS Specification
sensor_dimension_x = 13.2 # mm
@@ -122,37 +128,35 @@ def main():
sensor_resolution_x = 5472 # pixel
sensor_resolution_y = 3648 # pixel
sensor_dimension_x_inM = sensor_dimension_x/1000
sensor_dimension_y_inM = sensor_dimension_y/1000
focal_lengh_inM = focal_lengh / 1000
pixel_size_x = sensor_dimension_x_inM / sensor_resolution_x
pixel_size_y = sensor_dimension_y_inM / sensor_resolution_y
resolution_inter_x = 64
resolution_inter_y = 64
# Axis for rotations
x_axis = [1, 0, 0]
y_axis = [0, 1, 0]
z_axis = [0, 0, 1]
# Relative coordinates of corners of sensor
sensor_Z = focal_lengh_inM
sensor_X_C1u2 = - (sensor_dimension_x_inM / 2)
sensor_X_C3u4 = (sensor_dimension_x_inM / 2)
sensor_Y_C1u4 = (sensor_dimension_y_inM / 2)
sensor_Y_C2u3 = - (sensor_dimension_y_inM / 2)
# Coordinates of corners of sensor, relative to center
sensor_Z = focal_lengh/1000
sensor_X_C1u2 = - (sensor_dimension_x / 2000)
sensor_X_C3u4 = (sensor_dimension_x / 2000)
sensor_Y_C1u4 = (sensor_dimension_y / 2000)
sensor_Y_C2u3 = -(sensor_dimension_y / 2000)
sensor_x = [x for x in frange((-(sensor_dimension_x_inM / 2)), (sensor_dimension_x_inM / 2), pixel_size_x*resolution_inter_x)]
sensor_y = [y for y in frange((-(sensor_dimension_y_inM / 2)), (sensor_dimension_y_inM / 2), pixel_size_y*resolution_inter_y)]
sensor_corner_xy = np.array([[0,0],[sensor_resolution_x,0],[sensor_resolution_x,sensor_resolution_y],[0, sensor_resolution_y]]) # sensor corners in pixel coordinates
sensor_corner_coords = np.array([[-(sensor_dimension_x / 2000), (sensor_dimension_y / 2000), focal_lengh/1000],
[-(sensor_dimension_x / 2000), -(sensor_dimension_y / 2000), focal_lengh/1000],
[ (sensor_dimension_x / 2000), -(sensor_dimension_y / 2000), focal_lengh/1000],
[ (sensor_dimension_x / 2000), (sensor_dimension_y / 2000), focal_lengh/1000]])
sensor_pixels = cartesian([sensor_x, sensor_y, sensor_Z])
# Coordinates of sample pixels of sensor (later used for interpolation), relative to center
sensor_pixels_x = [x for x in range(0,sensor_resolution_x,interpolation_size_x)]
sensor_pixels_y = [y for y in range(0,sensor_resolution_y,interpolation_size_y)]
sensor_pixels_xy = cartesian([sensor_pixels_x, sensor_pixels_y])
sensor_corners = np.array([(sensor_X_C1u2, sensor_Y_C1u4, sensor_Z),
(sensor_X_C1u2, sensor_Y_C2u3, sensor_Z),
(sensor_X_C3u4, sensor_Y_C2u3, sensor_Z),
(sensor_X_C3u4, sensor_Y_C1u4, sensor_Z)])
sensor_pixel_coordx = [x/1000 for x in frange((-(sensor_dimension_x / 2)), (sensor_dimension_x /2), sensor_dimension_x/(sensor_resolution_x/interpolation_size_x))]
sensor_pixel_coordy = [y/1000 for y in frange((-(sensor_dimension_y / 2)), (sensor_dimension_y /2), sensor_dimension_y/(sensor_resolution_y/interpolation_size_y))]
sensor_pixel_coords = cartesian([sensor_pixel_coordx, sensor_pixel_coordy, focal_lengh/1000])
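# cartesian() is defined elsewhere in this file and not shown in this diff; a
# minimal numpy stand-in, assuming it returns an (N, k) array holding every
# combination of the input values and accepts scalars such as focal_lengh/1000:
def cartesian_sketch(arrays):
    # np comes from the file's existing numpy import
    arrays = [np.atleast_1d(a) for a in arrays]
    grids = np.meshgrid(*arrays, indexing='ij')
    # one row per combination, one column per input sequence
    return np.stack([g.ravel() for g in grids], axis=-1)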
# Calculate coordinates on ground for all cameras
with open(project_dir + 'camera_position_RGB.txt', 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
next(spamreader, None)
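# The per-row unpacking sits outside this hunk; a hypothetical helper sketching
# how each row of camera_position_RGB.txt might be read, based on the variables
# used further down (PhotoID, X, Y, Z, Omega, Phi, Kappa) -- the column order is
# an assumption:
def parse_camera_row_sketch(row):
    PhotoID = row[0]
    X, Y, Z = float(row[1]), float(row[2]), float(row[3])
    Omega, Phi, Kappa = float(row[4]), float(row[5]), float(row[6])
    return PhotoID, X, Y, Z, Omega, Phi, Kappa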
@@ -176,90 +180,82 @@ def main():
#Phi = -1.3090272913737455
#Kappa = 141.0244127303900300
# Coordinates of focal poin
# Coordinates of focal point
focal_point = np.array([X, Y, Z])
# Rotation (order omega phi kappa correct?)
omega = math.radians(Omega)
phi = math.radians(Phi)
kappa = math.radians(Kappa)
print("rotate camera corners")
omega_rad = math.radians(Omega)
phi_rad = math.radians(Phi)
kappa_rad = math.radians(Kappa)
sensor_corners_rot_all = sensor_corners
sensor_corners_rot_all = np.array([np.dot(rotation_matrix(z_axis, kappa), sensor_corner) for sensor_corner in sensor_corners_rot_all])
sensor_corners_rot_all = np.array([np.dot(rotation_matrix(y_axis, phi), sensor_corner) for sensor_corner in sensor_corners_rot_all])
sensor_corners_rot_all = np.array([np.dot(rotation_matrix(x_axis, omega), sensor_corner) for sensor_corner in sensor_corners_rot_all])
print("rotate camera")
sensor_corner_coords_rot = sensor_corner_coords
sensor_corner_coords_rot = np.array([np.dot(rotation_matrix(z_axis, kappa_rad), sensor_corner) for sensor_corner in sensor_corner_coords_rot])
sensor_corner_coords_rot = np.array([np.dot(rotation_matrix(y_axis, phi_rad), sensor_corner) for sensor_corner in sensor_corner_coords_rot])
sensor_corner_coords_rot = np.array([np.dot(rotation_matrix(x_axis, omega_rad), sensor_corner) for sensor_corner in sensor_corner_coords_rot])
print("calculate position of corners")
#Define plane
sensor_pixel_coords_rot = sensor_pixel_coords
sensor_pixel_coords_rot = np.array([np.dot(rotation_matrix(z_axis, kappa_rad), sensor_corner) for sensor_corner in sensor_pixel_coords_rot])
sensor_pixel_coords_rot = np.array([np.dot(rotation_matrix(y_axis, phi_rad), sensor_corner) for sensor_corner in sensor_pixel_coords_rot])
sensor_pixel_coords_rot = np.array([np.dot(rotation_matrix(x_axis, omega_rad), sensor_corner) for sensor_corner in sensor_pixel_coords_rot])
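# rotation_matrix() is defined outside this diff; a minimal Rodrigues-style
# stand-in with the (axis, angle_in_radians) signature assumed above (np and
# math come from the file's existing imports):
def rotation_matrix_sketch(axis, theta):
    # 3x3 matrix that rotates a vector by theta radians around the given axis
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    a = math.cos(theta / 2.0)
    b, c, d = -axis * math.sin(theta / 2.0)
    return np.array([[a*a + b*b - c*c - d*d, 2*(b*c + a*d),         2*(b*d - a*c)],
                     [2*(b*c - a*d),         a*a + c*c - b*b - d*d, 2*(c*d + a*b)],
                     [2*(b*d + a*c),         2*(c*d - a*b),         a*a + d*d - b*b - c*c]])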
print("calculate ground points for corners")
line_point=focal_point
#ground_corner_coords = np.array([intersect_point_with_planes(DEM_normales, DEM_points, corner_vector, focal_point) for corner_vector in sensor_corner_coords_rot])
ground_corner_coords = np.array(Parallel(n_jobs=4)(delayed(intersect_point_with_planes)(DEM_normales, DEM_points, corner_vector, focal_point) for corner_vector in sensor_corner_coords_rot))
# corners = [intersect_point_with_planes(plane_normales, plane_points, line_vector, line_point) for line_vector in sensor_corners_rot_all]
corners = Parallel(n_jobs=4)(delayed(intersect_point_with_planes)(plane_normales, plane_points, line_vector, line_point) for line_vector in sensor_corners_rot_all)
corners.append(corners[0])
# add start point as end point to close polygon
ground_corner_coords = np.append(ground_corner_coords, [ground_corner_coords[0]], axis=0)
print("\ncorners found:", corners)
if(corners[0][0] == None or corners[1][0] is None or corners[2][0] is None or corners[3][0] is None):
print("\ncorners found:", ground_corner_coords)
if(np.isnan(ground_corner_coords).any()):
print("corners missing, skipping Photo")
continue
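# intersect_point_with_planes() is defined outside this diff; per DEM triangle
# it boils down to a parametric line/plane intersection like the sketch below
# (the triangle containment test and the nearest-hit / NaN handling are assumed
# to live inside the real helper; plane_point stands for one triangle vertex):
def intersect_line_with_plane_sketch(plane_normal, plane_point, line_vector, line_point):
    n = np.asarray(plane_normal, dtype=float)
    v = np.asarray(line_vector, dtype=float)
    p0 = np.asarray(line_point, dtype=float)
    q0 = np.asarray(plane_point, dtype=float)
    denom = np.dot(n, v)
    if abs(denom) < 1e-12:
        return np.full(3, np.nan)      # ray is parallel to the plane: no hit
    t = np.dot(n, q0 - p0) / denom     # solve n . (p0 + t*v - q0) = 0 for t
    return p0 + t * v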
print("save to shapefile")
import shapefile
print("save corner polygon to shapefile")
shp = shapefile.Writer(shapeType=shapefile.POLYGON)
shp.field('label')
shp.poly(parts=[corners], shapeType=shapefile.POLYGON)
shp.poly(parts=[ground_corner_coords.tolist()], shapeType=shapefile.POLYGON)
shp.record('a')
shp.save(project_dir + PhotoID + '_corners')
prj = open(project_dir + PhotoID + '_corners.prj', "w")
epsg = getWKT_PRJ("2056")
prj.write(epsg)
prj.close()
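# getWKT_PRJ() is defined outside this diff; a minimal sketch of a WKT lookup
# for the EPSG code written into the .prj side file (the spatialreference.org
# URL is an assumption; request is the file's "from urllib import request"):
def getWKT_PRJ_sketch(epsg_code):
    url = "http://spatialreference.org/ref/epsg/{0}/prettywkt/".format(epsg_code)
    with request.urlopen(url) as response:
        wkt = response.read().decode("utf-8")
    # .prj files are usually written as a single line of WKT
    return wkt.replace("\n", "")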
print("rotate pixel matrix")
sensor_pixels_rot = sensor_pixels
sensor_pixels_rot = np.array([np.dot(rotation_matrix(z_axis, kappa), sensor_pixel) for sensor_pixel in sensor_pixels_rot])
sensor_pixels_rot = np.array([np.dot(rotation_matrix(y_axis, phi), sensor_pixel) for sensor_pixel in sensor_pixels_rot])
sensor_pixels_rot = np.array([np.dot(rotation_matrix(x_axis, omega), sensor_pixel) for sensor_pixel in sensor_pixels_rot])
print("calculate pixel matrix position")
print("calculate ground points for sample pixels")
# prepare DEM
print("reducing DEM")
corners_np = np.array(corners)
plane_normal_red = []
plane_point_red = []
for plane_index in range(plane_normales.shape[0]):
plane_normal = plane_normales[plane_index]
plane_point1 = plane_points[plane_index, [0,1,2]]
plane_point2 = plane_points[plane_index, [3,4,5]]
plane_point3 = plane_points[plane_index, [6,7,8]]
hit = point_in_poly(plane_point1[0], plane_point1[1], corners_np[:,[0,1]] ) or point_in_poly(plane_point2[0], plane_point2[1], corners_np[:,[0,1]] ) or point_in_poly(plane_point3[0], plane_point3[1], corners_np[:,[0,1]] )
print("reducing DEM to corner polygon")
DEM_normales_red = []
DEM_points_red = []
for plane_index in range(DEM_normales.shape[0]):
plane_normal = DEM_normales[plane_index]
plane_point1 = DEM_points[plane_index, [0,1,2]]
plane_point2 = DEM_points[plane_index, [3,4,5]]
plane_point3 = DEM_points[plane_index, [6,7,8]]
hit = point_in_poly(plane_point1[0], plane_point1[1], ground_corner_coords[:,[0,1]] ) or point_in_poly(plane_point2[0], plane_point2[1], ground_corner_coords[:,[0,1]] ) or point_in_poly(plane_point3[0], plane_point3[1], ground_corner_coords[:,[0,1]] )
if hit:
plane_normal_red.append(plane_normal.tolist())
plane_point_red.append(plane_points[plane_index].tolist())
DEM_normales_red.append(plane_normal.tolist())
DEM_points_red.append(DEM_points[plane_index].tolist())
plane_normal_red_np = np.array(plane_normal_red)
plane_point_red_np = np.array(plane_point_red)
DEM_normales_red = np.array(DEM_normales_red)
DEM_points_red = np.array(DEM_points_red)
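# point_in_poly() is defined outside this diff; a minimal ray-casting stand-in
# with the (x, y, polygon_xy) signature assumed above:
def point_in_poly_sketch(x, y, poly_xy):
    inside = False
    n = len(poly_xy)
    for i in range(n):
        x1, y1 = poly_xy[i]
        x2, y2 = poly_xy[(i + 1) % n]
        # toggle whenever a horizontal ray from (x, y) towards +x crosses this edge
        if (y1 > y) != (y2 > y) and x < x1 + (y - y1) * (x2 - x1) / (y2 - y1):
            inside = not inside
    return inside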
print('Points to calculate:', sensor_pixels_rot.shape)
print('Planes to intersect with:', plane_normal_red_np.shape)
print('calculate intersections, running on', multiprocessing.cpu_count(), 'cores')
pixels= Parallel(n_jobs=multiprocessing.cpu_count()-5)(delayed(intersect_point_with_planes)(plane_normal_red_np, plane_point_red_np, line_vector, line_point) for line_vector in sensor_pixels_rot)
pixels=[]
for line_vector in sensor_pixels_rot:
pixels.append(intersect_point_with_planes(plane_normal_red_np, plane_point_red_np, line_vector, line_point))
#pixels= Parallel(n_jobs=multiprocessing.cpu_count()-5)(delayed(intersect_point_with_planes)(plane_normal_red_np, plane_point_red_np, line_vector, line_point) for line_vector in sensor_pixels_rot)
image_x = [x for x in range(0, sensor_resolution_x, resolution_inter_x)]
image_y = [y for y in range(0, sensor_resolution_y, resolution_inter_y)]
pixels_np = np.array(pixels)
coordinates_world = pixels_np.reshape(len(image_x), len(image_y), 3)
sensor_pixelcoords = cartesian([image_x, image_y])
print("calculating angles")
print("\ncalculating angles")
angles = [None] * len(pixels)
for index, pixel in enumerate(pixels):
# rotation in xy plane
@@ -268,22 +264,18 @@ def main():
else:
angles[index] = ([None, None])
pixels_all = np.append(sensor_pixelcoords, pixels, axis=1)
angles_np = np.array(angles)
view_angles = angles_np.reshape(len(image_x), len(image_y), 2)
angles_all = np.append(sensor_pixelcoords, angles, axis=1)
all_infos = np.append(coordinates_world, view_angles, axis=2)
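# The body of the angle loop is hidden by the hunk boundary above; a plausible
# sketch of a per-pixel view-angle computation (azimuth in the xy plane plus
# deviation from vertical, in degrees) from ground point to focal point -- the
# exact convention used by the script is an assumption:
def view_angles_sketch(ground_point, focal_point):
    v = np.asarray(focal_point, dtype=float) - np.asarray(ground_point, dtype=float)
    angle_xy = math.degrees(math.atan2(v[1], v[0]))                    # rotation in xy plane
    angle_z = math.degrees(math.atan2(math.hypot(v[0], v[1]), v[2]))   # off-nadir angle
    return [angle_xy, angle_z]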
print("\nwrite pixel matrix to csv")
with open(project_dir + PhotoID + '_pixel_to_position.csv', 'w') as f:
with open(project_dir + PhotoID + '_pixel_informations.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['image_x', 'image_y', 'CH1903_E', 'CH1903_N', 'Altitude', 'view_angle_xy', 'view_angle_z']) # header row
writer.writerows(pixels_all.tolist())
f.close()
print("\nwrite angle matrix to csv")
with open(project_dir + PhotoID + '_pixle_to_angle.csv', 'w') as f:
writer = csv.writer(f)
writer.writerows(angles_all.tolist())
f.close()
if __name__ == "__main__":
main()