Commit c2821068 authored by luroth

Input Ear segmentation Simon Treier

parent 70caf5d5
# @Author: Simon Treier
import numpy as np
from scipy import ndimage


def generate_filter_ellipsoid(GSD, Min_SD, Max_SD, DimPlot, shape, rows, cols):
    """Build an elliptic band-pass mask for filtering images in frequency space.

    GSD is the ground sampling distance in mm/pixel; Min_SD and Max_SD are the
    minimal and maximal object dimensions (in mm) whose frequencies should pass.
    """
    X = np.arange(-DimPlot, DimPlot, 1)
    Y = np.arange(-DimPlot, DimPlot, 1)
    X, Y = np.meshgrid(X, Y)
    Y_Dim = rows * GSD
    X_Dim = cols * GSD
    Center_SD = 2 * Max_SD
    El_X_Center = int(round(X_Dim / (2 * Center_SD)))
    El_Y_Center = int(round(Y_Dim / (2 * Center_SD)))
    El_X_min = int(round(X_Dim / (2 * Max_SD)))
    El_X_max = int(round(X_Dim / (2 * Min_SD)))
    El_Y_min = int(round(Y_Dim / (2 * Max_SD)))
    El_Y_max = int(round(Y_Dim / (2 * Min_SD)))
    Eli_center = ((X ** 2) / El_X_Center ** 2) + ((Y ** 2) / El_Y_Center ** 2)
    Eli_inner = ((X ** 2) / El_X_min ** 2) + ((Y ** 2) / El_Y_min ** 2)
    Eli_outer = ((X ** 2) / El_X_max ** 2) + ((Y ** 2) / El_Y_max ** 2)
    # Keep the low-frequency core, re-open everything outside the inner
    # ellipse, then cut everything outside the outer ellipse
    TEli = np.where(Eli_center <= 1, 1, 0)
    TEli = np.where(Eli_inner >= 1, 1, TEli)
    TEli = np.where(Eli_outer >= 1, 0, TEli)
    # Crop the centered mask to the image shape
    TRows, TCols = TEli.shape
    TCropRows, TCropCols = shape
    Tcrop_eli = TEli[int((TRows / 2) - (TCropRows / 2)):int((TRows / 2) + (TCropRows / 2)),
                     int((TCols / 2) - (TCropCols / 2)):int((TCols / 2) + (TCropCols / 2))]
    TEliInv = 1 - Tcrop_eli
    # Soften the mask edges with a Gaussian blur
    Tcrop_eli = Tcrop_eli * 255
    Tcrop_eli = ndimage.gaussian_filter(Tcrop_eli, sigma=(3, 3))
    Tcrop_eli = Tcrop_eli / 255
    Tcrop_eli = Tcrop_eli.astype(float)
    return (TEli, TEliInv, Tcrop_eli, X, Y)


def fft_forward_back(band, Tcrop_eli):
    """Filter a single image band in frequency space and rescale the result."""
    f = np.fft.fft2(band)
    fshift = np.fft.fftshift(f)
    TImg = Tcrop_eli * fshift
    f_ishift = np.fft.ifftshift(TImg)
    img_back = np.fft.ifft2(f_ishift)
    img_back = np.abs(img_back)
    img_back = ndimage.median_filter(img_back, 4)
    img_back = img_back - np.min(img_back)  # Avoid negative values
    img_back = np.divide(img_back, np.max(img_back))  # Normalize range to 0 - 1
    img_back = np.multiply(img_back, np.max(band) - np.min(band))  # Rescale to original range
    img_back = img_back + np.min(band)  # Add original min values
    return img_back
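
# Minimal usage sketch (not part of the original commit): the image
# dimensions, GSD and sampling distances below are illustrative assumptions;
# the band is random noise standing in for a real image channel.
if __name__ == '__main__':
    rows, cols = 400, 600                   # hypothetical image dimensions
    GSD, Min_SD, Max_SD = 3.02, 7, 100      # mm per pixel, object sizes in mm
    DimPlot = max(rows, cols) / 2
    TEli, TEliInv, Tcrop_eli, X, Y = generate_filter_ellipsoid(
        GSD, Min_SD, Max_SD, DimPlot, (rows, cols), rows, cols)
    band = np.random.rand(rows, cols)       # stand-in for a real image band
    filtered = fft_forward_back(band, Tcrop_eli)
    print(filtered.shape, filtered.min(), filtered.max())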
@@ -6,8 +6,100 @@ import cv2
# TEST
path_image = '/home/luroth/PycharmProjects/crop_height_analysis/Scripts/Analysis/Active_Learning/samples2/RAW/_DSC7431.ARW'
def enhance_for_fft_sonyA9_rawpy(image_raw, pixel_shift):
    '''Takes a rawpy.RawPy object and generates 16 bit RGB, HSV and Lab
    representations plus a set of vegetation indices.

    :param image_raw: rawpy.RawPy object
    :param pixel_shift: offset in pixels used to cut away border pixels
    :return: tuple of RGB, HSV and Lab bands (all 0..1 float32) and derived
        indices (ExG, ExR, ExB, chromatic coordinates, VARI; float32)
    '''
    # Demosaic image using rawpy (LINEAR is demosaic algorithm 0)
    a_XYZ_16bit = image_raw.postprocess(
        output_bps=16,
        output_color=rawpy.ColorSpace.XYZ,
        demosaic_algorithm=rawpy.DemosaicAlgorithm.LINEAR,
        use_camera_wb=True,
        no_auto_bright=True)
    # Cut away border pixels
    a_XYZ_16bit = a_XYZ_16bit[
        pixel_shift:a_XYZ_16bit.shape[0] - pixel_shift,
        pixel_shift:a_XYZ_16bit.shape[1] - pixel_shift,
        :
    ]
    # Convert XYZ to RGB space using opencv (cv2)
    a_RGB_16bit = cv2.cvtColor(a_XYZ_16bit, cv2.COLOR_XYZ2RGB)
    # Scale to 0...1
    a_RGB_16bitf = np.array(a_RGB_16bit / 2 ** 16, dtype=np.float32)
    # Convert to HSV space using opencv (cv2)
    a_HSV_16bitf = cv2.cvtColor(a_RGB_16bitf, cv2.COLOR_RGB2HSV)
    # Convert to Lab space using opencv (cv2)
    a_Lab_16bitf = cv2.cvtColor(a_RGB_16bitf, cv2.COLOR_RGB2Lab)
    # Calculate vegetation indices: ExR, ExG and ExB
    R, G, B = cv2.split(a_RGB_16bitf)
    R = np.array(R, dtype=np.float32)
    G = np.array(G, dtype=np.float32)
    B = np.array(B, dtype=np.float32)
    normalizer = np.array(R + G + B)
    # Avoid division by zero
    normalizer[normalizer == 0] = 1
    r, g, b = R / normalizer, G / normalizer, B / normalizer
    # ExB after Mao, W., Wang, Y., Wang, Y., 2003. Real-time detection of
    # between-row weeds using machine vision. ASAE paper number 031004.
    a_ExR = np.array(1.4 * r - b, dtype=np.float32)
    a_ExG = np.array(2.0 * g - r - b, dtype=np.float32)
    a_ExB = np.array(1.4 * b - g, dtype=np.float32)
    # Normalize ExB from its theoretical range (-1..1.4) to 0..1
    a_ExB_n = (a_ExB + 1) / 2.4
    RGB_intensities = R + G + B
    RGB_intensities = RGB_intensities - np.min(RGB_intensities)
    RGB_intensities = RGB_intensities / 3
    RGB_VARI = (G - R) / ((G + R - B) + 0.00000001)  # Avoid division by zero
    green_chromatic_coordinate = np.nan_to_num(G / RGB_intensities)
    red_chromatic_coordinate = np.nan_to_num(R / RGB_intensities)
    blue_chromatic_coordinate = np.nan_to_num(B / RGB_intensities)
    H = a_HSV_16bitf[:, :, 0]
    S = a_HSV_16bitf[:, :, 1]
    V = a_HSV_16bitf[:, :, 2]
    L = a_Lab_16bitf[:, :, 0]
    a = a_Lab_16bitf[:, :, 1]
    b = a_Lab_16bitf[:, :, 2]
    # Scale L (0..100 in OpenCV float Lab) to 0..1
    L_n = (L - np.min(L)) / 100
    # Return as tuple
    return (a_RGB_16bitf, a_ExG, a_ExR, a_ExB, a_ExB_n,
            R, G, B, RGB_intensities,
            RGB_VARI,
            green_chromatic_coordinate, red_chromatic_coordinate, blue_chromatic_coordinate,
            H, S, V,
            L, a, b, L_n)
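
# Hypothetical call sketch (the path and pixel_shift are placeholders, not
# from this commit), showing how the 20-tuple return value unpacks
# positionally, as done later in EarCountProcessing:
#   image_raw = rawpy.imread('/path/to/image.ARW')
#   (a_RGB_16bitf, a_ExG, a_ExR, a_ExB, a_ExB_n,
#    R, G, B, RGB_intensities, RGB_VARI,
#    GCC, RCC, BCC,
#    H, S, V,
#    L, a, b, L_n) = enhance_for_fft_sonyA9_rawpy(image_raw, pixel_shift=12)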
def postprocess_sonyA9_RAW(path_image, pixel_shift=0):
    # Read RAW file
    image_raw = rawpy.imread(path_image)
    ...
# Prototype algorithm to transform images to better detect ears
# Author: Simon Treier
# 11 September 2019
import os
import shutil
import time
import imageio
import rawpy
import pandas as pd
import numpy as np
from Common import FFTfunctions
from Common import Imagefunctions
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy.ndimage import center_of_mass, label
from scipy.sparse import csr_matrix
from pathlib import Path
from progressbar import ProgressBar
import multiprocessing
from multiprocessing import Process, Manager
def segment_ear_image(path_raw, path_processed, raw_image_name,
                      GSD=3.02,  # Ground sampling distance in mm
                      Min_SD=7,  # Minimal sampling distance (object dimension) to detect, in mm
                      Max_SD=100,  # Maximal sampling distance (object dimension) to detect, in mm
                      pixel_shift=12,  # (dim_raw - dim_rgb) / 2
                      local_maxima_threshold=0.35,
                      local_maxima_min_distance=2,
                      min_intensity_watershed_area=0.35,
                      RGB_factor=2,
                      ExB_factor=1,
                      R_factor=1,
                      L_factor=1,
                      V_factor=1):
    tic = time.process_time()

    pathCompl = Path(path_raw) / raw_image_name
    print(pathCompl)
    image_name = raw_image_name.split(".")[0]
    # Read RAW file
    image_raw = rawpy.imread(str(pathCompl))
    # Create output directory
    path_processed = Path(path_processed)
    path_processed.mkdir(exist_ok=True)
    # Demosaic RAW, transform to color spaces and calculate indices
    a_RGB_16bitf, a_ExG, a_ExR, a_ExB, a_ExB_n, \
        R, G, B, RGB_intensities, RGB_VARI, \
        GCC, RCC, BCC, \
        H, S, V, \
        L, a, b, L_n = Imagefunctions.enhance_for_fft_sonyA9_rawpy(image_raw, pixel_shift)
    # Read dimensions from input image for calculations
    rows, cols, dim = a_RGB_16bitf.shape
    Dim = np.max(a_RGB_16bitf.shape)
    DimPlot = Dim / 2
    # Build a filter to subsequently filter the FFT images
    TEli, TEliInv, Tcrop_eli, X, Y = FFTfunctions.generate_filter_ellipsoid(
        GSD, Min_SD, Max_SD, DimPlot, a_RGB_16bitf[:, :, 1].shape, rows, cols)
    # Apply FFT and filter to the individual bands
    RGB_intensities_trans, ExB_trans, R_trans, L_trans, V_trans = (
        FFTfunctions.fft_forward_back(band, Tcrop_eli)
        for band in (RGB_intensities, a_ExB_n, R, L_n, V))
    # Weighted mean of the filtered bands
    sum_divisor = RGB_factor + ExB_factor + R_factor + L_factor + V_factor
    sum_image = (np.multiply(RGB_factor, RGB_intensities_trans) + np.multiply(ExB_factor, ExB_trans)
                 + np.multiply(R_factor, R_trans) + np.multiply(L_factor, L_trans)
                 + np.multiply(V_factor, V_trans))
    sum_image = np.divide(sum_image, sum_divisor)
    # Filter the weighted mean once more
    sum_image_trans = FFTfunctions.fft_forward_back(sum_image, Tcrop_eli)
    # Local maxima (ear candidates) as a boolean mask
    peak_coords = peak_local_max(sum_image_trans, min_distance=local_maxima_min_distance,
                                 threshold_rel=local_maxima_threshold)
    local_maxima = np.zeros(sum_image_trans.shape, dtype=bool)
    local_maxima[tuple(peak_coords.T)] = True
    local_maxima_labels, n_max = label(local_maxima)
    # Merge each maxima cluster to its center of mass; integer pixel coords
    local_maxima_merged = np.round(center_of_mass(
        local_maxima, local_maxima_labels, range(1, n_max + 1))).astype(int)
    local_maxima_labels = csr_matrix(
        (np.arange(local_maxima_merged.shape[0]) + 1,
         (local_maxima_merged[:, 0], local_maxima_merged[:, 1])),
        shape=sum_image_trans.shape).toarray()
    # Sample all color/index values at the detected maxima
    color_value_chart_ = [(Y_coord, X_coord, R[Y_coord, X_coord], G[Y_coord, X_coord], B[Y_coord, X_coord],
                           RGB_intensities[Y_coord, X_coord],
                           H[Y_coord, X_coord], S[Y_coord, X_coord], V[Y_coord, X_coord],
                           L[Y_coord, X_coord], a[Y_coord, X_coord], b[Y_coord, X_coord],
                           a_ExG[Y_coord, X_coord], a_ExR[Y_coord, X_coord], a_ExB[Y_coord, X_coord],
                           RGB_VARI[Y_coord, X_coord],
                           GCC[Y_coord, X_coord],
                           RCC[Y_coord, X_coord],
                           BCC[Y_coord, X_coord]) for Y_coord, X_coord in local_maxima_merged]
    color_value_chart = pd.DataFrame(color_value_chart_,
                                     columns=['Y', 'X', 'R', 'G', 'B', 'RGB_intensities',
                                              'H', 'S', 'V', 'L', 'a', 'b',
                                              'ExG', 'ExR', 'ExB', 'VARI', 'GCC', 'RCC', 'BCC'])
    # Do watershed from the maxima, limited to areas above the minimum intensity
    watershed_labels = watershed(-sum_image, local_maxima_labels,
                                 mask=sum_image_trans > min_intensity_watershed_area)
    result = np.array((watershed_labels > 0) * 255, dtype=np.uint8)
    print("Save...", end="")
    imageio.imsave(path_processed / (image_name + "_watershed.tif"), result)
    pd.DataFrame(np.array(local_maxima_merged, dtype=np.uint16),
                 columns=["y", "x"]).to_csv(path_processed / (image_name + "_localmax.csv"), index=False)
    tac = time.process_time()
    print(tac - tic, "seconds")
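
# Single-image test sketch (directory paths are placeholders; the file name
# mirrors the TEST constant above): processes one RAW file with the default
# parameters and writes the watershed mask and local-maxima CSV.
#   segment_ear_image('/path/to/RAW', '/path/to/output', '_DSC7431.ARW')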
def segment_ear_image_worker(work_queue, result_queue, raw_files_folder, output_directory,
                             GSD, Min_SD, Max_SD, pixel_shift, local_maxima_threshold, local_maxima_min_distance,
                             min_intensity_watershed_area, RGB_factor, ExB_factor, R_factor, L_factor, V_factor):
    # Consume jobs until the STOP sentinel is received
    for job in iter(work_queue.get, 'STOP'):
        raw_filename = job['raw_filename']
        segment_ear_image(raw_files_folder, output_directory, raw_filename,
                          GSD, Min_SD, Max_SD, pixel_shift, local_maxima_threshold, local_maxima_min_distance,
                          min_intensity_watershed_area, RGB_factor, ExB_factor, R_factor, L_factor, V_factor)
        result_queue.put(raw_filename)
def segment_ear_images_process(path_raw_files, output_directory,
                               GSD=0.00302,  # Ground sampling distance in m (converted to mm below)
                               Min_SD=7,  # Minimal sampling distance (object dimension) to detect, in mm
                               Max_SD=100,  # Maximal sampling distance (object dimension) to detect, in mm
                               pixel_shift=12,  # (dim_raw - dim_rgb) / 2
                               local_maxima_threshold=0.35,
                               local_maxima_min_distance=2,
                               min_intensity_watershed_area=0.35,
                               RGB_factor=2,
                               ExB_factor=1,
                               R_factor=1,
                               L_factor=1,
                               V_factor=1
                               ):
    # Convert GSD from m to mm
    GSD = GSD * 1000
    # Start from an empty output directory
    shutil.rmtree(output_directory, ignore_errors=True)
    os.mkdir(output_directory)
    # Job and results queue
    m = Manager()
    jobs = m.Queue()
    results = m.Queue()
    processes = []
    # Progress bar counter
    max_jobs = 0
    count = 0
    # Build up job queue by iterating over RAW files
    directory = os.fsencode(path_raw_files)
    for file in os.listdir(directory):
        # Get filename
        raw_filename_full = os.fsdecode(file)
        raw_filename = os.path.splitext(raw_filename_full)[0]
        if os.path.splitext(raw_filename_full)[1].lower() in (".arw", ".cr2"):
            print(raw_filename_full, "to queue")
            job = dict()
            job['raw_filename_full'] = raw_filename_full
            job['raw_filename'] = raw_filename
            jobs.put(job)
            max_jobs += 1
    # Init progress bar
    progress = ProgressBar(min_value=0, max_value=max_jobs)
    # Start processes; leave two CPUs for the main thread / OS
    for w in range(multiprocessing.cpu_count() - 2):
        p = Process(target=segment_ear_image_worker,
                    args=(jobs, results, path_raw_files, output_directory,
                          GSD, Min_SD, Max_SD, pixel_shift, local_maxima_threshold, local_maxima_min_distance,
                          min_intensity_watershed_area, RGB_factor, ExB_factor, R_factor, L_factor, V_factor
                          ))
        p.daemon = True
        p.start()
        processes.append(p)
    # One STOP sentinel per worker so that every process terminates
    for _ in processes:
        jobs.put('STOP')
    print("jobs all started")
    progress.update(0)
    # Collect results and advance the progress bar
    while count < max_jobs:
        results.get()
        count += 1
        progress.update(count)
    progress.finish()
    for p in processes:
        p.join()
@@ -22,5 +22,4 @@ if __name__ == "__main__":
print("Processing", campaign_date)
CanopyAnalysis.process_NadirCC_LCCC_LA_BBCH30(path_campaign, GSD)
\ No newline at end of file
from EarCount import EarCountProcessing
from pathlib import Path

path_p = Path("P:")
#path_p = Path("/home/luroth/public")

if __name__ == "__main__":
    base_path_campaign = path_p / "Evaluation/UAV/_Processed_/ETHZ_eschikon_FPWW024_lot2"
    base_path_raw = path_p / "Evaluation/UAV/_DATA_/Matrice600/2019/FIP_lot2/"
    GSD = 0.00302
    date_folders = base_path_campaign.glob('[0-9]*')
    for date_folder in date_folders:
        campaign_date = date_folder.name
        path_output = base_path_campaign / campaign_date / '28m_M600P' / 'ear_count'
        path_raw = base_path_raw / campaign_date / "RAW"
        if int(campaign_date) > 20190501:
            print("Processing", campaign_date)
            EarCountProcessing.segment_ear_images_process(path_raw, path_output, GSD)
\ No newline at end of file