Commit 8f103a63 authored by Roman Trüb

cleanup of worker_datatrace (conversion of float to int)

parent a1612eef
@@ -43,7 +43,7 @@ import numpy as np
logger = None
debug = False
testid = None
errors = []
FetchObsThread_list = []
FetchObsThread_stopEvent = None
@@ -87,13 +87,13 @@ class ServiceInfo():
        self.servicename = servicename
        self.files = []
        self.pattern = "^%s_[0-9]+\.[a-z]+$" % servicename

    def matchFileName(self, filename):
        return re.search(self.pattern, os.path.basename(filename)) is not None

    def addFile(self, filename):
        self.files.append(filename)

    def stripFileList(self, removelast=True):
        self.files.sort()
        if ((len(self.files) > 0) and removelast):
@@ -107,15 +107,15 @@ class ServiceInfo():
#
##############################################################################
def sigterm_handler(signum, frame):
    """If the program is terminated by sending it the signal SIGTERM
    (e.g. by executing 'kill') or SIGINT (pressing ctrl-c),
    this signal handler is invoked for cleanup."""
    global mainloop_stop
    global FetchObsThread_stopEvent

    logger.info("Process received SIGTERM or SIGINT signal")

    # Signal all observer fetcher threads to stop:
    logger.debug("Stopping observer fetcher threads...")
    shutdown_timeout = flocklab.config.getint("fetcher", "shutdown_timeout")
@@ -137,7 +137,7 @@ def sigterm_handler(signum, frame):
            cn.close()
    except:
        logger.warning("Could not connect to database.")

    # Tell the main loop to stop:
    mainloop_stop = True
    logger.debug("Set stop signal for main loop.")
@@ -445,25 +445,31 @@ def worker_datatrace(queueitem=None, nodeid=None, resultfile_path=None, logqueue
            logqueue.put_nowait((loggername, logging.WARNING, "Empty data trace results file."))
        else:
            with open(resultfile_path, "a") as outfile:
-                infile = open(tmpfile2, "r")
-                for line in infile:
-                    # input format: global_ts, comparator, data, PC, operation, local_ts
-                    (timestamp, var, val, pc, access, localts) = line.strip().split(',')
-                    if access == 'operation' or access == '' or var == '':
-                        continue
-                    if flocklab.parse_int(var) < len(varnames):
-                        var = varnames[flocklab.parse_int(var)]
-                    # output format: timestamp,observer_id,node_id,variable,value,access,pc
-                    outfile.write("%s,%d,%s,%s,%s,%s,%s\n" % (timestamp, obsid, nodeid, var, val, access, pc))
-                infile.close()
+                df = pd.read_csv(tmpfile2)
+                # debug
+                df.to_csv('/home/flocklab/tmp/tmp_df.txt')
+                # nan values cannot be converted to int -> drop corresponding lines
+                df.dropna(inplace=True)
+                # since there were nan values, the comparator column was stored as float but we need int; round is necessary, otherwise 0.999999 is converted to 0, which is wrong
+                df.comparator = df.comparator.round().astype(int)
+                df.data = df.data.round().astype(int)
+                df['obsid'] = obsid
+                df['nodeid'] = nodeid
+                df['varname'] = df.comparator.apply(lambda x: (varnames[x] if x < len(varnames) else str(x)))
+                df.to_csv(
+                    outfile,
+                    columns=['global_ts', 'obsid', 'nodeid', 'varname', 'data', 'operation', 'PC'],
+                    index=False,
+                    header=False
+                )
    except:
        msg = "Error in datatrace worker process: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())
        _errors.append((msg, obsid))
        logqueue.put_nowait((loggername, logging.ERROR, msg))
    finally:
        # debug
-        #shutil.copyfile(input_filename, "%s_raw" % resultfile_path)
-        #shutil.copyfile(tmpfile1, "%s_uncorrected.csv" % resultfile_path)
+        # shutil.copyfile(input_filename, "%s_raw" % resultfile_path)
+        # shutil.copyfile(tmpfile1, "%s_uncorrected.csv" % resultfile_path)
        # delete files
        os.remove(input_filename)
        os.remove(tmpfile1)
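
Why the round() before astype(int) in the change above: pandas parses a CSV column as float64 as soon as it contains an empty field (NaN is a float), and a bare astype(int) would both fail on the NaN rows and truncate a value like 0.999999 to 0. A minimal self-contained sketch of the pitfall (the values are illustrative, not FlockLab data):

    import io
    import pandas as pd

    # Illustrative CSV: the second row is missing its comparator value, which
    # forces pandas to store the whole column as float64 (NaN is a float).
    csv = io.StringIO("global_ts,comparator,data\n1.0,0,42\n2.0,,7\n3.0,0.9999999,13\n")
    df = pd.read_csv(csv)

    df.dropna(inplace=True)            # NaN rows cannot be cast to int
    # astype(int) alone truncates toward zero: int(0.9999999) == 0.
    # Rounding first, as the committed code does, yields the intended value.
    df.comparator = df.comparator.round().astype(int)
    print(df.comparator.tolist())      # -> [0, 1]
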
@@ -483,7 +489,7 @@ def worker_datatrace(queueitem=None, nodeid=None, resultfile_path=None, logqueue
def worker_callback(result):
    global errors
    global FetchObsThread_queue

    # the result contains two elements:
    # 1st: a list of errors
    if len(result[0]) > 0:
@@ -493,7 +499,7 @@ def worker_callback(result):
            errors.append(msg)
        except:
            errors.append("Failed to convert the error list in worker_callback (%s)." % str(result[0]))

    # 2nd: a list of the processed elements
    try:
        FetchObsThread_queue.put(item=result[1], block=True, timeout=10)
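
The comments above spell out the contract between the workers and this callback: each worker returns a 2-tuple of (error list, processed queue item). The pool setup itself is outside this diff, so the wiring below is an assumption; a minimal sketch of the same contract with multiprocessing.Pool.apply_async, all names illustrative:

    import multiprocessing

    def worker(item):
        errors = []                    # filled with (msg, obsid) tuples on failure
        # ... process the downloaded file here ...
        return (errors, item)          # 1st: errors, 2nd: the processed element

    def callback(result):
        # runs in the parent process with the worker's return value
        errors, processed = result
        print("errors:", errors, "processed:", processed)

    if __name__ == "__main__":
        with multiprocessing.Pool(processes=2) as pool:
            pool.apply_async(worker, ("gpiotracing_1.csv",), callback=callback)
            pool.close()
            pool.join()
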
@@ -511,16 +517,16 @@ def worker_callback(result):
##############################################################################
class LogQueueThread(threading.Thread):
    """ Thread which logs from queue to logfile.
    """
    def __init__(self, logqueue, logger, stopEvent):
        threading.Thread.__init__(self)
        self._logger = logger
        self._stopEvent = stopEvent
        self._logqueue = logqueue

    def run(self):
        self._logger.info("LogQueueThread started")

        # Let thread run until someone calls terminate() on it:
        while not self._stopEvent.is_set():
            try:
@@ -528,7 +534,7 @@ class LogQueueThread(threading.Thread):
                self._logger.log(loglevel, loggername + msg)
            except queue.Empty:
                pass

        # Stop the process:
        self._logger.info("LogQueueThread stopped")
### END LogQueueThread
@@ -541,9 +547,9 @@ class LogQueueThread(threading.Thread):
##############################################################################
class FetchObsThread(threading.Thread):
    """ Thread which downloads database files from an observer to the server.
    """
    def __init__(self, obsid, obsethernet, dirname, debugdirname, workQueue, stopEvent):
        threading.Thread.__init__(self)
        self._obsid = obsid
        self._obsethernet = obsethernet
        self._obsfiledir = dirname
@@ -551,21 +557,21 @@ class FetchObsThread(threading.Thread):
        self._workQueue = workQueue
        self._stopEvent = stopEvent
        self._logger = logger

        self._min_sleep = flocklab.config.getint("fetcher", "min_sleeptime")
        self._max_randsleep = flocklab.config.getint("fetcher", "max_rand_sleeptime")
        self._obstestresfolder = "%s/%d" % (flocklab.config.get("observer", "testresultfolder"), testid)

    def run(self):
        try:
            self._loggerprefix = "(FetchObsThread.%d) "%self._obsid
            self._logger.info(self._loggerprefix + "FetchObsThread starting...")
            removelast = True

            # Let thread run until someone calls terminate() on it:
            while removelast == True:
                """ Get data from the observer over SCP.
                    Then request data from the observer and store it in the server's filesystem.
                    Then sleep some random time before fetching data again.
                """
                # Wait for some random time:
@@ -599,7 +605,7 @@ class FetchObsThread(threading.Thread):
                        copyfilelist.append(resfile)
                    #if (len(service.files) > 0):
                    #    self._logger.debug(self._loggerprefix + "Will process files %s for service %s" % (" ".join(service.files), service.servicename))
                if len(copyfilelist) > 0:
                    # Download the database files:
                    self._logger.debug(self._loggerprefix + "Downloading results files %s" % (" ".join(copyfilelist)))
@@ -654,10 +660,10 @@ class FetchObsThread(threading.Thread):
                else:
                    self._logger.error(self._loggerprefix + "SSH to observer did not succeed, fetcher thread terminated with code %d. Error: %s" % (rs, err.strip()))
                    break # abort

        except:
            logger.error(self._loggerprefix + "FetchObsThread crashed: %s, %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()))

        # Stop the process:
        self._logger.info(self._loggerprefix + "FetchObsThread stopped")
### END FetchObsThread
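
The fetch loop documented in the docstring above (download over SCP, then sleep a random time, repeat until stopped) is a stoppable-polling pattern. A minimal sketch using threading.Event as an interruptible sleep follows; the real code reads min_sleeptime and max_rand_sleeptime from the config, so the defaults here are placeholders:

    import random
    import threading

    class PollingThread(threading.Thread):
        # Sketch: do one unit of work, then sleep a randomized interval,
        # waking up early if the stop event gets set.
        def __init__(self, stop_event, min_sleep=5, max_rand_sleep=10):
            super().__init__()
            self._stop_event = stop_event
            self._min_sleep = min_sleep
            self._max_rand_sleep = max_rand_sleep

        def run(self):
            while not self._stop_event.is_set():
                # ... scp result files from the observer and enqueue them here ...
                sleep_s = self._min_sleep + random.randint(0, self._max_rand_sleep)
                self._stop_event.wait(timeout=sleep_s)   # interruptible sleep

    stop = threading.Event()
    t = PollingThread(stop)
    t.start()
    stop.set()    # thread finishes its current pass and exits
    t.join()
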
@@ -674,21 +680,21 @@ def start_fetcher():
    global FetchObsThread_queue
    global FetchObsThread_stopEvent
    global obsfetcher_dict

    # Daemonize the process ---
    daemon.daemonize(None, closedesc=False)
    logger.info("Daemon started")
    logger.info("Going to fetch data for test ID %d" %testid)

    # Get needed metadata from database ---
    try:
        (cn, cur) = flocklab.connect_to_db()
    except:
        flocklab.error_logandexit("Could not connect to database.", errno.EAGAIN)
    try:
        cur.execute("""SELECT `a`.observer_id, `a`.ethernet_address
                       FROM `tbl_serv_observer` AS `a`
                       LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `b` ON `a`.serv_observer_key = `b`.observer_fk
                       WHERE `b`.test_fk = %d GROUP BY `a`.observer_id;
                    """ % testid)
    except MySQLdb.Error as err:
@@ -703,7 +709,7 @@ def start_fetcher():
    if not rs:
        logger.info("No observers found for this test. Nothing has to be done, thus exiting...")
        return errno.ENODATA

    # Start fetcher threads ---
    # Create a directory structure to store the downloaded files from the DB:
    obsfiledir = "%s/%d" % (flocklab.config.get('fetcher', 'obsfile_dir'), testid)
@@ -725,7 +731,7 @@ def start_fetcher():
        debugdirname = "%s/%d" % (obsfiledebugdir, obsid)
        if (not os.path.exists(debugdirname)):
            os.makedirs(debugdirname)

        # Start thread:
        try:
            thread = FetchObsThread(obsid, observer[1], dirname, debugdirname, FetchObsThread_queue, FetchObsThread_stopEvent)
            FetchObsThread_list.append(thread)
@@ -734,7 +740,7 @@ def start_fetcher():
        except:
            logger.warning("Error when starting fetcher thread for observer %d: %s, %s" % (obsid, str(sys.exc_info()[0]), str(sys.exc_info()[1])))
            continue

    return flocklab.SUCCESS
### END start_fetcher
@@ -786,7 +792,7 @@ def stop_fetcher():
        except:
            logger.warning("Could not connect to database.")
            return errno.ENOPKG

    return flocklab.SUCCESS
### END stop_fetcher
...@@ -801,12 +807,12 @@ class WorkManager(): ...@@ -801,12 +807,12 @@ class WorkManager():
self.worklist = {} self.worklist = {}
self.pattern = re.compile("_[0-9].*") self.pattern = re.compile("_[0-9].*")
self.workcount = 0 self.workcount = 0
def _next_item_with_state(self, service, obsid): def _next_item_with_state(self, service, obsid):
stateitem = list(self.worklist[service][obsid][1][0]) stateitem = list(self.worklist[service][obsid][1][0])
stateitem[4] = self.worklist[service][obsid][0] stateitem[4] = self.worklist[service][obsid][0]
return tuple(stateitem) return tuple(stateitem)
def add(self, item): def add(self, item):
try: try:
service = self.pattern.sub("",item[3]) service = self.pattern.sub("",item[3])
...@@ -826,7 +832,7 @@ class WorkManager(): ...@@ -826,7 +832,7 @@ class WorkManager():
return None return None
except: except:
logger.error("Error in WorkManager.add(): %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) logger.error("Error in WorkManager.add(): %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()))
def done(self, item): def done(self, item):
try: try:
service = self.pattern.sub("",item[3]) service = self.pattern.sub("",item[3])
...@@ -844,10 +850,10 @@ class WorkManager(): ...@@ -844,10 +850,10 @@ class WorkManager():
return None return None
except: except:
logger.error("Error in WorkManager.done(): %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) logger.error("Error in WorkManager.done(): %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()))
def finished(self): def finished(self):
return self.workcount == 0 return self.workcount == 0
### END WorkManager ### END WorkManager
@@ -872,7 +878,7 @@ def usage():
#
##############################################################################
def main(argv):

    ### Get global variables ###
    global logger
    global debug
@@ -882,12 +888,12 @@ def main(argv):
    global owner_fk
    global obsdict_byid
    global serialdict

    stop = False

    # Get logger:
    logger = flocklab.get_logger()

    # Get the config file ---
    flocklab.load_config()
@@ -924,13 +930,13 @@ def main(argv):
            print("Wrong API usage")
            logger.warning("Wrong API usage")
            sys.exit(errno.EINVAL)

    # Check if the necessary parameters are set ---
    if not testid:
        print("Wrong API usage")
        logger.warning("Wrong API usage")
        sys.exit(errno.EINVAL)

    # Check if the Test ID exists in the database ---
    try:
        (cn, cur) = flocklab.connect_to_db()
@@ -946,10 +952,10 @@ def main(argv):
        else:
            msg = "Error when trying to get test ID from database: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
            flocklab.error_logandexit(msg, errno.EIO)

    # Add Test ID to logger name ---
    logger.name += " (Test %d)"%testid

    # Start / stop the fetcher ---
    ret = flocklab.SUCCESS
    if stop:
@@ -957,7 +963,7 @@ def main(argv):
        logger.info("FlockLab fetcher stopped.")
        sys.exit(ret)

    # Start the fetcher processes which download data from the observers:
    ret = start_fetcher()
    if ret == flocklab.SUCCESS:
        logger.info("FlockLab fetcher started.")
@@ -965,7 +971,7 @@ def main(argv):
        msg = "Start function returned error. Exiting..."
        os.kill(os.getpid(), signal.SIGTERM)
        rs = flocklab.error_logandexit(msg, ret)

    # Get needed metadata ---
    try:
        (cn, cur) = flocklab.connect_to_db()
@@ -983,15 +989,15 @@ def main(argv):
        obsdict_byid = None
    # Dict for serial service: 'r' means reader (data read from the target), 'w' means writer (data written to the target):
    serialdict = {0: 'r', 1: 'w'}

    # find out the start and stop time of the test
    cur.execute("SELECT `time_start_wish`, `time_end_wish` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" %testid)
    # Times are going to be of datetime type:
    ret = cur.fetchone()
    teststarttime = ret[0]
    teststoptime = ret[1]
    ppFileFormat = None

    # Find out which services are used to allocate working threads later on ---
    # Get the XML config from the database and check which services are used in the test.
    servicesUsed_dict = {'gpiotracing': 'gpioTracingConf', 'powerprofiling': 'powerProfilingConf', 'serial': 'serialConf', 'datatrace': 'dataTraceConf'}
@@ -1029,7 +1035,7 @@ def main(argv):
    # Append log services (always used)
    servicesUsed_dict['errorlog'] = True
    servicesUsed_dict['timesynclog'] = True

    cur.close()
    cn.close()
    if ((owner_fk==None) or (obsdict_byid==None)):
@@ -1041,12 +1047,12 @@ def main(argv):
        flocklab.error_logandexit(msg, errno.EAGAIN)
    else:
        logger.debug("Got all needed metadata.")

    # Start aggregating processes ---
    """ There is an infinite loop which gets files to process from the fetcher threads which download data from the observers.
        Downloaded data is then assigned to a worker process for the corresponding service and in the worker process parsed,
        converted (if needed) and aggregated into a single file per service.
        The loop is stopped if the program receives a stop signal. In this case, the loop runs until no more database files
        are there to be processed.
    """
if __name__ == '__main__':
@@ -1088,7 +1094,7 @@ def main(argv):
thread.start()
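
For reference, the aggregation loop described in the docstring in main() (take downloaded files from the queue that the fetcher threads fill, dispatch each one to a worker, and after a stop signal drain the queue before exiting) reduces to the following self-contained sketch; dispatch stands in for the hand-off to a worker process and all names are illustrative:

    import queue
    import threading

    def main_loop(work_queue, stop_event, dispatch):
        # Sketch of the aggregation loop: block on the queue of downloaded
        # files; once a stop is requested, exit as soon as the queue is drained.
        while True:
            try:
                item = work_queue.get(block=True, timeout=5)
                dispatch(item)               # e.g. hand off to a worker process
            except queue.Empty:
                if stop_event.is_set():
                    break                    # stopped and no more files to process

    # usage: fetcher threads fill the queue, the main thread drains it
    q = queue.Queue()
    stop = threading.Event()
    q.put("datatrace_1.csv")
    stop.set()
    main_loop(q, stop, dispatch=print)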