Commit 2ad95fd2 authored by Reto Da Forno

config and logging cleanup

parent 93fcd7c5
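
The scripts in this commit stop passing name, logger and config around and instead rely on module-level helpers in lib/flocklab.py. Below is a minimal sketch of the assumed API, based only on the call sites visible in this diff (load_config, get_logger, error_logandexit, SUCCESS and the module-level config); the actual implementation in lib/flocklab.py may differ:

# --- sketch: lib/flocklab.py helpers assumed by this commit (illustrative, not the actual module) ---
import configparser, logging, sys

SUCCESS = 0
FAILED = -1
config = None    # module-level ConfigParser, filled in by load_config()
logger = None    # module-level logger, created by get_logger()

def load_config(configfile="/home/flocklab/flocklab_config.ini"):    # default path is an assumption
    """Read the server configuration into the module-level 'config' object."""
    global config
    config = configparser.ConfigParser()
    return SUCCESS if config.read(configfile) else FAILED

def get_logger(name="flocklab"):
    """Return a logger; the loggername/loggerpath arguments of the old API are no longer needed."""
    global logger
    logger = logging.getLogger(name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    return logger

def error_logandexit(msg, exitcode=1):
    """Log an error (the real helper presumably also notifies the admins), then exit."""
    (logger or get_logger()).error(msg)
    sys.exit(exitcode)
# --- end of sketch ---
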
@@ -3,7 +3,7 @@
[email]
admin_email = [your_email]
flocklab_email = [flocklab_user_email]
mailserver = [your_mailserver]
mailserver = [your_mailserver] ;smtp mail server URL
; database connection configuration
[database]
@@ -134,7 +134,7 @@ tempdir = /home/flocklab/tmp
venvwrapper = /home/flocklab/tools/wrapper.sh ;activates python virtual environment (leave blank if no venv)
; config for webserver session
[session]
expiretime = 1440 ;Seconds until session expires
dir = /tmp/flocklab_sessions
[webserver]
sessionexpiretime = 1440 ;Seconds until session expires
sessiondir = /tmp/flocklab_sessions
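
With the [session] settings folded into [webserver], server code would read the renamed keys roughly as follows (a usage sketch; assumes flocklab.config is the ConfigParser populated by load_config()):

# --- sketch: reading the renamed session settings (illustrative) ---
import lib.flocklab as flocklab

if flocklab.load_config() == flocklab.SUCCESS:
    expiretime = flocklab.config.getint('webserver', 'sessionexpiretime')   # was [session] expiretime
    sessiondir = flocklab.config.get('webserver', 'sessiondir')             # was [session] dir
# --- end of sketch ---
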
@@ -4,15 +4,7 @@ import sys, os, getopt, errno, traceback, time, shutil, logging, subprocess, __m
import lib.flocklab as flocklab
### Global variables ###
###
scriptname = os.path.basename(__main__.__file__)
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
name = "Archiver"
###
logger = None
config = None
##############################################################################
@@ -21,7 +13,7 @@ config = None
#
##############################################################################
def usage():
print("Usage: %s --testid=<int> [--email] [--debug] [--help]" %scriptname)
print("Usage: %s --testid=<int> [--email] [--debug] [--help]" % __file__)
print("Options:")
print(" --testid=<int>\t\tTest ID of test whose results should be archived.")
print(" --email\t\t\tOptional. Send the data to the test owner by email.")
@@ -39,7 +31,6 @@ def main(argv):
### Global Variables ###
global logger
global config
send_email = False
testid = -1
@@ -49,13 +40,12 @@ def main(argv):
time.tzset()
# Get logger ---
logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath)
logger = flocklab.get_logger()
# Get config ---
config = flocklab.get_config(configpath=scriptpath)
if not config:
if flocklab.load_config() != flocklab.SUCCESS:
msg = "Could not read configuration file. Exiting..."
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
#logger.debug("Read configuration file.")
# Get arguments ---
@@ -68,7 +58,7 @@ def main(argv):
sys.exit(errno.EINVAL)
except:
msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
for opt, arg in opts:
if opt in ("-h", "--help"):
@@ -101,37 +91,35 @@ def main(argv):
# Connect to the DB ---
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
except:
msg = "Could not connect to database"
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
# Check if max number of instances is not reached ---
rs = flocklab.count_running_instances(scriptname)
rs = flocklab.count_running_instances(__file__)
if (rs >= 0):
maxinscount = config.getint('archiver', 'max_instances')
maxinscount = flocklab.config.getint('archiver', 'max_instances')
if rs > maxinscount:
msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs)
flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config)
#else:
#logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs))
msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..." % (maxinscount, __file__, rs)
flocklab.error_logandexit(msg, errno.EUSERS)
else:
msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs)
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
msg = "Error when trying to count running instances of %s. Function returned with %d" % (__file__, rs)
flocklab.error_logandexit(msg, errno.EAGAIN)
# Check if the Test ID exists in the database ---
rs = flocklab.check_test_id(cur, testid)
if rs != 0:
if rs == 3:
msg = "Test ID %d does not exist in database." %testid
flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
flocklab.error_logandexit(msg, errno.EINVAL)
else:
msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EIO, name, logger, config)
flocklab.error_logandexit(msg, errno.EIO)
# Check directories needed ---
archivedir = config.get('archiver', 'archive_dir')
archivename = "%d%s"%(testid, config.get('archiver','archive_ext'))
archivedir = flocklab.config.get('archiver', 'archive_dir')
archivename = "%d%s"%(testid, flocklab.config.get('archiver','archive_ext'))
archivepath = "%s/%s"%(archivedir, archivename)
if ((not os.path.exists(archivedir)) or (not os.path.isdir(archivedir))):
if not os.path.exists(archivedir):
@@ -139,17 +127,17 @@ def main(argv):
logger.debug("Directory '%s' created." % (archivedir))
else:
msg = "The path %s does either not exist or is not a directory. Aborting..."%(archivedir)
flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
flocklab.error_logandexit(msg, errno.EINVAL)
# Generate archive ---
if ((os.path.exists(archivepath)) and (os.path.isfile(archivepath))):
logger.debug("Archive %s is already existing." %(archivepath))
else:
# Check if testresultsdir directory is existing:
testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid)
testresultsdir = "%s/%d" %(flocklab.config.get('fetcher', 'testresults_dir'), testid)
if ((not os.path.exists(testresultsdir)) or (not os.path.isdir(testresultsdir))):
msg = "The path %s does either not exist or is not a directory. Aborting..."%(testresultsdir)
flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
flocklab.error_logandexit(msg, errno.EINVAL)
else:
logger.debug("Directory %s exists."%(testresultsdir))
# sort tar file, powerprofiling at the end
@@ -162,9 +150,9 @@ def main(argv):
pp_part.append(os.path.basename(testresultsdir)+'/'+part)
resultparts.extend(pp_part)
# Archive files:
max_cpus = config.get('archiver', 'pigz_max_cpus')
max_cpus = flocklab.config.get('archiver', 'pigz_max_cpus')
try:
nice_level = config.getint('archiver', 'nice_level')
nice_level = flocklab.config.getint('archiver', 'nice_level')
except:
logger.warn("Could not read nice_level from config file. Setting level to 10.")
nice_level = 10
@@ -193,7 +181,7 @@ def main(argv):
msg += "Tar command returned: %s, %s"%(str(tarout), str(tarerr))
msg += "Gz command returned: %s, %s"%(str(gzout), str(gzerr))
msg += "Error was: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config)
flocklab.error_logandexit(msg, errno.EFAULT)
archive_size = os.path.getsize(archivepath)
archive_size_mb = float(archive_size)/1048576
logger.debug("Archive has size %dB (%.3fMB)"%(archive_size, archive_size_mb))
@@ -208,12 +196,12 @@ def main(argv):
usermail = rs
if ((usermail == 1) or (usermail == 2)):
msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." %testid
flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
flocklab.error_logandexit(msg, errno.EINVAL)
else:
logger.debug("Got email of test owner: %s" %(str(usermail)))
# Check the size of the archive and only send it by email if it has a decent size:
if ( archive_size > int(config.get('archiver','email_maxsize')) ):
if ( archive_size > int(flocklab.config.get('archiver','email_maxsize')) ):
msg = "Dear FlockLab user,\n\n\
Measurement data for test with ID %d has been successfully retrieved from the FlockLab database \
but could not be sent by email as it is too big. Please fetch your test results from the user interface.\n\n\
@@ -238,4 +226,4 @@ if __name__ == "__main__":
main(sys.argv[1:])
except Exception:
msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv))
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
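
The archiving step wrapped by the script above boils down to tar piped into pigz, run at the configured nice level and CPU count. A standalone sketch under those assumptions (the real script's exact tar/pigz invocation and error reporting are not shown in this diff):

# --- sketch: tar | pigz with nice level and CPU limit (illustrative) ---
import os, subprocess

def archive_results(testresultsdir, archivepath, nice_level=10, max_cpus=2):
    """Pack <testresultsdir> into the gzip archive <archivepath>."""
    parent = os.path.dirname(testresultsdir)
    tar = subprocess.Popen(["nice", "-n", str(nice_level), "tar", "-cf", "-",
                            "-C", parent, os.path.basename(testresultsdir)],
                           stdout=subprocess.PIPE)
    with open(archivepath, "wb") as out:
        gz = subprocess.Popen(["nice", "-n", str(nice_level), "pigz", "-p", str(max_cpus)],
                              stdin=tar.stdout, stdout=out)
        tar.stdout.close()    # let tar receive SIGPIPE if pigz exits early
        gz.communicate()
    tar.wait()
    return (tar.returncode == 0) and (gz.returncode == 0)
# --- end of sketch ---
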
@@ -3,15 +3,8 @@
import sys, os, getopt, errno, traceback, logging, time, __main__, shutil, glob, datetime
import lib.flocklab as flocklab
### Global variables ###
###
scriptname = os.path.basename(__main__.__file__)
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
name = "Cleaner"
###
logger = None
config = None
##############################################################################
@@ -20,7 +13,7 @@ config = None
#
##############################################################################
def usage():
print("Usage: %s [--debug] [--help]" %scriptname)
print("Usage: %s [--debug] [--help]" % __file__)
print("Options:")
print(" --debug\t\t\tOptional. Print debug messages to log.")
print(" --help\t\t\tOptional. Print this help.")
@@ -36,20 +29,18 @@ def main(argv):
### Global Variables ###
global logger
global config
# Set timezone to UTC:
os.environ['TZ'] = 'UTC'
time.tzset()
# Get logger:
logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath)
logger = flocklab.get_logger()
# Get config ---
config = flocklab.get_config(configpath=scriptpath)
if not config:
if flocklab.load_config() != flocklab.SUCCESS:
msg = "Could not read configuration file. Exiting..."
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
#logger.debug("Read configuration file.")
# Get the arguments:
@@ -62,7 +53,7 @@ def main(argv):
sys.exit(errno.EINVAL)
except:
msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
for opt, arg in opts:
if opt in ("-d", "--debug"):
@@ -75,24 +66,22 @@ def main(argv):
sys.exit(errno.EINVAL)
# Allow only x instances ---
rs = flocklab.count_running_instances(scriptname)
rs = flocklab.count_running_instances(__file__)
if (rs >= 0):
maxinscount = config.getint('cleaner', 'max_instances')
maxinscount = flocklab.config.getint('cleaner', 'max_instances')
if rs > maxinscount:
msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs)
flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config)
#else:
#logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs))
msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..." % (maxinscount, __file__, rs)
flocklab.error_logandexit(msg, errno.EUSERS)
else:
msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs)
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
msg = "Error when trying to count running instances of %s. Function returned with %d" % (__file__, rs)
flocklab.error_logandexit(msg, errno.EAGAIN)
# Connect to the database ---
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
except:
msg = "Could not connect to database"
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
#logger.debug("Connected to database")
# Check for running tests ---
@@ -136,8 +125,8 @@ def main(argv):
logger.debug("Deleted %i rows of data in table %s for test ID %s in %f seconds" %(num_deleted_rows, table, testid, (time.time()-starttime)))
# Delete cached test results ---
archive_path = "%s/%s%s"%(config.get('archiver','archive_dir'), testid, config.get('archiver','archive_ext'))
viz_pathes = glob.glob("%s/%s_*"%(config.get('viz','imgdir'), testid))
archive_path = "%s/%s%s"%(flocklab.config.get('archiver','archive_dir'), testid, flocklab.config.get('archiver','archive_ext'))
viz_pathes = glob.glob("%s/%s_*"%(flocklab.config.get('viz','imgdir'), testid))
pathes = [archive_path]
pathes.extend(viz_pathes)
for path in pathes:
@@ -164,9 +153,9 @@ def main(argv):
logger.debug("Set status for test ID %s to 'deleted'" %(testid))
# Delete old entries in viz cache ---
keeptime = config.getint('cleaner', 'keeptime_viz')
keeptime = flocklab.config.getint('cleaner', 'keeptime_viz')
earliest_keeptime = time.time() - (keeptime*86400)
imgdir_path = config.get('viz','imgdir')
imgdir_path = flocklab.config.get('viz','imgdir')
if not os.path.isdir(imgdir_path):
os.mkdir(imgdir_path)
for f in os.listdir(imgdir_path):
@@ -179,7 +168,7 @@ def main(argv):
msg = "Encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
logger.error(msg)
emails = flocklab.get_admin_emails(cur, config)
msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg)
msg = "%s on server %s encountered error:\n\n%s" % (__file__, os.uname()[1], msg)
flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails)
finally:
cur.close()
@@ -194,5 +183,5 @@ if __name__ == "__main__":
main(sys.argv[1:])
except Exception:
msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv))
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
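
The viz-cache cleanup towards the end of the cleaner deletes cached images older than keeptime_viz days. A condensed sketch of that step with illustrative values:

# --- sketch: expire cached viz images older than keeptime_viz days (illustrative values) ---
import os, time

keeptime_days = 30                         # e.g. flocklab.config.getint('cleaner', 'keeptime_viz')
imgdir_path = "/tmp/flocklab_viz"          # e.g. flocklab.config.get('viz', 'imgdir')
earliest_keeptime = time.time() - keeptime_days * 86400
os.makedirs(imgdir_path, exist_ok=True)
for f in os.listdir(imgdir_path):
    path = os.path.join(imgdir_path, f)
    if os.path.isfile(path) and os.path.getmtime(path) < earliest_keeptime:
        os.remove(path)
# --- end of sketch ---
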
@@ -4,12 +4,6 @@ import os, sys, getopt, traceback, MySQLdb, signal, random, time, errno, multipr
import lib.daemon as daemon
import lib.flocklab as flocklab
### Global variables ###
###
scriptname = os.path.basename(__main__.__file__)
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
name = "Fetcher"
###
logger = None
debug = False
@@ -18,7 +12,6 @@ errors = []
FetchObsThread_list = []
FetchObsThread_stopEvent = None
FetchObsThread_queue = None
config = None
obsfiledir = None
testresultsdir = None
testresultsfile_dict = {}
@@ -91,7 +84,7 @@ def sigterm_handler(signum, frame):
# Signal all observer fetcher threads to stop:
logger.debug("Stopping observer fetcher threads...")
shutdown_timeout = config.getint("fetcher", "shutdown_timeout")
shutdown_timeout = flocklab.config.getint("fetcher", "shutdown_timeout")
try:
FetchObsThread_stopEvent.set()
except:
@@ -104,7 +97,7 @@ def sigterm_handler(signum, frame):
# Set DB status:
logger.debug("Setting test status in DB to 'syncing'...")
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
flocklab.set_test_status(cur, cn, testid, 'syncing')
cur.close()
cn.close()
@@ -419,9 +412,9 @@ class FetchObsThread(threading.Thread):
self._stopEvent = stopEvent
self._logger = logger
self._min_sleep = config.getint("fetcher", "min_sleeptime")
self._max_randsleep = config.getint("fetcher", "max_rand_sleeptime")
self._obsdbfolder = "%s/%d" % (config.get("observer", "obsdbfolder"), testid)
self._min_sleep = flocklab.config.getint("fetcher", "min_sleeptime")
self._max_randsleep = flocklab.config.getint("fetcher", "max_rand_sleeptime")
self._obsdbfolder = "%s/%d" % (flocklab.config.get("observer", "obsdbfolder"), testid)
def run(self):
try:
@@ -559,10 +552,10 @@ def start_fetcher():
# Get needed metadata from database ---
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
except:
msg = "Could not connect to database"
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
try:
cur.execute(""" SELECT `a`.observer_id, `a`.ethernet_address
FROM `tbl_serv_observer` AS `a`
@@ -571,7 +564,7 @@ def start_fetcher():
""" %testid)
except MySQLdb.Error as err:
msg = str(err)
flocklab.error_logandexit(msg, errno.EIO, name, logger, config)
flocklab.error_logandexit(msg, errno.EIO)
except:
logger.warn("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])))
rs = cur.fetchall()
@@ -584,10 +577,10 @@ def start_fetcher():
# Start fetcher threads ---
# Create a directory structure to store the downloaded files from the DB:
obsfiledir = "%s/%d" %(config.get('fetcher', 'obsfile_dir'), testid)
obsfiledir = "%s/%d" %(flocklab.config.get('fetcher', 'obsfile_dir'), testid)
if not os.path.exists(obsfiledir):
os.makedirs(obsfiledir)
obsfiledebugdir = "%s/%d" %(config.get('fetcher', 'obsfile_debug_dir'), testid)
obsfiledebugdir = "%s/%d" %(flocklab.config.get('fetcher', 'obsfile_debug_dir'), testid)
if not os.path.exists(obsfiledebugdir):
os.makedirs(obsfiledebugdir)
#DEBUG logger.debug("Created %s"%obsfiledir)
@@ -635,7 +628,7 @@ def stop_fetcher():
try:
os.kill(pid, signal.SIGTERM)
# wait for process to finish (timeout..)
shutdown_timeout = config.getint("fetcher", "shutdown_timeout")
shutdown_timeout = flocklab.config.getint("fetcher", "shutdown_timeout")
pidpath = "/proc/%d"%pid
while os.path.exists(pidpath) & (shutdown_timeout>0):
time.sleep(1)
@@ -651,7 +644,7 @@ def stop_fetcher():
# Set DB status in order to allow dispatcher and scheduler to go on.:
logger.debug("Setting test status in DB to 'synced'...")
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
flocklab.set_test_status(cur, cn, testid, 'synced')
cur.close()
cn.close()
@@ -724,7 +717,7 @@ class WorkManager():
#
##############################################################################
def usage():
print("Usage: %s --testid=<int> [--stop] [--debug] [--help]" % scriptname)
print("Usage: %s --testid=<int> [--stop] [--debug] [--help]" % __file__)
print("Options:")
print(" --testid=<int>\t\tTest ID of test to which incoming data belongs.")
print(" --stop\t\t\tOptional. Causes the program to stop a possibly running instance of the fetcher.")
@@ -744,7 +737,6 @@ def main(argv):
global logger
global debug
global testid
global config
global testresultsdir
global testresultsfile_dict
global owner_fk
@@ -760,13 +752,12 @@ def main(argv):
time.tzset()
# Get logger:
logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath)
logger = flocklab.get_logger()
# Get the config file ---
config = flocklab.get_config(configpath=scriptpath)
if not config:
if flocklab.load_config() != flocklab.SUCCESS:
msg = "Could not read configuration file. Exiting..."
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
#logger.debug("Read configuration file.")
# Get command line parameters ---
@@ -779,7 +770,7 @@ def main(argv):
sys.exit(errno.EINVAL)
except:
msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
@@ -812,20 +803,20 @@ def main(argv):
# Check if the Test ID exists in the database ---
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
except:
msg = "Could not connect to database"
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
rs = flocklab.check_test_id(cur, testid)
cur.close()
cn.close()
if rs != 0:
if rs == 3:
msg = "Test ID %d does not exist in database." %testid
flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
flocklab.error_logandexit(msg, errno.EINVAL)
else:
msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EIO, name, logger, config)
flocklab.error_logandexit(msg, errno.EIO)
# Add Test ID to logger name ---
logger.name += " (Test %d)"%testid
@@ -843,14 +834,14 @@ def main(argv):
else:
msg = "Start function returned error. Exiting..."
os.kill(os.getpid(), signal.SIGTERM)
rs = flocklab.error_logandexit(msg, ret, name, logger, config)
rs = flocklab.error_logandexit(msg, ret)
# Get needed metadata ---
try:
(cn, cur) = flocklab.connect_to_db(config, logger)
(cn, cur) = flocklab.connect_to_db()
except:
msg = "Could not connect to database"
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
rs = flocklab.get_test_owner(cur, testid)
if isinstance(rs, tuple):
owner_fk = rs[0]
@@ -908,7 +899,7 @@ def main(argv):
logger.debug("Got XML from database.")
parser = lxml.etree.XMLParser(remove_comments=True)
tree = lxml.etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser)
ns = {'d': config.get('xml', 'namespace')}
ns = {'d': flocklab.config.get('xml', 'namespace')}
for service, xmlname in servicesUsed_dict.items():
if tree.xpath('//d:%s'%xmlname, namespaces=ns):
servicesUsed_dict[service] = True
@@ -928,7 +919,7 @@ def main(argv):
msg += "Exiting..."
logger.debug(msg)
os.kill(os.getpid(), signal.SIGTERM)
flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
flocklab.error_logandexit(msg, errno.EAGAIN)
else:
logger.debug("Got all needed metadata.")
@@ -941,7 +932,7 @@ def main(argv):
"""
if __name__ == '__main__':
# Create directory and files needed for test results:
testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid)
testresultsdir = "%s/%d" %(flocklab.config.get('fetcher', 'testresults_dir'), testid)
if not os.path.exists(testresultsdir):
os.makedirs(testresultsdir)
logger.debug("Created %s"%testresultsdir)
@@ -981,32 +972,32 @@ def main(argv):
PpStatsQueue.put(ppstats)
# Determine the number of CPU's to be used for each aggregating process. If a service is not used, its CPUs are assigned to other services
cpus_free = 0
cpus_errorlog = config.getint('fetcher', 'cpus_errorlog')
cpus_errorlog = flocklab.config.getint('fetcher', 'cpus_errorlog')
# CPUs for serial service:
if servicesUsed_dict['serial'] == True:
cpus_serial = config.getint('fetcher', 'cpus_serial')
cpus_serial = flocklab.config.getint('fetcher', 'cpus_serial')
else:
cpus_serial = 0
cpus_free = cpus_free + config.getint('fetcher', 'cpus_serial')
cpus_free = cpus_free + flocklab.config.getint('fetcher', 'cpus_serial')
# CPUs for GPIO actuation. If the service is not used, assign a CPU anyhow since FlockLab always uses this service to determine start and stop times of a test.
#cpus_errorlog = config.getint('fetcher', 'cpus_errorlog')
#cpus_errorlog = flocklab.config.getint('fetcher', 'cpus_errorlog')
if servicesUsed_dict['gpioactuation'] == True:
cpus_gpiosetting = config.getint('fetcher', 'cpus_gpiosetting')
cpus_gpiosetting = flocklab.config.getint('fetcher', 'cpus_gpiosetting')
else:
cpus_gpiosetting = 1
cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiosetting') - cpus_gpiosetting
cpus_free = cpus_free + flocklab.config.getint('fetcher', 'cpus_gpiosetting') - cpus_gpiosetting
# CPUs for GPIO tracing:
if servicesUsed_dict['gpiotracing'] == True:
cpus_gpiomonitoring = config.getint('fetcher', 'cpus_gpiomonitoring')
cpus_gpiomonitoring = flocklab.config.getint('fetcher', 'cpus_gpiomonitoring')
else:
cpus_gpiomonitoring = 0
cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiomonitoring')
cpus_free = cpus_free + flocklab.config.getint('fetcher', 'cpus_gpiomonitoring')
# CPUs for powerprofiling:
if servicesUsed_dict['powerprofiling'] == True:
cpus_powerprofiling = config.getint('fetcher', 'cpus_powerprofiling')
cpus_powerprofiling = flocklab.config.getint('fetcher', 'cpus_powerprofiling')
else:
cpus_powerprofiling = 0
cpus_free = cpus_free + config.getint('fetcher', 'cpus_powerprofiling')
cpus_free = cpus_free + flocklab.config.getint('fetcher', 'cpus_powerprofiling')
# If there are free CPUs left, give them to GPIO tracing and power profiling evenly as these services need the most CPU power:
if cpus_free > 0:
if (cpus_powerprofiling > 0) and (cpus_gpiomonitoring > 0):
@@ -1044,9 +1035,9 @@ def main(argv):
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
# Loop through the folders and assign work to the worker processes:
vizimgdir = config.get('viz','imgdir')
commitsize = config.getint('fetcher', 'commitsize')
enableviz = config.getint('viz','enablepreview')
vizimgdir = flocklab.config.get('viz','imgdir')
commitsize = flocklab.config.getint('fetcher', 'commitsize')
enableviz = flocklab.config.getint('viz','enablepreview')
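
The CPU assignment above, where unused services return their configured CPUs to cpus_free and the surplus is split between power profiling and GPIO tracing, can be summarised as the following budget function. This is a sketch with assumed names, not the script's actual code:

# --- sketch: fetcher CPU budget (assumed names, illustrative) ---
def allocate_cpus(services_used, configured):
    """services_used: service -> bool; configured: service -> CPU count from the [fetcher] section."""
    cpus = {}
    cpus_free = 0
    for service, n in configured.items():
        if services_used.get(service):
            cpus[service] = n
        else:
            # GPIO actuation always keeps one CPU (needed for test start/stop markers)
            cpus[service] = 1 if service == 'gpioactuation' else 0
            cpus_free += n - cpus[service]
    # Hand surplus CPUs to the most demanding services:
    if cpus_free > 0:
        if cpus['powerprofiling'] > 0 and cpus['gpiotracing'] > 0:
            cpus['powerprofiling'] += cpus_free // 2
            cpus['gpiotracing'] += cpus_free - cpus_free // 2
        elif cpus['powerprofiling'] > 0:
            cpus['powerprofiling'] += cpus_free
        elif cpus['gpiotracing'] > 0:
            cpus['gpiotracing'] += cpus_free
    return cpus

# allocate_cpus({'serial': True, 'gpioactuation': False, 'gpiotracing': True, 'powerprofiling': False},
#               {'serial': 1, 'gpioactuation': 1, 'gpiotracing': 2, 'powerprofiling': 2})
# -> {'serial': 1, 'gpioactuation': 1, 'gpiotracing': 4, 'powerprofiling': 0}
# --- end of sketch ---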