Commit b72f503b authored by Reto Da Forno
Browse files

default config adjusted, new option added to include xml config in test results

parent cd51a93d
......@@ -10,43 +10,40 @@ mailserver = [your_mailserver]
user = flocklab_testmng
password =
database = flocklab
host = [server_url]
host = localhost
timeformat = %%Y-%%m-%%d %%H:%%M:%%S ;Time format which is used on the MySQL database. Must be parsable by strftime.
; test config
[tests]
; IMPORTANT: Keep the setup/cleanuptimes in sync with the file /home/flocklab/testmanagement/user.ini
setuptime = 3 ;Minutes needed for test setup
cleanuptime = 3 ;Minutes needed for test cleanup
setuptime = 2 ;Minutes needed for test setup
cleanuptime = 2 ;Minutes needed for test cleanup
pidfolder = /tmp/flocklab/ ;Folder for pid files
guard_starttime = 1 ;Minutes needed to start the test (reset pin)
guard_stoptime = 1 ;Minutes needed to stop the test (reset pin)
testvalidator = /home/flocklab/testmanagement/testconfig_validator.py
allowparalleltests = 0 ;Allow parallel test execution on the same observer if resource conflicts avoided
; Observer config
[observer]
testconfigfolder = /home/root/mmc/flocklab/curtest/ ;Folder on the observer into which the test configuration and target image are uploaded
obsdbfolder = /home/root/mmc/flocklab/db/ ;Folder on the observer in which the database and all auxiliary files are located
testconfigfolder = /home/flocklab/data/curtest ;Folder on the observer into which the test configuration and target image are uploaded
testresultfolder = /home/flocklab/data/results ;Folder on the observer in which the database and all auxiliary files are located
timeformat = %%Y-%%m-%%dT%%H:%%M:%%S ;Time format which is used on the observer. Must be parsable by strftime.
starttestscript = flocklab_starttest.py
stoptestscript = flocklab_stoptest.py
serialidscript = tg_serialid.py
starttestscript = /home/flocklab/observer/testmanagement/flocklab_starttest.py
stoptestscript = /home/flocklab/observer/testmanagement/flocklab_stoptest.py
serialidscript = /home/flocklab/observer/testmanagement/tg_serialid.py ;Path to the serial ID readout script on the observer
; Config for fetcher
[fetcher]
min_sleeptime = 60 ;Minimum sleeptime in seconds which DB fetcher waits between polling the observers
max_rand_sleeptime = 15 ;Maximum random time in seconds the fetcher adds to minsleeptime
shutdown_timeout = 240 ;Timeout in seconds until fetcher processes are killed
obsfile_dir = /home/flocklab/fetcher/obsfiles/ ;Folder to store DB files from observers before they are aggregated
obsfile_debug_dir = /home/flocklab/fetcher/debug/ ;Folder to store DB files from observers before they are aggregated
obsfile_dir = /home/flocklab/fetcher/obsfiles ;Folder to store DB files from observers before they are aggregated
obsfile_debug_dir = /home/flocklab/fetcher/debug ;Folder to store DB files from observers before they are aggregated
obsfile_debug_dir_max_age_days = 7 ;Time to keep debug files
testresults_dir = /home/flocklab/fetcher/testresults/ ;Folder to store aggregated test results for being processed by the archiver
testresults_dir = /home/flocklab/fetcher/testresults ;Folder to store aggregated test results for being processed by the archiver
cpus_errorlog = 1 ;Maximum number of CPU's to use for aggregating data from error logging service
cpus_serial = 1 ;Maximum number of CPU's to use for aggregating data from serial service
cpus_gpiosetting = 1 ;Maximum number of CPU's to use for aggregating data from GPIO setting service
cpus_gpiomonitoring = 2 ;Maximum number of CPU's to use for aggregating data from GPIO monitoring service
cpus_gpiomonitoring = 1 ;Maximum number of CPU's to use for aggregating data from GPIO monitoring service
cpus_powerprofiling = 2 ;Maximum number of CPU's to use for converting and aggregating data from power profiling service
commitsize = 10000 ;Maximum number of rows to write to the aggregated files at the same time
......@@ -57,17 +54,18 @@ pigz_max_cpus = 4
nice_level = 15 ;Nice level for tar and pigz processes. Possible values: 0 to 19
email_maxsize = 10485760 ;Maximum size in bytes of archive to be emailed to the user. If archive is bigger, the test results can only be fetched from the user interface
archive_ext = .tar.gz ;Extension of archived files
archive_dir = /home/flocklab/test_archive ;Directory which stores all test results
archive_dir = /home/flocklab/testarchive ;Directory which stores all test results
include_xmlconfig = 1 ;Include XML config file in test results
; Config for cleaner
[cleaner]
max_instances = 1 ;Maximum of concurrently running instances of the cleaner
keeptime_viz = 30 ;Time in days to keep viz data
keeptime_viz = 10 ;Time in days to keep viz data
; Config for the dispatcher
[dispatcher]
schedulerscript = /home/flocklab/testmanagementserver/flocklab_scheduler.py ;Path to scheduler script on testmanagement server
validationscript = /home/flocklab/testmanagement/testconfig_validator.py ;Path to validation script on testmanagement server
validationscript = /home/flocklab/testmanagementserver/testconfig_validator.py ;Path to validation script on testmanagement server
dispatcherscript = /home/flocklab/testmanagementserver/flocklab_dispatcher.py ;Path to dispatcher script on testmanagement server
fetcherscript = /home/flocklab/testmanagementserver/flocklab_fetcher.py ;Path to fetcher script on testmanagement server
archiverscript = /home/flocklab/testmanagementserver/flocklab_archiver.py ;Path to archiver script on testmanagement server
......@@ -76,9 +74,6 @@ serialproxyscript = /home/flocklab/testmanagementserver/flocklab_serialproxy.py
default_tg_voltage = 3.3 ;Default voltage for targets if not specified in XML
default_sampling_divider = 2 ;Default sampling divider for power profiling if not specified in XML
archiver_waittime = 10 ;Wait time between calls to the archiver if the maximum number of archiver instances is reached
binutils_arm = /home/flocklab/binutils/binutils-arm ;Path to ARM binutils
binutils_msp430 = /home/flocklab/binutils/binutils-msp430/usr/bin ;Path to MSP430 binutils
setsymbolsscript = /home/flocklab/binutils/tos-set-symbols ;Path to script used to set symbols (e.g. node ID)
; XML test configuration file settings
[xml]
......@@ -97,7 +92,7 @@ lockfile = /tmp/flocklab/linktest_schedule.lock
; vizualisation of test results
[viz]
enablepreview = 1 ;set to 1 to enable generation of preview data
imgdir = /home/flocklab/viz/ ;path to preview directory
imgdir = /home/flocklab/viz ;path to preview directory
; Cleaner which deletes test results (after a per-user retention time has expired)
[retentioncleaner]
......@@ -112,9 +107,10 @@ shutdown_timeout = 240
; tools for target image validation
[targetimage]
imagevalidator = /home/flocklab/testmanagement/targetimage_validator.py
msp430 = /usr/bin/msp430-readelf
arm = /home/flocklab/binutils/binutils-arm/arm-angstrom-linux-gnueabi-readelf
imagevalidator = /home/flocklab/testmanagementserver/targetimage_validator.py
binutils_arm = /home/flocklab/binutils/binutils-arm ;Path to ARM binutils
binutils_msp430 = /home/flocklab/binutils/binutils-msp430/usr/bin ;Path to MSP430 binutils
setsymbolsscript = /home/flocklab/binutils/tos-set-symbols ;Path to script used to set symbols (e.g. node ID)
; Recaptcha
[recaptcha]
......
......@@ -79,7 +79,7 @@ def main(argv):
sys.exit(errno.EINVAL)
# Add Test ID to logger name ---
logger.name += " (Test %d)"%testid
logger.name += " (Test %d)" % testid
# Connect to the DB ---
try:
......@@ -103,43 +103,43 @@ def main(argv):
rs = flocklab.check_test_id(cur, testid)
if rs != 0:
if rs == 3:
msg = "Test ID %d does not exist in database." %testid
msg = "Test ID %d does not exist in database." % testid
flocklab.error_logandexit(msg, errno.EINVAL)
else:
msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
msg = "Error when trying to get test ID from database: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EIO)
# Check directories needed ---
archivedir = flocklab.config.get('archiver', 'archive_dir')
archivename = "%d%s"%(testid, flocklab.config.get('archiver','archive_ext'))
archivepath = "%s/%s"%(archivedir, archivename)
archivedir = flocklab.config.get('archiver', 'archive_dir')
archivename = "%d%s"%(testid, flocklab.config.get('archiver','archive_ext'))
archivepath = "%s/%s"%(archivedir, archivename)
if ((not os.path.exists(archivedir)) or (not os.path.isdir(archivedir))):
if not os.path.exists(archivedir):
os.makedirs(archivedir)
logger.debug("Directory '%s' created." % (archivedir))
else:
msg = "The path %s does either not exist or is not a directory. Aborting..."%(archivedir)
msg = "The path %s does either not exist or is not a directory. Aborting..." % (archivedir)
flocklab.error_logandexit(msg, errno.EINVAL)
# Generate archive ---
if ((os.path.exists(archivepath)) and (os.path.isfile(archivepath))):
logger.debug("Archive %s is already existing." %(archivepath))
logger.debug("Archive %s is already existing." % (archivepath))
else:
# Check if testresultsdir directory is existing:
testresultsdir = "%s/%d" %(flocklab.config.get('fetcher', 'testresults_dir'), testid)
testresultsdir = "%s/%d" % (flocklab.config.get('fetcher', 'testresults_dir'), testid)
if ((not os.path.exists(testresultsdir)) or (not os.path.isdir(testresultsdir))):
msg = "The path %s does either not exist or is not a directory. Aborting..."%(testresultsdir)
msg = "The path %s does either not exist or is not a directory. Aborting..." % (testresultsdir)
flocklab.error_logandexit(msg, errno.EINVAL)
else:
logger.debug("Directory %s exists."%(testresultsdir))
logger.debug("Directory %s exists." % (testresultsdir))
# sort tar file, powerprofiling at the end
pp_part = []
resultparts = []
for part in os.listdir(testresultsdir):
if part!='powerprofiling.csv':
resultparts.append(os.path.basename(testresultsdir)+'/'+part)
if part != 'powerprofiling.csv':
resultparts.append(os.path.basename(testresultsdir) + '/' + part)
else:
pp_part.append(os.path.basename(testresultsdir)+'/'+part)
pp_part.append(os.path.basename(testresultsdir) + '/' + part)
resultparts.extend(pp_part)
# Archive files:
max_cpus = flocklab.config.get('archiver', 'pigz_max_cpus')
......@@ -156,7 +156,7 @@ def main(argv):
# Use pigz instead of gz because pigz makes use of multiple processors.
gzcmd = ['pigz', '-p', max_cpus]
outfile = open(archivepath, 'w+')
logger.debug("Starting to write archive %s using max %s CPUs and level %d for compressing..."%(archivepath, max_cpus, nice_level))
logger.debug("Starting to write archive %s using max %s CPUs and level %d for compressing..." % (archivepath, max_cpus, nice_level))
ptar = subprocess.Popen(tarcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level))
pgz = subprocess.Popen(gzcmd, stdin=ptar.stdout, stdout=outfile, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level))
gzout, gzerr = pgz.communicate()
......@@ -168,15 +168,15 @@ def main(argv):
shutil.rmtree(testresultsdir)
logger.debug("Removed directory %s"%testresultsdir)
else:
msg = "Error %d when creating archive %s"%(pgz.returncode, archivepath)
msg += "Tried to pipe commands %s and %s"%(str(tarcmd), str(gzcmd))
msg += "Tar command returned: %s, %s"%(str(tarout), str(tarerr))
msg += "Gz command returned: %s, %s"%(str(gzout), str(gzerr))
msg += "Error was: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
msg = "Error %d when creating archive %s" % (pgz.returncode, archivepath)
msg += "Tried to pipe commands %s and %s" % (str(tarcmd), str(gzcmd))
msg += "Tar command returned: %s, %s" % (str(tarout), str(tarerr))
msg += "Gz command returned: %s, %s" % (str(gzout), str(gzerr))
msg += "Error was: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
flocklab.error_logandexit(msg, errno.EFAULT)
archive_size = os.path.getsize(archivepath)
archive_size_mb = float(archive_size)/1048576
logger.debug("Archive has size %dB (%.3fMB)"%(archive_size, archive_size_mb))
logger.debug("Archive has size %dB (%.3fMB)" % (archive_size, archive_size_mb))
# Send results to test owner ---
if send_email:
......@@ -187,24 +187,24 @@ def main(argv):
else:
usermail = rs
if ((usermail == 1) or (usermail == 2)):
msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." %testid
msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." % testid
flocklab.error_logandexit(msg, errno.EINVAL)
else:
logger.debug("Got email of test owner: %s" %(str(usermail)))
logger.debug("Got email of test owner: %s" % (str(usermail)))
# Check the size of the archive and only send it by email if it has a decent size:
if ( archive_size > int(flocklab.config.get('archiver','email_maxsize')) ):
if ( archive_size > int(flocklab.config.get('archiver','email_maxsize'))):
msg = "Dear FlockLab user,\n\n\
Measurement data for test with ID %d has been successfully retrieved from the FlockLab database \
but could not be sent by email as it is too big. Please fetch your test results from the user interface.\n\n\
Yours faithfully,\nthe FlockLab server" %(testid)
flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail)
Yours faithfully,\nthe FlockLab server" % (testid)
flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" % testid, message=msg, recipients=usermail)
else:
msg = "Dear FlockLab user,\n\n\
Measurement data for test with ID %d has been successfully retrieved from the FlockLab database, \
compressed and attached to this email. You can find all test results in the attached archive file %s\n\n\
Yours faithfully,\nthe FlockLab server" %(testid, archivename)
flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail, attachments=[archivepath])
flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" % testid, message=msg, recipients=usermail, attachments=[archivepath])
logger.debug("Sent email to test owner")
cur.close()
......
......@@ -954,6 +954,7 @@ def prepare_testresults(testid, cur):
"""
errors = []
tree = None
logger.debug("Preparing testresults...")
......@@ -961,7 +962,7 @@ def prepare_testresults(testid, cur):
logger.debug("Check if user wants testresults as email...")
emailResults = False
# Get the XML config from the database:
cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid)
cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" % testid)
ret = cur.fetchone()
if ret:
parser = lxml.etree.XMLParser(remove_comments=True)
......@@ -980,9 +981,19 @@ def prepare_testresults(testid, cur):
else:
logger.debug("User wants test results as email. Will trigger the email.")
# Add config XML to results directory
if flocklab.config.get('archiver', 'include_xmlconfig'):
testresultsdir = "%s/%d" % (flocklab.config.get('fetcher', 'testresults_dir'), testid)
if (os.path.isdir(testresultsdir) and (not os.path.exists("%s/testconfig.xml" % testresultsdir))):
if tree:
et = lxml.etree.ElementTree(tree)
et.write("%s/testconfig.xml" % testresultsdir, pretty_print=True)
logger.debug("XML config copied to results folder.")
else:
logger.warn("Could not copy XML config to test results directory.")
# Archive test results ---
cmd = [flocklab.config.get('dispatcher', 'archiverscript'),"--testid=%d"%testid]
cmd = [flocklab.config.get('dispatcher', 'archiverscript'),"--testid=%d" % testid]
if emailResults:
cmd.append("--email")
if debug:
......@@ -1001,7 +1012,7 @@ def prepare_testresults(testid, cur):
return errors
if rs == errno.EUSERS:
# Maximum number of instances is reached. Wait some time before calling again.
logger.info("Archiver returned EUSERS. Wait for %d s before trying again..."%waittime)
logger.info("Archiver returned EUSERS. Wait for %d s before trying again..." % waittime)
time.sleep(waittime)
logger.debug("Call to archiver successful.")
......
......@@ -31,12 +31,11 @@ def usage():
#
##############################################################################
class UpdateSlotAssignThread(threading.Thread):
def __init__(self, observerdata, config, logger, searchtime, maxretries, queue):
def __init__(self, observerdata, logger, searchtime, maxretries, queue):
threading.Thread.__init__(self)
self.ObsKey = observerdata[0]
self.ObsHostname = observerdata[1]
self.ObsSerialList = observerdata[2:]
self.Config = config
self.Logger = logger
self.Searchtime = searchtime
self.Maxretries = maxretries
......@@ -44,7 +43,7 @@ class UpdateSlotAssignThread(threading.Thread):
def run(self):
# Get list of ID's for every slot from observer over SSH:
cmd = self.Config.get("observer", "serialidscript")
cmd = flocklab.config.get("observer", "serialidscript")
if self.Searchtime:
cmd = "%s -s%.1f" %(cmd, self.Searchtime)
if self.Maxretries:
......@@ -277,7 +276,7 @@ def main(argv):
for observerdata in rs:
logger.debug("Starting thread for %s" % (observerdata[1]))
try:
t = UpdateSlotAssignThread(observerdata, config, logger, searchtime, maxretries, q)
t = UpdateSlotAssignThread(observerdata, logger, searchtime, maxretries, q)
threadlist.append(t)
t.start()
except:
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment