Commit 11dbf4fc authored by Reto Da Forno

initial commit (based on code of public flocklab repo, commit c59a9cbf533104c128cdb56f58c9293c4895880d)
# FLOCKLAB CRON JOBS (TEMPLATE):
#CRONLOG=/dev/null
CRONLOG=/home/flocklab/logs/cron.log
* * * * * /home/flocklab/testmanagementserver/flocklab_scheduler.py --debug >> $CRONLOG 2>&1
*/10 * * * * /home/flocklab/testmanagementserver/flocklab_cleaner.py --debug >> $CRONLOG 2>&1
0 5 * * * /home/flocklab/testmanagementserver/flocklab_retention_cleaner.py --debug >> $CRONLOG 2>&1
0 0 * * * /usr/sbin/logrotate --state /home/flocklab/logrotate.state /home/flocklab/logrotate >> $CRONLOG 2>&1
1 0 1 * * /home/flocklab/testmanagementserver/flocklab_mmccheck.py --debug >> $CRONLOG 2>&1
0 2 * * 1 php /home/flocklab/webserver/update_stats.php >> $CRONLOG 2>&1
# Logrotate configuration for logs on FlockLab server
/home/flocklab/logs/flocklab_debug* {
    compress
    notifempty
    copytruncate
    rotate 99
    daily
    missingok
    olddir old
    createolddir
    sharedscripts
}
/home/flocklab/logs/flocklab_warn* {
    mail flocklab@ee.ethz.ch
    mailfirst
    compress
    notifempty
    copytruncate
    rotate 52
    weekly
    missingok
    olddir old
    createolddir
    sharedscripts
}
/home/flocklab/logs/webserver_* {
    compress
    notifempty
    copytruncate
    rotate 12
    monthly
    missingok
    olddir old
    createolddir
    sharedscripts
}
; This is the FlockLab testserver configuration file
; Comments start with ';', as in php.ini
[general]
admin_email = [your_email]
; database connection configuration
[database]
user = flocklab_testmng
password =
database = flocklab
host = [server_url]
timeformat = %%Y/%%m/%%d %%H:%%M:%%S ;Time format which is used on the MySQL database. Must be parsable by strftime.
; test config
[tests]
; IMPORTANT: Keep the setup/cleanuptimes in sync with the file /home/flocklab/testmanagement/user.ini
setuptime = 3 ;Minutes needed for test setup
cleanuptime = 3 ;Minutes needed for test cleanup
pidfolder = /tmp/flocklab/ ;Folder for pid files
; Observer config
[observer]
testconfigfolder = /home/root/mmc/flocklab/curtest/ ;Folder on the observer into which the test configuration and target image are uploaded
obsdbfolder = /home/root/mmc/flocklab/db/ ;Folder on the observer in which the database and all auxiliary files are located
timeformat = %%Y-%%m-%%dT%%H:%%M:%%S ;Time format which is used on the observer. Must be parsable by strftime.
daq_pp_nth_sample_default = 2 ;Default value for the nth_sample argument for powerprofiling. This setting only affects DAQ tests.
starttestscript = flocklab_starttest.py
stoptestscript = flocklab_stoptest.py
serialidscript = tg_serialid.py
; Config for fetcher
[fetcher]
min_sleeptime = 60 ;Minimum sleep time in seconds the DB fetcher waits between polls of the observers
max_rand_sleeptime = 15 ;Maximum random time in seconds the fetcher adds to min_sleeptime
shutdown_timeout = 240 ;Timeout in seconds until fetcher processes are killed
obsfile_dir = /home/flocklab/fetcher/obsfiles/ ;Folder to store DB files from observers before they are aggregated
obsfile_debug_dir = /home/flocklab/fetcher/debug/ ;Folder to store debug copies of the DB files fetched from the observers
obsfile_debug_dir_max_age_days = 7 ;Time to keep debug files
testresults_dir = /home/flocklab/fetcher/testresults/ ;Folder to store aggregated test results for being processed by the archiver
cpus_errorlog = 1 ;Maximum number of CPUs to use for aggregating data from error logging service
cpus_serial = 1 ;Maximum number of CPUs to use for aggregating data from serial service
cpus_gpiosetting = 1 ;Maximum number of CPUs to use for aggregating data from GPIO setting service
cpus_gpiomonitoring = 2 ;Maximum number of CPUs to use for aggregating data from GPIO monitoring service
cpus_powerprofiling = 2 ;Maximum number of CPUs to use for converting and aggregating data from power profiling service
commitsize = 10000 ;Maximum number of rows to write to the aggregated files at the same time
; Config for archiver
[archiver]
max_instances = 100 ;Maximum number of concurrently running instances of the archiver
pigz_max_cpus = 4 ;Maximum number of CPUs to be used by pigz compressor
nice_level = 15 ;Nice level for tar and pigz processes. Possible values: 0 to 19
email_maxsize = 10485760 ;Maximum size in bytes of archive to be emailed to the user. If archive is bigger, the test results can only be fetched from the user interface
archive_ext = .tar.gz ;Extension of archived files
archive_dir = /home/flocklab/test_archive ;Directory which stores all test results
; Config for cleaner
[cleaner]
max_instances = 1 ;Maximum number of concurrently running instances of the cleaner
keeptime_viz = 30 ;Time in days to keep viz data
; Config for the dispatcher
[dispatcher]
schedulerscript = /home/flocklab/testmanagementserver/flocklab_scheduler.py ;Path to scheduler script on testmanagement server
validationscript = /home/flocklab/testmanagement/testconfig_validator.py ;Path to validation script on testmanagement server
dispatcherscript = /home/flocklab/testmanagementserver/flocklab_dispatcher.py ;Path to dispatcher script on testmanagement server
fetcherscript = /home/flocklab/testmanagementserver/flocklab_fetcher.py ;Path to fetcher script on testmanagement server
archiverscript = /home/flocklab/testmanagementserver/flocklab_archiver.py ;Path to archiver script on testmanagement server
testtolinkmapscript = /home/flocklab/testmanagementserver/test_to_linkmap.py ;Path to linkmap evaluation script on testmanagement server
serialproxyscript = /home/flocklab/testmanagementserver/flocklab_serialproxy.py ;Path to serial proxy script on testmanagement server
flockdaqconfigscript = /home/flocklab/testmanagementserver/flocklab_daq_config.py ;Path to flockdaq config script on testmanagement server
default_tg_voltage = 3.3 ;Default voltage for targets if not specified in XML
default_sampling_divider = 2 ;Default sampling divider for power profiling if not specified in XML
archiver_waittime = 10 ;Wait time between calls to the archiver if the maximum number of archiver instances is reached
binutils_arm = /home/flocklab/binutils/binutils-arm ;Path to ARM binutils
binutils_msp430 = /home/flocklab/binutils/binutils-msp430/usr/bin ;Path to MSP430 binutils
setsymbolsscript = /home/flocklab/binutils/tos-set-symbols ;Path to script used to set symbols (e.g. node ID)
; XML test configuration file settings
[xml]
namespace = http://www.flocklab.ethz.ch ;Must match namespace declaration in user.ini, XML validation file (flocklab.xsd) and test XML config
; regular link tests
[linktests]
user = flocklab ;User that owns the link measurements
interval_hours = 47 ;Interval between link measurements
interval_random_minutes = 120 ;Random slack (+/-)
testfolder = /home/flocklab/testmanagementserver/linktests
starttest_script = /home/flocklab/tools/flocklab
lockfile = /tmp/flocklab/linktest_schedule.lock
; Visualization of test results
[viz]
enablepreview = 1 ;set to 1 to enable generation of preview data
imgdir = /home/flocklab/viz/ ;path to preview directory
; Cleaner which deletes test results (after a per-user retention time has expired)
[retentioncleaner]
max_instances = 1 ;Maximum number of concurrently running instances of the script
expiration_leadtime = 14 ;Number of days to warn user before results are purged
; Config for serial proxy
[serialproxy]
startport = 50100 ;Start port for serial proxy to test users. For the real port, the observer ID is added to the start port.
obsdataport = 50001 ;Observer listens on this port for incoming connections from the testmanagement server
shutdown_timeout = 240 ;Timeout in seconds until proxy processes are killed
; Config for MMC check script
[mmccheck]
reservation_group_id = 99 ;ID of reservation group in tbl_serv_reservations for mmc check
reservation_duration_h = 2 ;Expected upper bound of duration of mmc check
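
The management scripts further below load this file via flocklab.get_config(); the actual loader lives in lib/flocklab.py and is not shown here. As a rough illustration only (the file path is hypothetical, and this is not the real implementation), a configparser-based reader that tolerates the inline ';' comments and the escaped '%%' placeholders used above could look like this:

    import configparser

    # Parse the FlockLab server config; ';' starts an inline comment in this file.
    config = configparser.ConfigParser(inline_comment_prefixes=(';',))
    config.read('/home/flocklab/testmanagementserver/config.ini')  # hypothetical path

    # With the default interpolation, '%%' in the file is read back as a single '%'.
    print(config.get('database', 'timeformat'))        # -> %Y/%m/%d %H:%M:%S
    print(config.getint('archiver', 'max_instances'))  # -> 100
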
Installation instructions:
--------------------------
1) cd into this folder
2) Compile using:
     python setup.py build
3) Install using:
     python setup.py install --home=/home/flocklab/testmanagementserver/ext_c_modules/
4) Remove the build dir:
     rm -rf build
5) The module is now installed in /home/flocklab/testmanagementserver/ext_c_modules/lib/python/ and can be used in Python like this:
     import sys
     sys.path.append('/home/flocklab/testmanagementserver/ext_c_modules/lib/python/')
     import cResultfetcher
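
For a quick sanity check after installation, the names exported by the extension can be listed as in the sketch below (the exported function names are defined in cResultfetcher.c and are not documented in this README, so only an enumeration is shown; no particular API is assumed):

     import sys
     sys.path.append('/home/flocklab/testmanagementserver/ext_c_modules/lib/python/')
     import cResultfetcher
     # List the public names the C extension exports (sanity check only).
     print([n for n in dir(cResultfetcher) if not n.startswith('_')])
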
all: cResultfetcher.c
	python setup.py build

install: all
	python setup.py install --home=/home/flocklab/testmanagementserver/ext_c_modules/

clean:
	rm -rf build
#!/usr/bin/env python3
from distutils.core import setup, Extension
module = Extension( 'cResultfetcher',
                    libraries = ['pthread', 'z', 'm', 'rt', 'dl'],
                    sources = ['cResultfetcher.c']
                  )
name = 'FlockLab Powerprofiling Resultfetcher'
version = '2.0'
author = 'Christoph Walser, ETH Zurich'
author_email = 'walserc@tik.ee.ethz.ch'
lic = 'GPL'
platform = 'Linux Ubuntu'
description = 'Converts powerprofiling results for FlockLab and writes a CSV file out of them.'
setup(name=name, version=version, author=author, author_email=author_email, license=lic, platforms=platform, description=description, ext_modules = [module])
Metadata-Version: 1.0
Name: FlockLab Powerprofiling Resultfetcher
Version: 2.0
Summary: Converts powerprofiling results for FlockLab and writes a CSV file out of them.
Home-page: UNKNOWN
Author: Christoph Walser, ETH Zurich
Author-email: walserc@tik.ee.ethz.ch
License: GPL
Description: UNKNOWN
Platform: Linux Ubuntu
#!/usr/bin/env python3
__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika"
__copyright__ = "Copyright 2010, ETH Zurich, Switzerland"
__license__ = "GPL"
import sys, os, getopt, errno, traceback, time, shutil, logging, subprocess, __main__, types
# Import local libraries
from lib.flocklab import SUCCESS
import lib.flocklab as flocklab
### Global variables ###
###
scriptname = os.path.basename(__main__.__file__)
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
name = "Archiver"
###
logger = None
config = None
##############################################################################
#
# Error classes
#
##############################################################################
class Error(Exception):
""" Base class for exception. """
pass
### END Error classes
##############################################################################
#
# Usage
#
##############################################################################
def usage():
print("Usage: %s --testid=<int> [--email] [--debug] [--help]" %scriptname)
print("Options:")
print(" --testid=<int>\t\tTest ID of test whose results should be archived.")
print(" --email\t\t\tOptional. Send the data to the test owner by email.")
print(" --debug\t\t\tOptional. Print debug messages to log.")
print(" --help\t\t\tOptional. Print this help.")
### END usage()
##############################################################################
#
# Main
#
##############################################################################
def main(argv):
    ### Global Variables ###
    global logger
    global config
    send_email = False
    testid = -1
    # Set timezone to UTC ---
    os.environ['TZ'] = 'UTC'
    time.tzset()
    # Get logger ---
    logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath)
    # Get config ---
    config = flocklab.get_config(configpath=scriptpath)
    if not config:
        msg = "Could not read configuration file. Exiting..."
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    #logger.debug("Read configuration file.")
    # Get arguments ---
    try:
        opts, args = getopt.getopt(argv, "ehdt:", ["email", "help", "debug", "testid=" ])
    except getopt.GetoptError as err:
        print(str(err))
        logger.warn(str(err))
        usage()
        sys.exit(errno.EINVAL)
    except:
        msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(SUCCESS)
        elif opt in ("-e", "--email"):
            send_email = True
        elif opt in ("-d", "--debug"):
            logger.debug("Detected debug flag.")
            logger.setLevel(logging.DEBUG)
        elif opt in ("-t", "--testid"):
            try:
                testid = int(arg)
                if testid <= 0:
                    raise Error
            except:
                logger.warn("Wrong API usage: testid has to be a positive number")
                sys.exit(errno.EINVAL)
        else:
            logger.warn("Wrong API usage")
            sys.exit(errno.EINVAL)
    # Check if necessary parameters are set ---
    if ((testid == -1)):
        logger.warn("Wrong API usage")
        sys.exit(errno.EINVAL)
    # Add Test ID to logger name ---
    logger.name += " (Test %d)"%testid
    # Connect to the DB ---
    try:
        (cn, cur) = flocklab.connect_to_db(config, logger)
    except:
        msg = "Could not connect to database"
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    # Check if max number of instances is not reached ---
    rs = flocklab.count_running_instances(scriptname)
    if (rs >= 0):
        maxinscount = config.getint('archiver', 'max_instances')
        if rs > maxinscount:
            msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs)
            flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config)
        #else:
        #    logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs))
    else:
        msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs)
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    # Check if the Test ID exists in the database ---
    rs = flocklab.check_test_id(cur, testid)
    if rs != 0:
        if rs == 3:
            msg = "Test ID %d does not exist in database." %testid
            flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
        else:
            msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
            flocklab.error_logandexit(msg, errno.EIO, name, logger, config)
    # Check directories needed ---
    archivedir = config.get('archiver', 'archive_dir')
    archivename = "%d%s"%(testid, config.get('archiver','archive_ext'))
    archivepath = "%s/%s"%(archivedir, archivename)
    if ((not os.path.exists(archivedir)) or (not os.path.isdir(archivedir))):
        if not os.path.exists(archivedir):
            os.makedirs(archivedir)
            logger.debug("Directory '%s' created." % (archivedir))
        else:
            msg = "The path %s does either not exist or is not a directory. Aborting..."%(archivedir)
            flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
    # Generate archive ---
    if ((os.path.exists(archivepath)) and (os.path.isfile(archivepath))):
        logger.debug("Archive %s already exists." %(archivepath))
    else:
        # Check if the testresultsdir directory exists:
        testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid)
        if ((not os.path.exists(testresultsdir)) or (not os.path.isdir(testresultsdir))):
            msg = "The path %s does either not exist or is not a directory. Aborting..."%(testresultsdir)
            flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
        else:
            logger.debug("Directory %s exists."%(testresultsdir))
        # Sort the files for the tar archive, powerprofiling at the end:
        pp_part = []
        resultparts = []
        for part in os.listdir(testresultsdir):
            if part != 'powerprofiling.csv':
                resultparts.append(os.path.basename(testresultsdir)+'/'+part)
            else:
                pp_part.append(os.path.basename(testresultsdir)+'/'+part)
        resultparts.extend(pp_part)
        # Archive files:
        max_cpus = config.get('archiver', 'pigz_max_cpus')
        try:
            nice_level = config.getint('archiver', 'nice_level')
        except:
            logger.warn("Could not read nice_level from config file. Setting level to 10.")
            nice_level = 10
        if nice_level not in list(range(0,20)):
            logger.warn("Defined nice_level %d from config file is out of bounds. Setting level to 10."%nice_level)
            nice_level = 10
        tarcmd = ['tar', 'cf', '-', '-C', os.path.dirname(testresultsdir)]
        tarcmd.extend(resultparts)
        # Use pigz instead of gzip because pigz makes use of multiple processors.
        gzcmd = ['pigz', '-p', max_cpus]
        outfile = open(archivepath, 'w+')
        logger.debug("Starting to write archive %s using max %s CPUs and nice level %d for compressing..."%(archivepath, max_cpus, nice_level))
        ptar = subprocess.Popen(tarcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level))
        pgz = subprocess.Popen(gzcmd, stdin=ptar.stdout, stdout=outfile, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level))
        gzout, gzerr = pgz.communicate()
        tarout, tarerr = ptar.communicate()
        outfile.close()
        if pgz.returncode == 0:
            logger.debug("Created archive")
            # Remove testresultsdir:
            shutil.rmtree(testresultsdir)
            logger.debug("Removed directory %s"%testresultsdir)
        else:
            msg = "Error %d when creating archive %s\n"%(pgz.returncode, archivepath)
            msg += "Tried to pipe commands %s and %s\n"%(str(tarcmd), str(gzcmd))
            msg += "Tar command returned: %s, %s\n"%(str(tarout), str(tarerr))
            msg += "Gz command returned: %s, %s\n"%(str(gzout), str(gzerr))
            msg += "Error was: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
            flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config)
    archive_size = os.path.getsize(archivepath)
    archive_size_mb = float(archive_size)/1048576
    logger.debug("Archive has size %dB (%.3fMB)"%(archive_size, archive_size_mb))
    # Send results to test owner ---
    if send_email:
        # Get Email of test owner:
        rs = flocklab.get_test_owner(cur, testid)
        if isinstance(rs, tuple):
            usermail = rs[4]
        else:
            usermail = rs
        if ((usermail == 1) or (usermail == 2)):
            msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." %testid
            flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config)
        else:
            logger.debug("Got email of test owner: %s" %(str(usermail)))
        # Check the size of the archive and only send it by email if it has a decent size:
        if ( archive_size > int(config.get('archiver','email_maxsize')) ):
            msg = "Dear FlockLab user,\n\n\
Measurement data for test with ID %d has been successfully retrieved from the FlockLab database \
but could not be sent by email as it is too big. Please fetch your test results from the user interface.\n\n\
Yours faithfully,\nthe FlockLab server" %(testid)
            flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail)
        else:
            msg = "Dear FlockLab user,\n\n\
Measurement data for test with ID %d has been successfully retrieved from the FlockLab database, \
compressed and attached to this email. You can find all test results in the attached archive file %s\n\n\
Yours faithfully,\nthe FlockLab server" %(testid, archivename)
            flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail, attachments=[archivepath])
        logger.debug("Sent email to test owner")
    cur.close()
    cn.close()
    sys.exit(SUCCESS)
### END main()
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception:
        msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv))
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
#! /usr/bin/env python3
__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>"
__copyright__ = "Copyright 2010, ETH Zurich, Switzerland"
__license__ = "GPL"
import sys, os, getopt, errno, traceback, logging, time, __main__, shutil, glob, datetime
# Import local libraries
from lib.flocklab import SUCCESS
import lib.flocklab as flocklab
### Global variables ###
###
scriptname = os.path.basename(__main__.__file__)
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
name = "Cleaner"
###
logger = None
config = None
##############################################################################
#
# Error classes
#
##############################################################################
class Error(Exception):
""" Base class for exception. """
pass
### END Error classes
##############################################################################
#
# Usage
#
##############################################################################
def usage():
print("Usage: %s [--debug] [--help]" %scriptname)
print("Options:")
print(" --debug\t\t\tOptional. Print debug messages to log.")
print(" --help\t\t\tOptional. Print this help.")
### END usage()
##############################################################################
#
# Main
#
##############################################################################
def main(argv):
    ### Global Variables ###
    global logger
    global config
    # Set timezone to UTC:
    os.environ['TZ'] = 'UTC'
    time.tzset()
    # Get logger:
    logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath)
    # Get config ---
    config = flocklab.get_config(configpath=scriptpath)
    if not config:
        msg = "Could not read configuration file. Exiting..."
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    #logger.debug("Read configuration file.")
    # Get the arguments:
    try:
        opts, args = getopt.getopt(argv, "dh", ["debug", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        logger.warn(str(err))
        usage()
        sys.exit(errno.EINVAL)
    except:
        msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))
        flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config)
    for opt, arg in opts:
        if opt in ("-d", "--debug"):
            logger.setLevel(logging.DEBUG)
        elif opt in ("-h", "--help"):
            usage()
            sys.exit(SUCCESS)
        else:
            logger.warn("Wrong API usage")
            sys.exit(errno.EINVAL)
    # Allow only x instances ---
    rs = flocklab.count_running_instances(scriptname)