diff --git a/testmanagementserver/ext_c_modules/cResultfetcher/setup.py b/testmanagementserver/ext_c_modules/cResultfetcher/setup.py index 313a3c8678e67cc0e79e7686cf3d271b44238fc5..36feb3208692a859ae2d1e3cee84d550215fdb34 100644 --- a/testmanagementserver/ext_c_modules/cResultfetcher/setup.py +++ b/testmanagementserver/ext_c_modules/cResultfetcher/setup.py @@ -2,19 +2,19 @@ from distutils.core import setup, Extension -module = Extension( 'cResultfetcher', - libraries = ['pthread', 'z', 'm', 'rt', 'dl'], - sources = ['cResultfetcher.c'] - ) +module = Extension( 'cResultfetcher', + libraries = ['pthread', 'z', 'm', 'rt', 'dl'], + sources = ['cResultfetcher.c'] + ) -name = 'FlockLab Powerprofiling Resultfetcher' -version = '2.0' -author = 'Christoph Walser, ETH Zurich' -author_email = 'walserc@tik.ee.ethz.ch' -lic = 'GPL' -platform = 'Linux Ubuntu' -description = 'Converts powerprofiling results for FlockLab and writes a CSV file out of them.' +name = 'FlockLab Powerprofiling Resultfetcher' +version = '2.0' +author = 'Christoph Walser, ETH Zurich' +author_email = 'walserc@tik.ee.ethz.ch' +lic = 'GPL' +platform = 'Linux Ubuntu' +description = 'Converts powerprofiling results for FlockLab and writes a CSV file out of them.' setup(name=name, version=version, author=author, author_email=author_email, license=lic, platforms=platform, description=description, ext_modules = [module]) diff --git a/testmanagementserver/flocklab_archiver.py b/testmanagementserver/flocklab_archiver.py index 77c0c777f9c741686926a788a0f2ef4113d6713c..6b6609579ec4d7111f5f13861c3e98ee084a38b8 100755 --- a/testmanagementserver/flocklab_archiver.py +++ b/testmanagementserver/flocklab_archiver.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, getopt, errno, traceback, time, shutil, logging, subprocess, __main__, types @@ -28,8 +28,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -40,12 +40,12 @@ class Error(Exception): # ############################################################################## def usage(): - print("Usage: %s --testid=<int> [--email] [--debug] [--help]" %scriptname) - print("Options:") - print(" --testid=<int>\t\tTest ID of test whose results should be archived.") - print(" --email\t\t\tOptional. Send the data to the test owner by email.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s --testid=<int> [--email] [--debug] [--help]" %scriptname) + print("Options:") + print(" --testid=<int>\t\tTest ID of test whose results should be archived.") + print(" --email\t\t\tOptional. Send the data to the test owner by email.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. 
Print this help.") ### END usage() @@ -56,205 +56,205 @@ def usage(): ############################################################################## def main(argv): - ### Global Variables ### - global logger - global config - - send_email = False - testid = -1 - - # Set timezone to UTC --- - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger --- - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get config --- - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get arguments --- - try: - opts, args = getopt.getopt(argv, "ehdt:", ["email", "help", "debug", "testid=" ]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + ### Global Variables ### + global logger + global config + + send_email = False + testid = -1 + + # Set timezone to UTC --- + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger --- + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get config --- + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get arguments --- + try: + opts, args = getopt.getopt(argv, "ehdt:", ["email", "help", "debug", "testid=" ]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - for opt, arg in opts: - if opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-e", "--email"): - send_email = True - elif opt in ("-d", "--debug"): - logger.debug("Detected debug flag.") - logger.setLevel(logging.DEBUG) - elif opt in ("-t", "--testid"): - try: - testid = int(arg) - if testid <= 0: - raise Error - except: - logger.warn("Wrong API usage: testid has to be a positive number") - sys.exit(errno.EINVAL) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) + for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-e", "--email"): + send_email = True + elif opt in ("-d", "--debug"): + logger.debug("Detected debug flag.") + logger.setLevel(logging.DEBUG) + elif opt in ("-t", "--testid"): + try: + testid = int(arg) + if testid <= 0: + raise Error + except: + logger.warn("Wrong API usage: testid has to be a positive number") + sys.exit(errno.EINVAL) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) - # Check if necessary parameters are set --- - if ((testid == -1)): - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Add Test ID to logger name --- - logger.name += " (Test %d)"%testid - - # Connect to the DB --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Check if max number of instances is not reached --- - rs = flocklab.count_running_instances(scriptname) 
- if (rs >= 0): - maxinscount = config.getint('archiver', 'max_instances') - if rs > maxinscount: - msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs) - flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) - #else: - #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) - else: - msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Check if the Test ID exists in the database --- - rs = flocklab.check_test_id(cur, testid) - if rs != 0: - if rs == 3: - msg = "Test ID %d does not exist in database." %testid - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - else: - msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EIO, name, logger, config) - - # Check directories needed --- - archivedir = config.get('archiver', 'archive_dir') - archivename = "%d%s"%(testid, config.get('archiver','archive_ext')) - archivepath = "%s/%s"%(archivedir, archivename) - if ((not os.path.exists(archivedir)) or (not os.path.isdir(archivedir))): - if not os.path.exists(archivedir): - os.makedirs(archivedir) - logger.debug("Directory '%s' created." % (archivedir)) - else: - msg = "The path %s does either not exist or is not a directory. Aborting..."%(archivedir) - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - - # Generate archive --- - if ((os.path.exists(archivepath)) and (os.path.isfile(archivepath))): - logger.debug("Archive %s is already existing." %(archivepath)) - else: - # Check if testresultsdir directory is existing: - testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid) - if ((not os.path.exists(testresultsdir)) or (not os.path.isdir(testresultsdir))): - msg = "The path %s does either not exist or is not a directory. Aborting..."%(testresultsdir) - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - else: - logger.debug("Directory %s exists."%(testresultsdir)) - # sort tar file, powerprofiling at the end - pp_part = [] - resultparts = [] - for part in os.listdir(testresultsdir): - if part!='powerprofiling.csv': - resultparts.append(os.path.basename(testresultsdir)+'/'+part) - else: - pp_part.append(os.path.basename(testresultsdir)+'/'+part) - resultparts.extend(pp_part) - # Archive files: - max_cpus = config.get('archiver', 'pigz_max_cpus') - try: - nice_level = config.getint('archiver', 'nice_level') - except: - logger.warn("Could not read nice_level from config file. Setting level to 10.") - nice_level = 10 - if nice_level not in list(range(0,20)): - logger.warn("Defined nice_level %d from config file is out of bounds. Setting level to 10."%nice_level) - nice_level = 10 - tarcmd = ['tar', 'cf', '-', '-C', os.path.dirname(testresultsdir)] - tarcmd.extend(resultparts) - # Use pigz instead of gz because pigz makes use of multiple processors. 
- gzcmd = ['pigz', '-p', max_cpus] - outfile = open(archivepath, 'w+') - logger.debug("Starting to write archive %s using max %s CPUs and level %d for compressing..."%(archivepath, max_cpus, nice_level)) - ptar = subprocess.Popen(tarcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level)) - pgz = subprocess.Popen(gzcmd, stdin=ptar.stdout, stdout=outfile, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level)) - gzout, gzerr = pgz.communicate() - tarout, tarerr = ptar.communicate() - outfile.close() - if pgz.returncode == 0: - logger.debug("Created archive") - # Remove testresultsdir: - shutil.rmtree(testresultsdir) - logger.debug("Removed directory %s"%testresultsdir) - else: - msg = "Error %d when creating archive %s"%(pgz.returncode, archivepath) - msg += "Tried to pipe commands %s and %s"%(str(tarcmd), str(gzcmd)) - msg += "Tar command returned: %s, %s"%(str(tarout), str(tarerr)) - msg += "Gz command returned: %s, %s"%(str(gzout), str(gzerr)) - msg += "Error was: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) - archive_size = os.path.getsize(archivepath) - archive_size_mb = float(archive_size)/1048576 - logger.debug("Archive has size %dB (%.3fMB)"%(archive_size, archive_size_mb)) - - # Send results to test owner --- - if send_email: - # Get Email of test owner: - rs = flocklab.get_test_owner(cur, testid) - if isinstance(rs, tuple): - usermail = rs[4] - else: - usermail = rs - if ((usermail == 1) or (usermail == 2)): - msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." %testid - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - else: - logger.debug("Got email of test owner: %s" %(str(usermail))) - - # Check the size of the archive and only send it by email if it has a decent size: - if ( archive_size > int(config.get('archiver','email_maxsize')) ): - msg = "Dear FlockLab user,\n\n\ + # Check if necessary parameters are set --- + if ((testid == -1)): + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Add Test ID to logger name --- + logger.name += " (Test %d)"%testid + + # Connect to the DB --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Check if max number of instances is not reached --- + rs = flocklab.count_running_instances(scriptname) + if (rs >= 0): + maxinscount = config.getint('archiver', 'max_instances') + if rs > maxinscount: + msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs) + flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) + #else: + #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) + else: + msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Check if the Test ID exists in the database --- + rs = flocklab.check_test_id(cur, testid) + if rs != 0: + if rs == 3: + msg = "Test ID %d does not exist in database." 
%testid + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + else: + msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EIO, name, logger, config) + + # Check directories needed --- + archivedir = config.get('archiver', 'archive_dir') + archivename = "%d%s"%(testid, config.get('archiver','archive_ext')) + archivepath = "%s/%s"%(archivedir, archivename) + if ((not os.path.exists(archivedir)) or (not os.path.isdir(archivedir))): + if not os.path.exists(archivedir): + os.makedirs(archivedir) + logger.debug("Directory '%s' created." % (archivedir)) + else: + msg = "The path %s does either not exist or is not a directory. Aborting..."%(archivedir) + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + + # Generate archive --- + if ((os.path.exists(archivepath)) and (os.path.isfile(archivepath))): + logger.debug("Archive %s is already existing." %(archivepath)) + else: + # Check if testresultsdir directory is existing: + testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid) + if ((not os.path.exists(testresultsdir)) or (not os.path.isdir(testresultsdir))): + msg = "The path %s does either not exist or is not a directory. Aborting..."%(testresultsdir) + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + else: + logger.debug("Directory %s exists."%(testresultsdir)) + # sort tar file, powerprofiling at the end + pp_part = [] + resultparts = [] + for part in os.listdir(testresultsdir): + if part!='powerprofiling.csv': + resultparts.append(os.path.basename(testresultsdir)+'/'+part) + else: + pp_part.append(os.path.basename(testresultsdir)+'/'+part) + resultparts.extend(pp_part) + # Archive files: + max_cpus = config.get('archiver', 'pigz_max_cpus') + try: + nice_level = config.getint('archiver', 'nice_level') + except: + logger.warn("Could not read nice_level from config file. Setting level to 10.") + nice_level = 10 + if nice_level not in list(range(0,20)): + logger.warn("Defined nice_level %d from config file is out of bounds. Setting level to 10."%nice_level) + nice_level = 10 + tarcmd = ['tar', 'cf', '-', '-C', os.path.dirname(testresultsdir)] + tarcmd.extend(resultparts) + # Use pigz instead of gz because pigz makes use of multiple processors. 
+ gzcmd = ['pigz', '-p', max_cpus] + outfile = open(archivepath, 'w+') + logger.debug("Starting to write archive %s using max %s CPUs and level %d for compressing..."%(archivepath, max_cpus, nice_level)) + ptar = subprocess.Popen(tarcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level)) + pgz = subprocess.Popen(gzcmd, stdin=ptar.stdout, stdout=outfile, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=lambda : os.nice(nice_level)) + gzout, gzerr = pgz.communicate() + tarout, tarerr = ptar.communicate() + outfile.close() + if pgz.returncode == 0: + logger.debug("Created archive") + # Remove testresultsdir: + shutil.rmtree(testresultsdir) + logger.debug("Removed directory %s"%testresultsdir) + else: + msg = "Error %d when creating archive %s"%(pgz.returncode, archivepath) + msg += "Tried to pipe commands %s and %s"%(str(tarcmd), str(gzcmd)) + msg += "Tar command returned: %s, %s"%(str(tarout), str(tarerr)) + msg += "Gz command returned: %s, %s"%(str(gzout), str(gzerr)) + msg += "Error was: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) + archive_size = os.path.getsize(archivepath) + archive_size_mb = float(archive_size)/1048576 + logger.debug("Archive has size %dB (%.3fMB)"%(archive_size, archive_size_mb)) + + # Send results to test owner --- + if send_email: + # Get Email of test owner: + rs = flocklab.get_test_owner(cur, testid) + if isinstance(rs, tuple): + usermail = rs[4] + else: + usermail = rs + if ((usermail == 1) or (usermail == 2)): + msg = "Error when trying to get test owner email address for test id %d from database. Aborting..." %testid + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + else: + logger.debug("Got email of test owner: %s" %(str(usermail))) + + # Check the size of the archive and only send it by email if it has a decent size: + if ( archive_size > int(config.get('archiver','email_maxsize')) ): + msg = "Dear FlockLab user,\n\n\ Measurement data for test with ID %d has been successfully retrieved from the FlockLab database \ but could not be sent by email as it is too big. Please fetch your test results from the user interface.\n\n\ Yours faithfully,\nthe FlockLab server" %(testid) - flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail) - else: - msg = "Dear FlockLab user,\n\n\ + flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail) + else: + msg = "Dear FlockLab user,\n\n\ Measurement data for test with ID %d has been successfully retrieved from the FlockLab database, \ compressed and attached to this email. 
You can find all test results in the attached archive file %s\n\n\ Yours faithfully,\nthe FlockLab server" %(testid, archivename) - flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail, attachments=[archivepath]) - logger.debug("Sent email to test owner") - - cur.close() - cn.close() - sys.exit(SUCCESS) + flocklab.send_mail(subject="[FlockLab] Results for Test ID %d" %testid, message=msg, recipients=usermail, attachments=[archivepath]) + logger.debug("Sent email to test owner") + + cur.close() + cn.close() + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_cleaner.py b/testmanagementserver/flocklab_cleaner.py index a24d75c0c31807152da3eb2be0c04d6ec4911c52..e52fd5206e48def10d7d4131942e0dc76ac8bdee 100755 --- a/testmanagementserver/flocklab_cleaner.py +++ b/testmanagementserver/flocklab_cleaner.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" __copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__license__ = "GPL" import sys, os, getopt, errno, traceback, logging, time, __main__, shutil, glob, datetime @@ -27,8 +27,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -39,10 +39,10 @@ class Error(Exception): # ############################################################################## def usage(): - print("Usage: %s [--debug] [--help]" %scriptname) - print("Options:") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s [--debug] [--help]" %scriptname) + print("Options:") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -54,165 +54,165 @@ def usage(): ############################################################################## def main(argv): - ### Global Variables ### - global logger - global config - - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get config --- - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." 
- flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - for opt, arg in opts: - if opt in ("-d", "--debug"): - logger.setLevel(logging.DEBUG) - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Allow only x instances --- - rs = flocklab.count_running_instances(scriptname) - if (rs >= 0): - maxinscount = config.getint('cleaner', 'max_instances') - if rs > maxinscount: - msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs) - flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) - #else: - #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) - else: - msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Connect to the database --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Connected to database") - - # Check for running tests --- - testisrunning = flocklab.is_test_running(cur) - - # Check for work --- - if testisrunning: - logger.debug("A test is running, thus exiting...") - else: - try: - # Check for tests to delete --- - sql = """ SELECT `serv_tests_key`, `time_start_wish` - FROM `tbl_serv_tests` - WHERE (`test_status` = 'todelete') - """ - #logger.info("Looking in DB for tests which are marked to be deleted...") - if ( cur.execute(sql) <= 0 ): - logger.info("No tests found which are marked to be deleted.") - else: - rs = cur.fetchall() - for (testid, starttime) in rs: - testid = str(testid) - logger.debug("Found test ID %s to delete."%testid) - # If a test is to be deleted which has not run yet, delete it completely. 
Otherwise, keep the metadata of the test for statistics: - if (starttime > datetime.datetime.today()): - delete_all = True - logger.debug("Test ID %s did not run yet, thus all data (including the test metadata) will be deleted."%testid) - else: - delete_all = False - # Clean through all relevant tables --- - relevant_tables = ['tbl_serv_errorlog'] - if delete_all: - relevant_tables.append('tbl_serv_map_test_observer_targetimages') - for table in relevant_tables: - sql = """ DELETE FROM %s - WHERE (`test_fk` = %s) - """ - starttime = time.time() - num_deleted_rows = cur.execute(sql%(table, testid)) - cn.commit() - logger.debug("Deleted %i rows of data in table %s for test ID %s in %f seconds" %(num_deleted_rows, table, testid, (time.time()-starttime))) - - # Delete cached test results --- - archive_path = "%s/%s%s"%(config.get('archiver','archive_dir'), testid, config.get('archiver','archive_ext')) - viz_pathes = glob.glob("%s/%s_*"%(config.get('viz','imgdir'), testid)) - pathes = [archive_path] - pathes.extend(viz_pathes) - for path in pathes: - if os.path.exists(path): - if os.path.isfile(path): - os.remove(path) - else: - shutil.rmtree(path) - logger.debug("Removed path %s for test %s."%(path, testid)) - - # Delete test itself --- - if delete_all: - # Delete test itself: - sql = """ DELETE FROM `tbl_serv_tests` - WHERE (`serv_tests_key` = %s) - """ - starttime = time.time() - num_deleted_rows = cur.execute(sql%(testid)) - cn.commit() - logger.debug("Deleted %i rows of data in table tbl_serv_tests for test ID %s in %f seconds" %(num_deleted_rows, testid, (time.time()-starttime))) - else: - # Set test status to deleted but keep metadata --- - flocklab.set_test_status(cur, cn, int(testid), "deleted") - logger.debug("Set status for test ID %s to 'deleted'" %(testid)) - - # Delete old entries in viz cache --- - keeptime = config.getint('cleaner', 'keeptime_viz') - earliest_keeptime = time.time() - (keeptime*86400) - imgdir_path = config.get('viz','imgdir') - if not os.path.isdir(imgdir_path): - os.mkdir(imgdir_path) - for f in os.listdir(imgdir_path): - path = os.path.join(imgdir_path, f) - if os.stat(path).st_mtime < earliest_keeptime: - logger.debug("Removing viz cache %s..."%path) - shutil.rmtree(path) - - except: - msg = "Encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])) - logger.error(msg) - emails = flocklab.get_admin_emails(cur, config) - msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) - flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) - finally: - cur.close() - cn.close() - - #logger.debug("Finished. Exit program.") - sys.exit(SUCCESS) + ### Global Variables ### + global logger + global config + + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get config --- + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." 
+ flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + for opt, arg in opts: + if opt in ("-d", "--debug"): + logger.setLevel(logging.DEBUG) + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Allow only x instances --- + rs = flocklab.count_running_instances(scriptname) + if (rs >= 0): + maxinscount = config.getint('cleaner', 'max_instances') + if rs > maxinscount: + msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs) + flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) + #else: + #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) + else: + msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Connect to the database --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Connected to database") + + # Check for running tests --- + testisrunning = flocklab.is_test_running(cur) + + # Check for work --- + if testisrunning: + logger.debug("A test is running, thus exiting...") + else: + try: + # Check for tests to delete --- + sql = """ SELECT `serv_tests_key`, `time_start_wish` + FROM `tbl_serv_tests` + WHERE (`test_status` = 'todelete') + """ + #logger.info("Looking in DB for tests which are marked to be deleted...") + if ( cur.execute(sql) <= 0 ): + logger.info("No tests found which are marked to be deleted.") + else: + rs = cur.fetchall() + for (testid, starttime) in rs: + testid = str(testid) + logger.debug("Found test ID %s to delete."%testid) + # If a test is to be deleted which has not run yet, delete it completely. 
Otherwise, keep the metadata of the test for statistics: + if (starttime > datetime.datetime.today()): + delete_all = True + logger.debug("Test ID %s did not run yet, thus all data (including the test metadata) will be deleted."%testid) + else: + delete_all = False + # Clean through all relevant tables --- + relevant_tables = ['tbl_serv_errorlog'] + if delete_all: + relevant_tables.append('tbl_serv_map_test_observer_targetimages') + for table in relevant_tables: + sql = """ DELETE FROM %s + WHERE (`test_fk` = %s) + """ + starttime = time.time() + num_deleted_rows = cur.execute(sql%(table, testid)) + cn.commit() + logger.debug("Deleted %i rows of data in table %s for test ID %s in %f seconds" %(num_deleted_rows, table, testid, (time.time()-starttime))) + + # Delete cached test results --- + archive_path = "%s/%s%s"%(config.get('archiver','archive_dir'), testid, config.get('archiver','archive_ext')) + viz_pathes = glob.glob("%s/%s_*"%(config.get('viz','imgdir'), testid)) + pathes = [archive_path] + pathes.extend(viz_pathes) + for path in pathes: + if os.path.exists(path): + if os.path.isfile(path): + os.remove(path) + else: + shutil.rmtree(path) + logger.debug("Removed path %s for test %s."%(path, testid)) + + # Delete test itself --- + if delete_all: + # Delete test itself: + sql = """ DELETE FROM `tbl_serv_tests` + WHERE (`serv_tests_key` = %s) + """ + starttime = time.time() + num_deleted_rows = cur.execute(sql%(testid)) + cn.commit() + logger.debug("Deleted %i rows of data in table tbl_serv_tests for test ID %s in %f seconds" %(num_deleted_rows, testid, (time.time()-starttime))) + else: + # Set test status to deleted but keep metadata --- + flocklab.set_test_status(cur, cn, int(testid), "deleted") + logger.debug("Set status for test ID %s to 'deleted'" %(testid)) + + # Delete old entries in viz cache --- + keeptime = config.getint('cleaner', 'keeptime_viz') + earliest_keeptime = time.time() - (keeptime*86400) + imgdir_path = config.get('viz','imgdir') + if not os.path.isdir(imgdir_path): + os.mkdir(imgdir_path) + for f in os.listdir(imgdir_path): + path = os.path.join(imgdir_path, f) + if os.stat(path).st_mtime < earliest_keeptime: + logger.debug("Removing viz cache %s..."%path) + shutil.rmtree(path) + + except: + msg = "Encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])) + logger.error(msg) + emails = flocklab.get_admin_emails(cur, config) + msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) + flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) + finally: + cur.close() + cn.close() + + #logger.debug("Finished. 
Exit program.") + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + diff --git a/testmanagementserver/flocklab_daq_config.py b/testmanagementserver/flocklab_daq_config.py index 16e9e725aa6d8f3e1f77e33b8cd5140271e99c7c..b2d7995aabde9bd3c7f41883d7f138257156c157 100755 --- a/testmanagementserver/flocklab_daq_config.py +++ b/testmanagementserver/flocklab_daq_config.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Balz Maag <bmaag@ee.ethz.ch>" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Balz Maag" -__license__ = "GPL" +__author__ = "Balz Maag <bmaag@ee.ethz.ch>" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Balz Maag" +__license__ = "GPL" import os, sys, subprocess, getopt, errno, tempfile, time, shutil, struct, __main__, operator @@ -41,22 +41,22 @@ b_max = 60 ############################################################################## # -# timeformat_xml2epoch - Convert between different timeformats of -# XML config file and FlockLab services +# timeformat_xml2epoch - Convert between different timeformats of +# XML config file and FlockLab services # ############################################################################## def timeformat_xml2epoch(config=None, timestring=""): - if (not config) or (not timestring): - return errno.EINVAL - try: - # First convert time from xml-string to time format: - servicetime = time.strptime(timestring, config.get("observer", "timeformat")) - # Now convert to epoch: - servicetimeepoch = int(time.mktime(servicetime)) - except: - return errno.EFAULT - - return servicetimeepoch + if (not config) or (not timestring): + return errno.EINVAL + try: + # First convert time from xml-string to time format: + servicetime = time.strptime(timestring, config.get("observer", "timeformat")) + # Now convert to epoch: + servicetimeepoch = int(time.mktime(servicetime)) + except: + return errno.EFAULT + + return servicetimeepoch ### END timeformat_xml2epoch() ############################################################################## @@ -64,14 +64,14 @@ def timeformat_xml2epoch(config=None, timestring=""): # Usage # ############################################################################## -def usage(): - print("Usage: %s --xml=<path> --outfile=<path> [--debug] [--help]" %sys.argv[0]) - print("Convert a test-configuration in the provided XMl to a binary command file for FPGA UART transmission") - print("Options:") - print(" --xml=<path>\t\t\tPath to the XML file with the testconfiguration.") - print(" --outfile=<path>\t\t\tPath to the file where the configuration is written to.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. 
Print this help.") +def usage(): + print("Usage: %s --xml=<path> --outfile=<path> [--debug] [--help]" %sys.argv[0]) + print("Convert a test-configuration in the provided XMl to a binary command file for FPGA UART transmission") + print("Options:") + print(" --xml=<path>\t\t\tPath to the XML file with the testconfiguration.") + print(" --outfile=<path>\t\t\tPath to the file where the configuration is written to.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -84,272 +84,272 @@ def usage(): # ############################################################################## def main(argv): - global debug - global logger - xmlfile = None - outfile = None - last_barrier = 0 - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - # get config - config = flocklab.get_config() + global debug + global logger + xmlfile = None + outfile = None + last_barrier = 0 + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + # get config + config = flocklab.get_config() - # Get command line parameters. - try: - opts, args = getopt.getopt(argv, "hdx:o:", ["help", "debug", "xml=", "outfile="]) - except getopt.GetoptError as err: - print(str(err)) - logger.error(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - logger.error("Error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - sys.exit(errno.EINVAL) - for opt, arg in opts: - if opt in ("-x", "--xml"): - xmlfile = arg - if not (os.path.exists(xmlfile)): - err = "Error: file %s does not exist" %(str(xmlfile)) - logger.error(err) - sys.exit(errno.EINVAL) - elif opt in ("-o", "--outfile"): - outfile = arg - elif opt in ("-h", "--help"): - debug = True - usage() - sys.exit(SUCCESS) - elif opt in ("-d", "--debug"): - debug = True - else: - logger.error("Wrong API usage") - usage() - sys.exit(errno.EINVAL) - - # Check for mandatory arguments: - if not xmlfile: - print("Wrong API usage") - logger.error("Wrong API usage") - usage() - sys.exit(errno.EINVAL) - - errors = [] - - # open uart config file - try: - f = open(outfile,"wb") - except: - logger.warn("Error opening binary config file") - sys.exit(errno.EINVAL) - # parse xml file - #xmlfile = "current.xml" - try: - tree = ElementTree() - tree.parse(xmlfile) - if debug: - logger.debug("Parsed XML.") - except: - msg = "Could not find or open XML file at %s."%(str(xmlfile)) - errors.append(msg) - if debug: - logger.error(msg) + # Get command line parameters. 
+ try: + opts, args = getopt.getopt(argv, "hdx:o:", ["help", "debug", "xml=", "outfile="]) + except getopt.GetoptError as err: + print(str(err)) + logger.error(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + logger.error("Error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + sys.exit(errno.EINVAL) + for opt, arg in opts: + if opt in ("-x", "--xml"): + xmlfile = arg + if not (os.path.exists(xmlfile)): + err = "Error: file %s does not exist" %(str(xmlfile)) + logger.error(err) + sys.exit(errno.EINVAL) + elif opt in ("-o", "--outfile"): + outfile = arg + elif opt in ("-h", "--help"): + debug = True + usage() + sys.exit(SUCCESS) + elif opt in ("-d", "--debug"): + debug = True + else: + logger.error("Wrong API usage") + usage() + sys.exit(errno.EINVAL) + + # Check for mandatory arguments: + if not xmlfile: + print("Wrong API usage") + logger.error("Wrong API usage") + usage() + sys.exit(errno.EINVAL) + + errors = [] + + # open uart config file + try: + f = open(outfile,"wb") + except: + logger.warn("Error opening binary config file") + sys.exit(errno.EINVAL) + # parse xml file + #xmlfile = "current.xml" + try: + tree = ElementTree() + tree.parse(xmlfile) + if debug: + logger.debug("Parsed XML.") + except: + msg = "Could not find or open XML file at %s."%(str(xmlfile)) + errors.append(msg) + if debug: + logger.error(msg) - # sequence in binary file must be: - # 1. route off - # 2. tracing - # 3. nth sample - # 4. reset pin actuation at time 0.0 - # 5. start at time StartTime - - ############################################################################## - # - # ROUTING - # - ############################################################################## - # routing off, no xml parsing needed: - try: - f.write((int(headerBin['route'] + onOffBin['off'],2).to_bytes(1, 'big'))) - except: - logger.error("Error writing route off command to cmd-file") - - ############################################################################## - # - # TRACING - # - ############################################################################## - tracePins = [] - if (tree.find('obsGpioMonitorConf') != None): - if debug: - logger.debug("Found config for GPIO monitoring.") - - # find all tracing pins - - subtree = tree.find('obsGpioMonitorConf') - pinconfs = list(subtree.getiterator("pinConf")) - for pinconf in pinconfs: - tracePins.append(pinconf.find('pin').text) + # sequence in binary file must be: + # 1. route off + # 2. tracing + # 3. nth sample + # 4. reset pin actuation at time 0.0 + # 5. 
start at time StartTime + + ############################################################################## + # + # ROUTING + # + ############################################################################## + # routing off, no xml parsing needed: + try: + f.write((int(headerBin['route'] + onOffBin['off'],2).to_bytes(1, 'big'))) + except: + logger.error("Error writing route off command to cmd-file") + + ############################################################################## + # + # TRACING + # + ############################################################################## + tracePins = [] + if (tree.find('obsGpioMonitorConf') != None): + if debug: + logger.debug("Found config for GPIO monitoring.") + + # find all tracing pins + + subtree = tree.find('obsGpioMonitorConf') + pinconfs = list(subtree.getiterator("pinConf")) + for pinconf in pinconfs: + tracePins.append(pinconf.find('pin').text) - # set appropriate bit in tracing-pin byte - traceBin = 0; - for i in tracePins: - traceBin |= (1 << flocklab.daqpin_abbr2num(i)) - + # set appropriate bit in tracing-pin byte + traceBin = 0; + for i in tracePins: + traceBin |= (1 << flocklab.daqpin_abbr2num(i)) + - ############################################################################## - # - # ACTUATION - # - ############################################################################## - - # Format: actuatePins = ['startimeEpoch','microsecs','pin','level'] - actuatePins = [] - rstTime = []; - if (tree.find('obsGpioSettingConf') != None): - if debug: - logger.debug("Found config for GPIO setting.") - subtree = tree.find('obsGpioSettingConf') - pinconfs = list(subtree.getiterator("pinConf")) - # first find start and stop time of test - for i in pinconfs: - pin = flocklab.daqpin_abbr2num(i.find('pin').text) - starttimeEpoch = timeformat_xml2epoch(config, i.find('absoluteTime/absoluteDateTime').text) - if pin == 0: - rstTime.append(starttimeEpoch) - - rstTime.sort() - # get the actual test start time - testStartTime = rstTime[0] - # get the actual test stop time - testStopTime = rstTime[len(rstTime)-1] - - if testStartTime > testStopTime: - logger.error("Invalid test start and stop time") - sys.exit(EINVAL) - - invalid_actuation_cnt = 0 - for pinconf in pinconfs: - pin = flocklab.daqpin_abbr2num(pinconf.find('pin').text) - # we want also to monitor pins which are actuated - traceBin |= (1 << pin) - # tracing and actuation pins do not share the same bit-field in the corresponding uart packet (act. is shifted by 1 bit to left compared to trace.) - pin = pin + 1; - level = flocklab.daqlevel_str2abbr(pinconf.find('level').text) - interval = int(pinconf.find('intervalMicrosecs').text) - count = int(pinconf.find('count').text) - # Get time and bring it into right format: - starttimeEpoch = timeformat_xml2epoch(config, pinconf.find('absoluteTime/absoluteDateTime').text) - microsecs = int(pinconf.find('absoluteTime/absoluteMicrosecs').text) - # add actuations - for j in range(0,count): - intSec = (microsecs + (j)*interval)/1000000 - intMicro = (microsecs +(j)*interval)%1000000 - if starttimeEpoch + intSec - testStartTime + intSec >= 0 and intMicro >= 0 and testStopTime - starttimeEpoch - intSec >= 0: - actuatePins.append([starttimeEpoch-testStartTime + intSec, intMicro, pin, level]) - else: - invalid_actuation_cnt = invalid_actuation_cnt + 1 - if invalid_actuation_cnt > 0: - logger.debug("There were %d invalid actuation times." 
% (invalid_actuation_cnt)) - - # sort all the gathered configs by time - traceBinString = str(bin(traceBin))[2:].zfill(8) - # write the tracing config command to the config file - f.write((int(headerBin['tracing'] + traceBinString[0:4],2).to_bytes(1, 'big'))) - f.write((int('1' + traceBinString[4:8] + '000',2).to_bytes(1, 'big'))) - - ############################################################################## - # - # POWER PROFILING: basically is an actuation - # - ############################################################################## - # - # TODO: multiple sammpling rates??? complicates the config a bit as we need to change the sampling rate right before the next actuation command which starts the powerprofiling - nthsample = 1 - if (tree.find('obsPowerprofConf') != None): - if debug: - logger.debug("Found config for powerprofiling.") - # Cycle through all powerprof configs and insert them into file: - subtree = tree.find('obsPowerprofConf') - profconfs = list(subtree.getiterator("profConf")) - for profconf in profconfs: - duration = int(profconf.find('duration').text) - # Get time and bring it into right format: - starttimeEpoch = timeformat_xml2epoch(config, profconf.find('absoluteTime/absoluteDateTime').text) - microsecs = int(profconf.find('absoluteTime/absoluteMicrosecs').text) - nthsample = profconf.find('samplingDivider') - if nthsample != None: - nthsample = int(nthsample.text) - else: - nthsample = int(config.get('observer', 'daq_pp_nth_sample_default')) - # set adc_off_act to low, at the starttime and to high after duration - if starttimeEpoch - testStartTime >= 0 and microsecs >= 0: - actuatePins.append([starttimeEpoch - testStartTime, microsecs, 0, 0]) - else: - logger.error("Invalid actuation time") - - durMicro = (duration*1000)%1000000 - durSec = int(duration/1000) - - if starttimeEpoch - testStartTime + durSec >= 0 and microsecs + durMicro >= 0: - actuatePins.append([starttimeEpoch - testStartTime + durSec, microsecs + durMicro, 0, 1]) - else: - logger.error("Invalid actuation time") - - # write nth-sample to the file - if nthsample != None: - nthString = str(bin(nthsample))[2:].zfill(11) - f.write((int(headerBin['nth'] + nthString[0:4],2).to_bytes(1, 'big'))) - f.write((int('1' + nthString[4:11] ,2).to_bytes(1, 'big'))) - - # sort the commands by: second -> microsecond -> pin-number - actuatePins.sort(key=operator.itemgetter(0,1,2)) - # update toggle states - pinstates = {} - for i in range(0,len(actuatePins)): - if actuatePins[i][3]==flocklab.TOGGLE: - if actuatePins[i][2] in pinstates: - actuatePins[i][3]=(pinstates[actuatePins[i][2]] + 1) % 2 # toggle - else: - actuatePins[i][3]=flocklab.HIGH # initial state, set to HIGH - pinstates[actuatePins[i][2]] = actuatePins[i][3]; - - # now we can write the test-start packet - f.write((int(headerBin['start'] + onOffBin['on'],2).to_bytes(1, 'big'))) # 0x4f - f.write(pack('>I',testStartTime)) + ############################################################################## + # + # ACTUATION + # + ############################################################################## + + # Format: actuatePins = ['startimeEpoch','microsecs','pin','level'] + actuatePins = [] + rstTime = []; + if (tree.find('obsGpioSettingConf') != None): + if debug: + logger.debug("Found config for GPIO setting.") + subtree = tree.find('obsGpioSettingConf') + pinconfs = list(subtree.getiterator("pinConf")) + # first find start and stop time of test + for i in pinconfs: + pin = flocklab.daqpin_abbr2num(i.find('pin').text) + starttimeEpoch = 
timeformat_xml2epoch(config, i.find('absoluteTime/absoluteDateTime').text) + if pin == 0: + rstTime.append(starttimeEpoch) + + rstTime.sort() + # get the actual test start time + testStartTime = rstTime[0] + # get the actual test stop time + testStopTime = rstTime[len(rstTime)-1] + + if testStartTime > testStopTime: + logger.error("Invalid test start and stop time") + sys.exit(EINVAL) + + invalid_actuation_cnt = 0 + for pinconf in pinconfs: + pin = flocklab.daqpin_abbr2num(pinconf.find('pin').text) + # we want also to monitor pins which are actuated + traceBin |= (1 << pin) + # tracing and actuation pins do not share the same bit-field in the corresponding uart packet (act. is shifted by 1 bit to left compared to trace.) + pin = pin + 1; + level = flocklab.daqlevel_str2abbr(pinconf.find('level').text) + interval = int(pinconf.find('intervalMicrosecs').text) + count = int(pinconf.find('count').text) + # Get time and bring it into right format: + starttimeEpoch = timeformat_xml2epoch(config, pinconf.find('absoluteTime/absoluteDateTime').text) + microsecs = int(pinconf.find('absoluteTime/absoluteMicrosecs').text) + # add actuations + for j in range(0,count): + intSec = (microsecs + (j)*interval)/1000000 + intMicro = (microsecs +(j)*interval)%1000000 + if starttimeEpoch + intSec - testStartTime + intSec >= 0 and intMicro >= 0 and testStopTime - starttimeEpoch - intSec >= 0: + actuatePins.append([starttimeEpoch-testStartTime + intSec, intMicro, pin, level]) + else: + invalid_actuation_cnt = invalid_actuation_cnt + 1 + if invalid_actuation_cnt > 0: + logger.debug("There were %d invalid actuation times." % (invalid_actuation_cnt)) + + # sort all the gathered configs by time + traceBinString = str(bin(traceBin))[2:].zfill(8) + # write the tracing config command to the config file + f.write((int(headerBin['tracing'] + traceBinString[0:4],2).to_bytes(1, 'big'))) + f.write((int('1' + traceBinString[4:8] + '000',2).to_bytes(1, 'big'))) + + ############################################################################## + # + # POWER PROFILING: basically is an actuation + # + ############################################################################## + # + # TODO: multiple sammpling rates??? 
complicates the config a bit as we need to change the sampling rate right before the next actuation command which starts the powerprofiling + nthsample = 1 + if (tree.find('obsPowerprofConf') != None): + if debug: + logger.debug("Found config for powerprofiling.") + # Cycle through all powerprof configs and insert them into file: + subtree = tree.find('obsPowerprofConf') + profconfs = list(subtree.getiterator("profConf")) + for profconf in profconfs: + duration = int(profconf.find('duration').text) + # Get time and bring it into right format: + starttimeEpoch = timeformat_xml2epoch(config, profconf.find('absoluteTime/absoluteDateTime').text) + microsecs = int(profconf.find('absoluteTime/absoluteMicrosecs').text) + nthsample = profconf.find('samplingDivider') + if nthsample != None: + nthsample = int(nthsample.text) + else: + nthsample = int(config.get('observer', 'daq_pp_nth_sample_default')) + # set adc_off_act to low, at the starttime and to high after duration + if starttimeEpoch - testStartTime >= 0 and microsecs >= 0: + actuatePins.append([starttimeEpoch - testStartTime, microsecs, 0, 0]) + else: + logger.error("Invalid actuation time") + + durMicro = (duration*1000)%1000000 + durSec = int(duration/1000) + + if starttimeEpoch - testStartTime + durSec >= 0 and microsecs + durMicro >= 0: + actuatePins.append([starttimeEpoch - testStartTime + durSec, microsecs + durMicro, 0, 1]) + else: + logger.error("Invalid actuation time") + + # write nth-sample to the file + if nthsample != None: + nthString = str(bin(nthsample))[2:].zfill(11) + f.write((int(headerBin['nth'] + nthString[0:4],2).to_bytes(1, 'big'))) + f.write((int('1' + nthString[4:11] ,2).to_bytes(1, 'big'))) + + # sort the commands by: second -> microsecond -> pin-number + actuatePins.sort(key=operator.itemgetter(0,1,2)) + # update toggle states + pinstates = {} + for i in range(0,len(actuatePins)): + if actuatePins[i][3]==flocklab.TOGGLE: + if actuatePins[i][2] in pinstates: + actuatePins[i][3]=(pinstates[actuatePins[i][2]] + 1) % 2 # toggle + else: + actuatePins[i][3]=flocklab.HIGH # initial state, set to HIGH + pinstates[actuatePins[i][2]] = actuatePins[i][3]; + + # now we can write the test-start packet + f.write((int(headerBin['start'] + onOffBin['on'],2).to_bytes(1, 'big'))) # 0x4f + f.write(pack('>I',testStartTime)) - actCmds = list(range(0,len(actuatePins))) - for ind in actCmds: - actPin = 0 - actPin |= 1 << actuatePins[ind][2] - actPin = str(bin(actPin))[2:].zfill(4) - actLevel = 0 - actLevel |= actuatePins[ind][3] << actuatePins[ind][2] - actLevel = str(bin(actLevel))[2:].zfill(4) - actSec = str(bin(int(actuatePins[ind][0]) * 5 + int(actuatePins[ind][1] / 200000)))[2:].zfill(17) # '1s' on the FPGA is 200ms - actSubsec = str(bin((actuatePins[ind][1] % 200000) * 10))[2:].zfill(21) # subsec has a resolution of 100ns - # insert a barrier every time the actuation time is > last_barrier + b_min - if actuatePins[ind][0] > last_barrier + b_max: - last_barrier = actuatePins[ind][0] - b_min - f.write((int(headerBin['barrier'] + '0000',2).to_bytes(1, 'big'))) - f.write(pack('>I',int(last_barrier + testStartTime))) + actCmds = list(range(0,len(actuatePins))) + for ind in actCmds: + actPin = 0 + actPin |= 1 << actuatePins[ind][2] + actPin = str(bin(actPin))[2:].zfill(4) + actLevel = 0 + actLevel |= actuatePins[ind][3] << actuatePins[ind][2] + actLevel = str(bin(actLevel))[2:].zfill(4) + actSec = str(bin(int(actuatePins[ind][0]) * 5 + int(actuatePins[ind][1] / 200000)))[2:].zfill(17) # '1s' on the FPGA is 200ms + actSubsec = 
str(bin((actuatePins[ind][1] % 200000) * 10))[2:].zfill(21) # subsec has a resolution of 100ns + # insert a barrier every time the actuation time is > last_barrier + b_min + if actuatePins[ind][0] > last_barrier + b_max: + last_barrier = actuatePins[ind][0] - b_min + f.write((int(headerBin['barrier'] + '0000',2).to_bytes(1, 'big'))) + f.write(pack('>I',int(last_barrier + testStartTime))) - f.write((int(headerBin['actuation'] + actPin,2).to_bytes(1, 'big'))) - f.write((int('1' + actLevel + actSec[0:3],2).to_bytes(1, 'big'))) - f.write((int('1' + actSec[3:10],2).to_bytes(1, 'big'))) - f.write((int('1' + actSec[10:17],2).to_bytes(1, 'big'))) - f.write((int('1' + actSubsec[0:7],2).to_bytes(1, 'big'))) #5 data - f.write((int('1' + actSubsec[7:14],2).to_bytes(1, 'big'))) #6 data - f.write((int('1' + actSubsec[14:21],2).to_bytes(1, 'big'))) #7 data - - f.flush() - os.fsync(f.fileno()) - f.close() - sys.exit(SUCCESS) + f.write((int(headerBin['actuation'] + actPin,2).to_bytes(1, 'big'))) + f.write((int('1' + actLevel + actSec[0:3],2).to_bytes(1, 'big'))) + f.write((int('1' + actSec[3:10],2).to_bytes(1, 'big'))) + f.write((int('1' + actSec[10:17],2).to_bytes(1, 'big'))) + f.write((int('1' + actSubsec[0:7],2).to_bytes(1, 'big'))) #5 data + f.write((int('1' + actSubsec[7:14],2).to_bytes(1, 'big'))) #6 data + f.write((int('1' + actSubsec[14:21],2).to_bytes(1, 'big'))) #7 data + + f.flush() + os.fsync(f.fileno()) + f.close() + sys.exit(SUCCESS) if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_dispatcher.py b/testmanagementserver/flocklab_dispatcher.py index 565b0d1601fab83a4806fab00e036fefdf80ed71..0f3e250f86aadf7fd52e1c17d9dd7c8d3449e93b 100755 --- a/testmanagementserver/flocklab_dispatcher.py +++ b/testmanagementserver/flocklab_dispatcher.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" __copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__license__ = "GPL" import sys, os, getopt, errno, threading, shutil, time, datetime, subprocess, tempfile, queue, re, logging, traceback, __main__, types, hashlib @@ -32,8 +32,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -44,81 +44,81 @@ class Error(Exception): # ############################################################################## class StopTestThread(threading.Thread): - """ Thread which calls the test stop script on an observer. 
- """ - def __init__(self, obskey, obsdict_key, errors_queue, testid): - threading.Thread.__init__(self) - self._obskey = obskey - self._obsdict_key = obsdict_key - self._errors_queue = errors_queue - self._abortEvent = threading.Event() - self._testid = testid - - def run(self): - try: - logger.debug("Start StopTestThread for observer ID %d"%(self._obsdict_key[self._obskey][1])) - errors = [] - # First test if the observer is online and if the SD card is mounted: - cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), "mount | grep /dev/mmcblk0p1"] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - while p.returncode == None: - self._abortEvent.wait(1.0) - p.poll() - if self._abortEvent.is_set(): - p.kill() - else: - out, err = p.communicate() - rs = p.returncode - if (rs != 0): - if (rs == 1): - if ("No such file or directory" in err): - msg = "SD card on observer ID %s is not mounted, observer will thus be omitted for this test."%(self._obsdict_key[self._obskey][1]) - else: - msg = "Observer ID %s is not reachable (returned: %d: %s, %s)."%(self._obsdict_key[self._obskey][1], rs, out, err) - else: - msg = "Observer ID %s is not responsive (SSH returned %d)."%(self._obsdict_key[self._obskey][1], rs) - errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) - logger.error(msg) - else: - # Call the script on the observer which stops the test: - remote_cmd = config.get("observer", "stoptestscript") + " --testid=%d" % self._testid - if debug: - remote_cmd += " --debug" - cmd = ['ssh' ,'%s'%(self._obsdict_key[self._obskey][2]), remote_cmd] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - while p.returncode == None: - self._abortEvent.wait(1.0) - p.poll() - if self._abortEvent.is_set(): - p.kill() - else: - out, err = p.communicate() - rs = p.returncode - if (rs == SUCCESS): - logger.debug("Test-stop script on observer ID %s succeeded." %(self._obsdict_key[self._obskey][1])) - elif (rs == 255): - msg = "Observer ID %s is not reachable, thus not able to stop test. Dataloss occurred possibly for this observer."%(self._obsdict_key[self._obskey][1]) - errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) - logger.error(msg) - else: - msg = "Test-stop script on observer ID %s failed with error code %s and error message %s" %(str(self._obsdict_key[self._obskey][1]), str(errno.errorcode[rs]), str(out)) - errors.append((msg, rs, self._obsdict_key[self._obskey][1])) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - except Error: - # Main thread requested abort. - # Close a possibly still running subprocess: - if (p is not None) and (p.poll() is not None): - p.kill() - msg = "StopTestThread for observer ID %d aborted."%(self._obsdict_key[self._obskey][1]) - errors.append((msg, errno.ECOMM, self._obsdict_key[self._obskey][1])) - logger.error(msg) - finally: - if (len(errors) > 0): - self._errors_queue.put((self._obskey, errors)) - - def abort(self): - self._abortEvent.set() + """ Thread which calls the test stop script on an observer. 
+ """ + def __init__(self, obskey, obsdict_key, errors_queue, testid): + threading.Thread.__init__(self) + self._obskey = obskey + self._obsdict_key = obsdict_key + self._errors_queue = errors_queue + self._abortEvent = threading.Event() + self._testid = testid + + def run(self): + try: + logger.debug("Start StopTestThread for observer ID %d"%(self._obsdict_key[self._obskey][1])) + errors = [] + # First test if the observer is online and if the SD card is mounted: + cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), "mount | grep /dev/mmcblk0p1"] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + while p.returncode == None: + self._abortEvent.wait(1.0) + p.poll() + if self._abortEvent.is_set(): + p.kill() + else: + out, err = p.communicate() + rs = p.returncode + if (rs != 0): + if (rs == 1): + if ("No such file or directory" in err): + msg = "SD card on observer ID %s is not mounted, observer will thus be omitted for this test."%(self._obsdict_key[self._obskey][1]) + else: + msg = "Observer ID %s is not reachable (returned: %d: %s, %s)."%(self._obsdict_key[self._obskey][1], rs, out, err) + else: + msg = "Observer ID %s is not responsive (SSH returned %d)."%(self._obsdict_key[self._obskey][1], rs) + errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) + logger.error(msg) + else: + # Call the script on the observer which stops the test: + remote_cmd = config.get("observer", "stoptestscript") + " --testid=%d" % self._testid + if debug: + remote_cmd += " --debug" + cmd = ['ssh' ,'%s'%(self._obsdict_key[self._obskey][2]), remote_cmd] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) + while p.returncode == None: + self._abortEvent.wait(1.0) + p.poll() + if self._abortEvent.is_set(): + p.kill() + else: + out, err = p.communicate() + rs = p.returncode + if (rs == SUCCESS): + logger.debug("Test-stop script on observer ID %s succeeded." %(self._obsdict_key[self._obskey][1])) + elif (rs == 255): + msg = "Observer ID %s is not reachable, thus not able to stop test. Dataloss occurred possibly for this observer."%(self._obsdict_key[self._obskey][1]) + errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) + logger.error(msg) + else: + msg = "Test-stop script on observer ID %s failed with error code %s and error message %s" %(str(self._obsdict_key[self._obskey][1]), str(errno.errorcode[rs]), str(out)) + errors.append((msg, rs, self._obsdict_key[self._obskey][1])) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + except Error: + # Main thread requested abort. + # Close a possibly still running subprocess: + if (p is not None) and (p.poll() is not None): + p.kill() + msg = "StopTestThread for observer ID %d aborted."%(self._obsdict_key[self._obskey][1]) + errors.append((msg, errno.ECOMM, self._obsdict_key[self._obskey][1])) + logger.error(msg) + finally: + if (len(errors) > 0): + self._errors_queue.put((self._obskey, errors)) + + def abort(self): + self._abortEvent.set() ### END StopTestThread @@ -129,134 +129,134 @@ class StopTestThread(threading.Thread): # ############################################################################## class StartTestThread(threading.Thread): - """ Thread which uploads all config files to an observer and - starts the test on the observer. 
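StopTestThread above and StartTestThread below run their remote commands with the same pattern: start the ssh subprocess, poll it roughly once per second, and kill it as soon as the main thread sets the abort event. A minimal standalone sketch of that pattern; the function name, host name and poll interval are illustrative:

import subprocess, threading

def run_abortable(cmd, abort_event, poll_interval=1.0):
    # Run cmd until it exits or abort_event is set; returns (returncode, output),
    # or (None, None) if the command was aborted and killed.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    while p.poll() is None:
        if abort_event.wait(poll_interval):   # becomes True once abort() is called
            p.kill()
            p.wait()
            return None, None
    out, _ = p.communicate()
    return p.returncode, out

# Usage, mirroring the threads in this file (host name is illustrative):
#   abort = threading.Event()
#   rc, out = run_abortable(['ssh', 'observer007', 'mount | grep /dev/mmcblk0p1'], abort)

Checking p.poll() directly and using "is None" avoids the extra returncode bookkeeping of the original loop.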
- """ - def __init__(self, obskey, obsdict_key, xmldict_key, imagedict_key, errors_queue, FlockDAQ, testid): - threading.Thread.__init__(self) - self._obskey = obskey - self._obsdict_key = obsdict_key - self._xmldict_key = xmldict_key - self._imagedict_key = imagedict_key - self._errors_queue = errors_queue - self._abortEvent = threading.Event() - self._FlockDAQ = FlockDAQ == 'true' - self._testid = testid - - def run(self): - errors = [] - testconfigfolder = "%s/%d" % (config.get("observer", "testconfigfolder"), self._testid) - obsdataport = config.getint('serialproxy', 'obsdataport') - try: - logger.debug("Start StartTestThread for observer ID %d, FlockDAQ=%s" % (self._obsdict_key[self._obskey][1], str(self._FlockDAQ))) - # First test if the observer is online and if the SD card is mounted: - cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), "ls ~/mmc/ && mkdir %s" % testconfigfolder] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - while p.returncode == None: - self._abortEvent.wait(1.0) - p.poll() - if self._abortEvent.is_set(): - p.kill() - else: - out, err = p.communicate() - rs = p.returncode - if (rs != 0): - if (rs == 1): - if ("No such file or directory" in err): - msg = "SD card on observer ID %s is not mounted, observer will thus be omitted for this test."%(self._obsdict_key[self._obskey][1]) - else: - msg = "Observer ID %s is not reachable, it will thus be omitted for this test (returned: %d: %s, %s)."%(self._obsdict_key[self._obskey][1], rs, out, err) - else: - msg = "Observer ID %s is not responsive, it will thus be omitted for this test (SSH returned %d). Command: %s"%(self._obsdict_key[self._obskey][1], rs, " ".join(cmd)) - errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) - logger.error(msg) - else: - fileuploadlist = [self._xmldict_key[self._obskey][0]] - # generate DAQ config for daq tests: - if self._FlockDAQ: - (fd, daqconfigpath) = tempfile.mkstemp() - os.close(fd) - cmd = [config.get("dispatcher", "flockdaqconfigscript"), "--xml=%s" % self._xmldict_key[self._obskey][0], "--outfile=%s" % daqconfigpath] - p = subprocess.Popen(cmd, universal_newlines=True) - out, err = p.communicate() - rs = p.returncode - if rs != 0: - logger.error("Error %s returned from %s"%(str(rs), config.get('dispatcher','flockdaqconfigscript'))) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Generation of DAQ config file failed.") - errors.append("Output of script was: %s %s" % (str(out), str(err))) - else: - fileuploadlist.append(daqconfigpath) - if self._obskey in list(self._imagedict_key.keys()): - for image in self._imagedict_key[self._obskey]: - fileuploadlist.append(image[0]) - # Now upload the image and XML config file: - cmd = ['scp', '-q'] - cmd.extend(fileuploadlist) - cmd.append('%s:%s/.'%(self._obsdict_key[self._obskey][2], testconfigfolder)) - p = subprocess.Popen(cmd) - while p.returncode == None: - self._abortEvent.wait(1.0) - p.poll() - if self._abortEvent.is_set(): - p.kill() - rs = p.returncode - if (rs != SUCCESS): - msg = "Upload of target image and config XML to observer ID %s failed with error number %d" %(self._obsdict_key[self._obskey][1], rs) - errors.append((msg, rs, self._obsdict_key[self._obskey][1])) - logger.error(msg) - logger.error("Tried to execute %s" %(str(cmd))) - else: - logger.debug("Upload of target image and config XML to observer ID %s succeeded." 
%(self._obsdict_key[self._obskey][1])) - # Start the script on the observer which starts the test: - remote_cmd = config.get("observer", "starttestscript") + " --testid=%d --xml=%s/%s --serialport=%d" % (self._testid, testconfigfolder, os.path.basename(self._xmldict_key[self._obskey][0]), obsdataport) - if self._FlockDAQ: - remote_cmd += " --daqconfig=%s/%s"%(testconfigfolder, os.path.basename(daqconfigpath)) - if debug: - remote_cmd += " --debug" - cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), remote_cmd] - #DEBUG logger.debug("execute %s" %(str(cmd))) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - while p.returncode == None: - self._abortEvent.wait(1.0) - p.poll() - if self._abortEvent.is_set(): - p.kill() - else: - out, err = p.communicate() - rs = p.wait() - if rs != SUCCESS: - msg = "Test-start script on observer ID %s failed with error code %s and error message %s" %(self._obsdict_key[self._obskey][1], errno.errorcode[rs], str(out)) - errors.append((msg, rs, self._obsdict_key[self._obskey][1])) - logger.error(msg) - logger.error("Tried to execute %s" %(str(cmd))) - else: - logger.debug("Test-start script on observer ID %s succeeded." %(self._obsdict_key[self._obskey][1])) - # Remove image file and xml on server: - os.remove(self._xmldict_key[self._obskey][0]) - #DEBUG logger.debug("Removed XML config %s for observer ID %s"%(self._xmldict_key[self._obskey][0], self._obsdict_key[self._obskey][1])) - if self._obskey in list(self._imagedict_key.keys()): - for image in self._imagedict_key[self._obskey]: - os.remove(image[0]) - #DEBUG logger.debug("Removed target image %s for observer ID %s"%(self._imagedict_key[self._obskey][0], self._obsdict_key[self._obskey][1])) - if self._FlockDAQ: - os.remove(daqconfigpath) - - except Error: - # Main thread requested abort. - # Close a possibly still running subprocess: - if (p is not None) and (p.poll() is not None): - p.kill() - msg = "StartTestThread for observer ID %d aborted."%(self._obsdict_key[self._obskey][1]) - errors.append((msg, errno.ECOMM, self._obsdict_key[self._obskey][1])) - logger.error(msg) - finally: - if (len(errors) > 0): - self._errors_queue.put((self._obskey, errors)) - - def abort(self): - self._abortEvent.set() - + """ Thread which uploads all config files to an observer and + starts the test on the observer. 
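Further down in this diff, start_test() converts <relativeTime> offsets from the gpioActuationConf and powerProfilingConf blocks into absolute times with relative2absolute_time(), a helper defined or imported elsewhere in this script. A plausible implementation consistent with its call sites (test start time plus second/microsecond offsets in, microsecond part and formatted date/time string out); the format string here is an assumption, the real code presumably uses the configured observer time format:

import datetime

def relative2absolute_time(starttime, offset_secs, offset_microsecs,
                           timeformat='%Y-%m-%d %H:%M:%S'):
    # starttime is a datetime object (time_start_wish from the database).
    t = starttime + datetime.timedelta(seconds=offset_secs,
                                       microseconds=offset_microsecs)
    return t.microsecond, t.strftime(timeformat)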
+ """ + def __init__(self, obskey, obsdict_key, xmldict_key, imagedict_key, errors_queue, FlockDAQ, testid): + threading.Thread.__init__(self) + self._obskey = obskey + self._obsdict_key = obsdict_key + self._xmldict_key = xmldict_key + self._imagedict_key = imagedict_key + self._errors_queue = errors_queue + self._abortEvent = threading.Event() + self._FlockDAQ = FlockDAQ == 'true' + self._testid = testid + + def run(self): + errors = [] + testconfigfolder = "%s/%d" % (config.get("observer", "testconfigfolder"), self._testid) + obsdataport = config.getint('serialproxy', 'obsdataport') + try: + logger.debug("Start StartTestThread for observer ID %d, FlockDAQ=%s" % (self._obsdict_key[self._obskey][1], str(self._FlockDAQ))) + # First test if the observer is online and if the SD card is mounted: + cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), "ls ~/mmc/ && mkdir %s" % testconfigfolder] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + while p.returncode == None: + self._abortEvent.wait(1.0) + p.poll() + if self._abortEvent.is_set(): + p.kill() + else: + out, err = p.communicate() + rs = p.returncode + if (rs != 0): + if (rs == 1): + if ("No such file or directory" in err): + msg = "SD card on observer ID %s is not mounted, observer will thus be omitted for this test."%(self._obsdict_key[self._obskey][1]) + else: + msg = "Observer ID %s is not reachable, it will thus be omitted for this test (returned: %d: %s, %s)."%(self._obsdict_key[self._obskey][1], rs, out, err) + else: + msg = "Observer ID %s is not responsive, it will thus be omitted for this test (SSH returned %d). Command: %s"%(self._obsdict_key[self._obskey][1], rs, " ".join(cmd)) + errors.append((msg, errno.EHOSTUNREACH, self._obsdict_key[self._obskey][1])) + logger.error(msg) + else: + fileuploadlist = [self._xmldict_key[self._obskey][0]] + # generate DAQ config for daq tests: + if self._FlockDAQ: + (fd, daqconfigpath) = tempfile.mkstemp() + os.close(fd) + cmd = [config.get("dispatcher", "flockdaqconfigscript"), "--xml=%s" % self._xmldict_key[self._obskey][0], "--outfile=%s" % daqconfigpath] + p = subprocess.Popen(cmd, universal_newlines=True) + out, err = p.communicate() + rs = p.returncode + if rs != 0: + logger.error("Error %s returned from %s"%(str(rs), config.get('dispatcher','flockdaqconfigscript'))) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Generation of DAQ config file failed.") + errors.append("Output of script was: %s %s" % (str(out), str(err))) + else: + fileuploadlist.append(daqconfigpath) + if self._obskey in list(self._imagedict_key.keys()): + for image in self._imagedict_key[self._obskey]: + fileuploadlist.append(image[0]) + # Now upload the image and XML config file: + cmd = ['scp', '-q'] + cmd.extend(fileuploadlist) + cmd.append('%s:%s/.'%(self._obsdict_key[self._obskey][2], testconfigfolder)) + p = subprocess.Popen(cmd) + while p.returncode == None: + self._abortEvent.wait(1.0) + p.poll() + if self._abortEvent.is_set(): + p.kill() + rs = p.returncode + if (rs != SUCCESS): + msg = "Upload of target image and config XML to observer ID %s failed with error number %d" %(self._obsdict_key[self._obskey][1], rs) + errors.append((msg, rs, self._obsdict_key[self._obskey][1])) + logger.error(msg) + logger.error("Tried to execute %s" %(str(cmd))) + else: + logger.debug("Upload of target image and config XML to observer ID %s succeeded." 
%(self._obsdict_key[self._obskey][1])) + # Start the script on the observer which starts the test: + remote_cmd = config.get("observer", "starttestscript") + " --testid=%d --xml=%s/%s --serialport=%d" % (self._testid, testconfigfolder, os.path.basename(self._xmldict_key[self._obskey][0]), obsdataport) + if self._FlockDAQ: + remote_cmd += " --daqconfig=%s/%s"%(testconfigfolder, os.path.basename(daqconfigpath)) + if debug: + remote_cmd += " --debug" + cmd = ['ssh', '%s'%(self._obsdict_key[self._obskey][2]), remote_cmd] + #DEBUG logger.debug("execute %s" %(str(cmd))) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) + while p.returncode == None: + self._abortEvent.wait(1.0) + p.poll() + if self._abortEvent.is_set(): + p.kill() + else: + out, err = p.communicate() + rs = p.wait() + if rs != SUCCESS: + msg = "Test-start script on observer ID %s failed with error code %s and error message %s" %(self._obsdict_key[self._obskey][1], errno.errorcode[rs], str(out)) + errors.append((msg, rs, self._obsdict_key[self._obskey][1])) + logger.error(msg) + logger.error("Tried to execute %s" %(str(cmd))) + else: + logger.debug("Test-start script on observer ID %s succeeded." %(self._obsdict_key[self._obskey][1])) + # Remove image file and xml on server: + os.remove(self._xmldict_key[self._obskey][0]) + #DEBUG logger.debug("Removed XML config %s for observer ID %s"%(self._xmldict_key[self._obskey][0], self._obsdict_key[self._obskey][1])) + if self._obskey in list(self._imagedict_key.keys()): + for image in self._imagedict_key[self._obskey]: + os.remove(image[0]) + #DEBUG logger.debug("Removed target image %s for observer ID %s"%(self._imagedict_key[self._obskey][0], self._obsdict_key[self._obskey][1])) + if self._FlockDAQ: + os.remove(daqconfigpath) + + except Error: + # Main thread requested abort. + # Close a possibly still running subprocess: + if (p is not None) and (p.poll() is not None): + p.kill() + msg = "StartTestThread for observer ID %d aborted."%(self._obsdict_key[self._obskey][1]) + errors.append((msg, errno.ECOMM, self._obsdict_key[self._obskey][1])) + logger.error(msg) + finally: + if (len(errors) > 0): + self._errors_queue.put((self._obskey, errors)) + + def abort(self): + self._abortEvent.set() + ### END StartTestThread @@ -267,604 +267,604 @@ class StartTestThread(threading.Thread): # ############################################################################## def start_test(testid, cur, cn, obsdict_key, obsdict_id): - errors = [] - warnings = [] - FlockDAQ = "false" - - try: - logger.debug("Entering start_test() function...") - # First, validate the XML file again. If validation fails, return immediately: - cmd = [config.get('dispatcher','validationscript'), '--testid=%d'%testid] - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - rs = p.returncode - if rs != 0: - logger.error("Error %s returned from %s"%(str(rs), config.get('dispatcher','validationscript'))) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Validation of XML failed. 
Output of script was: %s %s" % (str(out), str(err))) - - if len(errors) == 0: - # Update DB status --- - # Update the status of the test in the db: - flocklab.set_test_status(cur, cn, testid, 'preparing') - - # Get start/stop time --- - cur.execute("SELECT `time_start_wish`, `time_end_wish`, `owner_fk` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" %testid) - # Times are going to be of datetime type: - ret = cur.fetchone() - starttime = ret[0] - stoptime = ret[1] - owner_fk = ret[2] - logger.debug("Got start time wish for test from database: %s" %starttime) - logger.debug("Got end time wish for test from database: %s" %stoptime) - - cur.execute("SELECT `use_daq` FROM `tbl_serv_users` WHERE (`serv_users_key` = %s)" %owner_fk) - ret = cur.fetchone() - if ret[0] == 1: - FlockDAQ = "true" - else: - FlockDAQ = "false" - - # Image processing --- - # Get all images from the database: - imagedict_key = {} - sql_image = """ SELECT `t`.`binary`, `m`.`observer_fk`, `m`.`node_id`, LOWER(`a`.`architecture`), LOWER(`o`.`name`) AS `osname`, `t`.`serv_targetimages_key`, LOWER(`p`.`name`) AS `platname`, `a`.`core` AS `core` - FROM `tbl_serv_targetimages` AS `t` - LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `m` - ON `t`.`serv_targetimages_key` = `m`.`targetimage_fk` - LEFT JOIN `tbl_serv_platforms` AS `p` - ON `t`.`platforms_fk` = `p`.`serv_platforms_key` - LEFT JOIN `tbl_serv_operatingsystems` AS `o` - ON `t`.`operatingsystems_fk` = `o`.`serv_operatingsystems_key` - LEFT JOIN `tbl_serv_architectures` AS `a` - ON `t`.`core` = `a`.`core` AND `p`.`serv_platforms_key` = `a`.`platforms_fk` - WHERE `m`.`test_fk` = %d - """ - cur.execute(sql_image%testid) - ret = cur.fetchall() - for r in ret: - binary = r[0] - obs_fk = r[1] - obs_id = obsdict_key[obs_fk][1] - node_id = r[2] - arch = r[3] - osname = r[4].lower() - tgimage_key = r[5] - platname = r[6] - core = r[7] - - # Prepare image --- - (fd, imagepath) = tempfile.mkstemp() - binpath = "%s" %(os.path.splitext(imagepath)[0]) - imagefile = os.fdopen(fd, 'w+b') - imagefile.write(binary) - imagefile.close() - removeimage = True - logger.debug("Got target image ID %s for observer ID %s with node ID %s from database and wrote it to temp file %s (hash %s)" %(str(tgimage_key), str(obs_id), str(node_id), imagepath, hashlib.sha1(binary).hexdigest())) - - # Convert image to binary format and, depending on operating system and platform architecture, write the node ID (if specified) to the image: - logger.debug("Found %s target platform architecture with %s operating system on platform %s for observer ID %s (node ID to be used: %s)." 
%(arch, osname, platname, str(obs_id), str(node_id))) - set_symbols_tool = config.get('dispatcher', 'setsymbolsscript') - symbol_node_id = "FLOCKLAB_NODE_ID" - # keep <os> tag for backwards compatibility - if ((node_id != None) and (osname == 'tinyos')): - symbol_node_id = "TOS_NODE_ID" - elif (osname == 'contiki'): - symbol_node_id = None # don't set node ID for OS Contiki - if (arch == 'msp430'): - binutils_path = config.get('dispatcher', 'binutils_msp430') - binpath = "%s.ihex"%binpath - if symbol_node_id: - cmd = ['%s'%(set_symbols_tool), '--objcopy', '%s/msp430-objcopy'%(binutils_path), '--objdump', '%s/msp430-objdump'%(binutils_path), '--target', 'ihex', imagepath, binpath, '%s=%s'%(symbol_node_id, node_id), 'ActiveMessageAddressC$addr=%s'%(node_id), 'ActiveMessageAddressC__addr=%s'%(node_id)] - try: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - rs = p.wait() - if rs != 0: - logger.error("Error %d returned from %s" % (rs, set_symbols_tool)) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Could not set node ID %s for target image %s" %(str(node_id), str(tgimage_key))) - else: - logger.debug("Set symbols and converted file to ihex.") - # Remove the temporary exe file - os.remove("%s.exe"%imagepath) - #logger.debug("Removed intermediate image %s.exe"%(str(imagepath))) - except OSError as err: - msg = "Error in subprocess: tried calling %s. Error was: %s"%(str(cmd), str(err)) - logger.error(msg) - errors.append(msg) - removeimage = False - else: - cmd = ['%s/msp430-objcopy'%(binutils_path), '--output-target', 'ihex', imagepath, binpath] - try: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - rs = p.wait() - if rs != 0: - logger.error("Error %d returned from msp430-objcopy" %rs) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Could not convert target image %s to ihex" %str(tgimage_key)) - else: - logger.debug("Converted file to ihex.") - except OSError as err: - msg = "Error in subprocess: tried calling %s. Error was: %s"%(str(cmd), str(err)) - logger.error(msg) - errors.append(msg) - removeimage = False - elif (arch == 'arm'): - if (platname == 'dpp'): - imgformat = 'ihex' - binpath = "%s.ihex"%binpath - else: - imgformat = 'binary' - binpath = "%s.bin"%binpath - # Set library path for arm-binutils: - arm_binutils_path = config.get('dispatcher', 'binutils_arm') - arm_env = os.environ - if 'LD_LIBRARY_PATH' not in arm_env: - arm_env['LD_LIBRARY_PATH'] = '' - arm_env['LD_LIBRARY_PATH'] += ':%s/%s'%(arm_binutils_path, "usr/x86_64-linux-gnu/arm-linux-gnu/lib") - if symbol_node_id: - cmd = ['%s'%(set_symbols_tool), '--objcopy', '%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objcopy"), '--objdump', '%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objdump"), '--target', imgformat, imagepath, binpath, '%s=%s'%(symbol_node_id, node_id), 'ActiveMessageAddressC$addr=%s'%(node_id), 'ActiveMessageAddressC__addr=%s'%(node_id)] - try: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=arm_env) - rs = p.wait() - if rs != 0: - logger.error("Error %d returned from %s" % (rs, set_symbols_tool)) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Could not set node ID %s for target image %s" %(str(node_id), str(tgimage_key))) - else: - logger.debug("Set symbols and converted file to bin.") - except OSError as err: - msg = "Error in subprocess: tried calling %s. 
Error was: %s"%(str(cmd), str(err)) - logger.error(msg) - errors.append(msg) - removeimage = False - else: - cmd = ['%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objcopy"), '--output-target', imgformat, imagepath, binpath] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=arm_env) - rs = p.wait() - if rs != 0: - logger.error("Error %d returned from arm-linux-gnu-objcopy" %rs) - logger.error("Tried to execute %s"%str(cmd)) - errors.append("Could not convert target image %s to bin" %str(tgimage_key)) - else: - logger.debug("Converted file to bin.") - else: - msg = "Unknown architecture %s found. The original target image (ID %s) file will be used without modification." %(arch, str(tgimage_key)) - errors.append(msg) - logger.error(msg) - orig = open(imagepath, "r+b") - binfile = open(binpath, "w+b") - binfile.write(orig.read()) - orig.close() - binfile.close() - logger.debug("Copied image to binary file without modification.") - - # Remove the original file which is not used anymore: - if removeimage: - os.remove(imagepath) - #logger.debug("Removed image %s"%(str(imagepath))) - else: - logger.warn("Image %s has not been removed."%(str(imagepath))) - - - # Slot detection --- - # Find out which slot number to use on the observer. - #logger.debug("Detecting adapter for %s on observer ID %s" %(platname, obs_id)) - ret = flocklab.get_slot(cur, int(obs_fk), platname) - if ret in range(1,5): - slot = ret - logger.debug("Found adapter for %s on observer ID %s in slot %d"%(platname, obs_id, slot)) - elif ret == 0: - slot = None - msg = "Could not find an adapter for %s on observer ID %s" %(platname, obs_id) - errors.append(msg) - logger.error(msg) - else: - slot = None - msg = "Error when detecting adapter for %s on observer ID %s: function returned %d" %(platname, obs_id, ret) - errors.append(msg) - logger.error(msg) - - # Write the dictionary for the image: - if not obs_fk in imagedict_key: - imagedict_key[obs_fk] = [] - imagedict_key[obs_fk].append((binpath, slot, platname, osname, 0.0, core)) - - logger.info("Processed all target images from database.") - - # XML processing --- - # Get the XML config from the database and generate a separate file for every observer used: - cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) - ret = cur.fetchone() - if not ret: - msg = "No XML found in database for testid %d." 
%testid - errors.append(msg) - logger.error(msg) - else: - parser = etree.XMLParser(remove_comments=True) - tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) - ns = {'d': config.get('xml', 'namespace')} - logger.debug("Got XML from database.") - # Create XML files --- - # Create an empty XML config file for every observer used and organize them in a dictionary: - xmldict_key = {} - for obs_key, obs_id, obs_ether in obsdict_key.values(): - (fd, xmlpath) = tempfile.mkstemp() - xmlfhand = os.fdopen(fd, 'w+') - xmldict_key[obs_key] = (xmlpath, xmlfhand) - xmlfhand.write('<?xml version="1.0" encoding="UTF-8"?>\n\n<obsConf>\n\n') - # Go through the blocks of the XML file and write the configs to the affected observer XML configs: - # targetConf --- - targetconfs = tree.xpath('//d:targetConf', namespaces=ns) - if not targetconfs: - msg = "no <targetConf> element found in XML config (wrong namespace?)" - errors.append(msg) - logger.error(msg) - for targetconf in targetconfs: - obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() - ret = targetconf.xpath('d:voltage', namespaces=ns) - if ret: - voltage = ret[0].text.strip() - else: - voltage = str(config.get("dispatcher", "default_tg_voltage")) - ret = targetconf.xpath('d:noImage', namespaces=ns) - if ret: - noImageSlot = ret[0].text.strip() - else: - noImageSlot = None - for obsid in obsids: - obsid = int(obsid) - obskey = obsdict_id[obsid][0] - xmldict_key[obskey][1].write("<obsTargetConf>\n") - xmldict_key[obskey][1].write("\t<voltage>%s</voltage>\n"%voltage) - if noImageSlot: - slot = noImageSlot - xmldict_key[obskey][1].write("\t<slotnr>%s</slotnr>\n"%(slot)) - else: - xmldict_key[obskey][1].write("\t<firmware>%s</firmware>\n"%(imagedict_key[obskey][0][4])) - for coreimage in imagedict_key[obskey]: - xmldict_key[obskey][1].write("\t<image core=\"%d\">%s%d/%s</image>\n"%(coreimage[5], config.get("observer", "testconfigfolder"),testid, os.path.basename(coreimage[0]))) - xmldict_key[obskey][1].write("\t<slotnr>%s</slotnr>\n"%(imagedict_key[obskey][0][1])) - xmldict_key[obskey][1].write("\t<platform>%s</platform>\n"%(imagedict_key[obskey][0][2])) - xmldict_key[obskey][1].write("\t<os>%s</os>\n"%(imagedict_key[obskey][0][3])) - slot = imagedict_key[obskey][0][1] - xmldict_key[obskey][1].write("\t<FlockDAQ>%s</FlockDAQ>\n"%FlockDAQ) - xmldict_key[obskey][1].write("</obsTargetConf>\n\n") - #logger.debug("Wrote obsTargetConf XML for observer ID %s" %obsid) - # update test_image mapping with slot information - cur.execute("UPDATE `tbl_serv_map_test_observer_targetimages` SET `slot` = %s WHERE `observer_fk` = %d AND `test_fk`=%d" % (slot, obskey, testid)) - cn.commit() - - - # serialConf --- - srconfs = tree.xpath('//d:serialConf', namespaces=ns) - serialProxyUsed = False - if srconfs: - # only use serialproxy if remote IP specified in xml - if tree.xpath('//d:serialConf/d:remoteIp', namespaces=ns): - serialProxyUsed = True - for srconf in srconfs: - obsids = srconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() - xmlblock = "<obsSerialConf>\n" - port = srconf.xpath('d:port', namespaces=ns) - if port: - port = srconf.xpath('d:port', namespaces=ns)[0].text.strip() - xmlblock += "\t<port>%s</port>\n" %port - baudrate = srconf.xpath('d:baudrate', namespaces=ns) - if baudrate: - baudrate = srconf.xpath('d:baudrate', namespaces=ns)[0].text.strip() - xmlblock += "\t<baudrate>%s</baudrate>\n" %baudrate - mode = srconf.xpath('d:mode', namespaces=ns) - if mode: - mode = srconf.xpath('d:mode', 
namespaces=ns)[0].text.strip() - xmlblock += "\t<mode>%s</mode>\n" %mode - xmlblock += "</obsSerialConf>\n\n" - for obsid in obsids: - obsid = int(obsid) - obskey = obsdict_id[obsid][0] - xmldict_key[obskey][1].write(xmlblock) - #logger.debug("Wrote obsSerialConf XML for observer ID %s" %obsid) - else: - logger.debug("No <serialConf> found, not using serial service") - - # gpioTracingConf --- - gmconfs = tree.xpath('//d:gpioTracingConf', namespaces=ns) - for gmconf in gmconfs: - obsids = gmconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() - pinconfs = gmconf.xpath('d:pinConf', namespaces=ns) - xmlblock = "<obsGpioMonitorConf>\n" - for pinconf in pinconfs: - pin = pinconf.xpath('d:pin', namespaces=ns)[0].text.strip() - edge = pinconf.xpath('d:edge', namespaces=ns)[0].text.strip() - mode = pinconf.xpath('d:mode', namespaces=ns)[0].text.strip() - xmlblock += "\t<pinConf>\n\t\t<pin>%s</pin>\n\t\t<edge>%s</edge>\n\t\t<mode>%s</mode>\n" %(pin, edge, mode) - cb_gs_add = pinconf.xpath('d:callbackGpioActAdd', namespaces=ns) - if cb_gs_add: - pin = cb_gs_add[0].xpath('d:pin', namespaces=ns)[0].text.strip() - level = cb_gs_add[0].xpath('d:level', namespaces=ns)[0].text.strip() - offsets = cb_gs_add[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip() - offsetms = cb_gs_add[0].xpath('d:offsetMicrosecs', namespaces=ns)[0].text.strip() - xmlblock += "\t\t<callbackGpioSetAdd>\n\t\t\t<pin>%s</pin>\n\t\t\t<level>%s</level>\n\t\t\t<offsetSecs>%s</offsetSecs>\n\t\t\t<offsetMicrosecs>%s</offsetMicrosecs>\n\t\t</callbackGpioSetAdd>\n" %(pin, level, offsets, offsetms) - cb_pp_add = pinconf.xpath('d:callbackPowerProfAdd', namespaces=ns) - if cb_pp_add: - duration = cb_pp_add[0].xpath('d:durationMillisecs', namespaces=ns)[0].text.strip() - offsets = cb_pp_add[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip() - offsetms = cb_pp_add[0].xpath('d:offsetMicrosecs', namespaces=ns)[0].text.strip() - xmlblock += "\t\t<callbackPowerprofAdd>\n\t\t\t<duration>%s</duration>\n\t\t\t<offsetSecs>%s</offsetSecs>\n\t\t\t<offsetMicrosecs>%s</offsetMicrosecs>\n\t\t</callbackPowerprofAdd>\n" %(duration, offsets, offsetms) - xmlblock += "\t</pinConf>\n" - xmlblock += "</obsGpioMonitorConf>\n\n" - for obsid in obsids: - obsid = int(obsid) - obskey = obsdict_id[obsid][0] - xmldict_key[obskey][1].write(xmlblock) - #logger.debug("Wrote obsGpioMonitorConf XML for observer ID %s" %obsid) - - - # gpioActuationConf --- - # Create 2 pin settings for every observer used in the test: - # 1) Pull reset pin of target low when test is to start - # 2) Pull reset pin of target high when test is to stop - xmlblock = "<obsGpioSettingConf>\n" - startdatetime = starttime.strftime(config.get("observer", "timeformat")) - startmicrosecs = starttime.microsecond - xmlblock += "\t<pinConf>\n\t\t<pin>RST</pin>\n\t\t<level>low</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%d</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>0</intervalMicrosecs>\n\t\t<count>1</count>\n\t</pinConf>\n" %(startdatetime, startmicrosecs) - stopdatetime = stoptime.strftime(config.get("observer", "timeformat")) - stopmicrosecs = stoptime.microsecond - xmlblock += "\t<pinConf>\n\t\t<pin>RST</pin>\n\t\t<level>high</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%d</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>0</intervalMicrosecs>\n\t\t<count>1</count>\n\t</pinConf>\n" %(stopdatetime, stopmicrosecs) - for obskey in 
obsdict_key.keys(): - xmldict_key[obskey][1].write(xmlblock) - # Now write the per-observer config: - gsconfs = tree.xpath('//d:gpioActuationConf', namespaces=ns) - for gsconf in gsconfs: - xmlblock = "" - obsids = gsconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() - pinconfs = gsconf.xpath('d:pinConf', namespaces=ns) - for pinconf in pinconfs: - pin = pinconf.xpath('d:pin', namespaces=ns)[0].text.strip() - level = pinconf.xpath('d:level', namespaces=ns)[0].text.strip() - abs_tim = pinconf.xpath('d:absoluteTime', namespaces=ns) - if abs_tim: - absdatetime = absolute2absoluteUTC_time(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()) - ret = abs_tim[0].xpath('d:absoluteMicrosecs', namespaces=ns) - if ret: - absmicrosec = int(ret[0].text.strip()) - else: - absmicrosec = 0 - rel_tim = pinconf.xpath('d:relativeTime', namespaces=ns) - if rel_tim: - relsec = int(rel_tim[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip()) - ret = rel_tim[0].xpath('d:offsetMicrosecs', namespaces=ns) - if ret: - relmicrosec = int(ret[0].text.strip()) - else: - relmicrosec = 0 - # Relative times need to be converted into absolute times: - absmicrosec, absdatetime = relative2absolute_time(starttime, relsec, relmicrosec) - periodic = pinconf.xpath('d:periodic', namespaces=ns) - if periodic: - interval = int(periodic[0].xpath('d:intervalMicrosecs', namespaces=ns)[0].text.strip()) - count = int(periodic[0].xpath('d:count', namespaces=ns)[0].text.strip()) - else: - interval = 0 - count = 1 - xmlblock += "\t<pinConf>\n\t\t<pin>%s</pin>\n\t\t<level>%s</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%s</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>%i</intervalMicrosecs>\n\t\t<count>%i</count>\n\t</pinConf>\n" %(pin, level, absdatetime, absmicrosec, interval, count) - for obsid in obsids: - obsid = int(obsid) - obskey = obsdict_id[obsid][0] - xmldict_key[obskey][1].write(xmlblock) - #logger.debug("Wrote obsGpioSettingConf XML for observer ID %s" %obsid) - xmlblock = "</obsGpioSettingConf>\n\n" - for obskey in obsdict_key.keys(): - xmldict_key[obskey][1].write(xmlblock) - - # powerProfilingConf --- - ppconfs = tree.xpath('//d:powerProfilingConf', namespaces=ns) - for ppconf in ppconfs: - obsids = ppconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() - profconfs = ppconf.xpath('d:profConf', namespaces=ns) - xmlblock = "<obsPowerprofConf>\n" - for profconf in profconfs: - duration = profconf.xpath('d:durationMillisecs', namespaces=ns)[0].text.strip() - xmlblock += "\t<profConf>\n\t\t<duration>%s</duration>" %duration - abs_tim = profconf.xpath('d:absoluteTime', namespaces=ns) - if abs_tim: - absdatetime = absolute2absoluteUTC_time(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()) # parse xml date - ret = abs_tim[0].xpath('d:absoluteMicrosecs', namespaces=ns) - if ret: - absmicrosec = ret[0].text.strip() - else: - absmicrosec = 0 - rel_tim = profconf.xpath('d:relativeTime', namespaces=ns) - if rel_tim: - relsec = int(rel_tim[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip()) - ret = rel_tim[0].xpath('d:offsetMicrosecs', namespaces=ns) - if ret: - relmicrosec = int(ret[0].text.strip()) - else: - relmicrosec = 0 - # Relative times need to be converted into absolute times: - absmicrosec, absdatetime = relative2absolute_time(starttime, relsec, relmicrosec) - xmlblock += 
"\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%s</absoluteMicrosecs>\n\t\t</absoluteTime>" %(absdatetime, absmicrosec) - samplingdivider = profconf.xpath('d:samplingDivider', namespaces=ns) - if samplingdivider: - samplingdivider = samplingdivider[0].text.strip() - else: - samplingdivider = config.get('dispatcher', 'default_sampling_divider') - xmlblock += "\n\t\t<samplingDivider>%s</samplingDivider>"%samplingdivider - xmlblock += "\n\t</profConf>\n" - xmlblock += "</obsPowerprofConf>\n\n" - for obsid in obsids: - obsid = int(obsid) - obskey = obsdict_id[obsid][0] - xmldict_key[obskey][1].write(xmlblock) - #logger.debug("Wrote obsPowerprofConf XML for observer ID %s" %obsid) - logger.debug("Wrote all observer XML configs.") - - # Close XML files --- - for xmlpath, xmlfhand in xmldict_key.values(): - xmlfhand.write("</obsConf>\n") - xmlfhand.close() - #logger.debug("Closed observer XML config %s"%xmlpath) - #logger.debug("Closed all observer XML configs.") - - # Upload configs to observers and start test --- - if len(errors) == 0: - if not db_register_activity(testid, cur, cn, 'start', iter(obsdict_key.keys())): - msg = "Could not access all needed observers for testid %d." %testid - errors.append(msg) - logger.error(msg) - if len(errors) == 0: - # -- START OF CRITICAL SECTION where dispatcher accesses used observers - # Start a thread for each observer which uploads the config and calls the test start script on the observer - thread_list = [] - errors_queue = queue.Queue() - for obskey in obsdict_key.keys(): - thread = StartTestThread(obskey, obsdict_key, xmldict_key, imagedict_key, errors_queue, FlockDAQ,testid) - thread_list.append((thread, obskey)) - thread.start() - #DEBUG logger.debug("Started thread for test start on observer ID %s" %(str(obsdict_key[obskey][1]))) - # Wait for all threads to finish: - for (thread, obskey) in thread_list: - # Wait max 75% of the setuptime: - thread.join(timeout=(config.getint('tests','setuptime')*0.75*60)) - if thread.isAlive(): - # Timeout occurred. Signal the thread to abort: - logger.error("Telling thread for test start on observer ID %s to abort..." %(str(obsdict_key[obskey][1]))) - thread.abort() - # Wait again for the aborted threads: - for (thread, obskey) in thread_list: - thread.join(timeout=10) - if thread.isAlive(): - msg = "Thread for test start on observer ID %s is still alive but should be aborted now." %(str(obsdict_key[obskey][1])) - errors.append(msg) - logger.error(msg) - # -- END OF CRITICAL SECTION where dispatcher accesses used observers - db_unregister_activity(testid, cur, cn, 'start') - - # Get all errors (if any). Observers which return errors are not regarded as a general error. In this - # case, the test is just started without the faulty observers if there is at least 1 observer that succeeded: - obs_error = [] - if not errors_queue.empty(): - logger.error("Queue with errors from test start thread is not empty. Getting errors...") - while not errors_queue.empty(): - errs = errors_queue.get() - for err in errs[1]: - logger.error("Error from test start thread for observer %s: %s" %(str(err[2]), str(err[0]))) - obs_error.append(err[2]) - warnings.append(err[0]) - # Check if there is at least 1 observer which succeeded: - if len(obs_error) > 0: - if (len(obsdict_id) == len(set(obs_error))): - msg = "None of the requested observers could successfully start the test." 
- errors.append(msg) - logger.error(msg) - - # Start proxy for serial service --- - if len(errors) == 0: - if serialProxyUsed: - # Start serial proxy: - cmd = [config.get("dispatcher", "serialproxyscript"), "--notify"] - if debug: - cmd.append("--debug") - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - rs = p.wait() - if (rs != 0): - msg = "Serial proxy for test ID %d could not be started. Serial proxy returned %d."%(testid, rs) - errors.append(msg) - logger.error(msg) - logger.debug("Executed command was: %s"%(str(cmd))) - else: - logger.debug("Started serial proxy.") - - # Start obsdbfetcher --- - if len(errors) == 0: - logger.debug("Starting DB fetcher...") - cmd = [config.get("dispatcher", "fetcherscript"), "--testid=%d"%testid] - if debug: - cmd.append("--debug") - p = subprocess.Popen(cmd) - rs = p.wait() - if rs != 0: - msg = "Could not start database fetcher for test ID %d. Fetcher returned error %d"%(testid, rs) - errors.append(msg) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) + errors = [] + warnings = [] + FlockDAQ = "false" + + try: + logger.debug("Entering start_test() function...") + # First, validate the XML file again. If validation fails, return immediately: + cmd = [config.get('dispatcher','validationscript'), '--testid=%d'%testid] + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + rs = p.returncode + if rs != 0: + logger.error("Error %s returned from %s"%(str(rs), config.get('dispatcher','validationscript'))) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Validation of XML failed. Output of script was: %s %s" % (str(out), str(err))) + + if len(errors) == 0: + # Update DB status --- + # Update the status of the test in the db: + flocklab.set_test_status(cur, cn, testid, 'preparing') + + # Get start/stop time --- + cur.execute("SELECT `time_start_wish`, `time_end_wish`, `owner_fk` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" %testid) + # Times are going to be of datetime type: + ret = cur.fetchone() + starttime = ret[0] + stoptime = ret[1] + owner_fk = ret[2] + logger.debug("Got start time wish for test from database: %s" %starttime) + logger.debug("Got end time wish for test from database: %s" %stoptime) + + cur.execute("SELECT `use_daq` FROM `tbl_serv_users` WHERE (`serv_users_key` = %s)" %owner_fk) + ret = cur.fetchone() + if ret[0] == 1: + FlockDAQ = "true" + else: + FlockDAQ = "false" + + # Image processing --- + # Get all images from the database: + imagedict_key = {} + sql_image = """ SELECT `t`.`binary`, `m`.`observer_fk`, `m`.`node_id`, LOWER(`a`.`architecture`), LOWER(`o`.`name`) AS `osname`, `t`.`serv_targetimages_key`, LOWER(`p`.`name`) AS `platname`, `a`.`core` AS `core` + FROM `tbl_serv_targetimages` AS `t` + LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `m` + ON `t`.`serv_targetimages_key` = `m`.`targetimage_fk` + LEFT JOIN `tbl_serv_platforms` AS `p` + ON `t`.`platforms_fk` = `p`.`serv_platforms_key` + LEFT JOIN `tbl_serv_operatingsystems` AS `o` + ON `t`.`operatingsystems_fk` = `o`.`serv_operatingsystems_key` + LEFT JOIN `tbl_serv_architectures` AS `a` + ON `t`.`core` = `a`.`core` AND `p`.`serv_platforms_key` = `a`.`platforms_fk` + WHERE `m`.`test_fk` = %d + """ + cur.execute(sql_image%testid) + ret = cur.fetchall() + for r in ret: + binary = r[0] + obs_fk = r[1] + obs_id = obsdict_key[obs_fk][1] + node_id = r[2] + arch = r[3] + osname = r[4].lower() + tgimage_key = r[5] + 
platname = r[6] + core = r[7] + + # Prepare image --- + (fd, imagepath) = tempfile.mkstemp() + binpath = "%s" %(os.path.splitext(imagepath)[0]) + imagefile = os.fdopen(fd, 'w+b') + imagefile.write(binary) + imagefile.close() + removeimage = True + logger.debug("Got target image ID %s for observer ID %s with node ID %s from database and wrote it to temp file %s (hash %s)" %(str(tgimage_key), str(obs_id), str(node_id), imagepath, hashlib.sha1(binary).hexdigest())) + + # Convert image to binary format and, depending on operating system and platform architecture, write the node ID (if specified) to the image: + logger.debug("Found %s target platform architecture with %s operating system on platform %s for observer ID %s (node ID to be used: %s)." %(arch, osname, platname, str(obs_id), str(node_id))) + set_symbols_tool = config.get('dispatcher', 'setsymbolsscript') + symbol_node_id = "FLOCKLAB_NODE_ID" + # keep <os> tag for backwards compatibility + if ((node_id != None) and (osname == 'tinyos')): + symbol_node_id = "TOS_NODE_ID" + elif (osname == 'contiki'): + symbol_node_id = None # don't set node ID for OS Contiki + if (arch == 'msp430'): + binutils_path = config.get('dispatcher', 'binutils_msp430') + binpath = "%s.ihex"%binpath + if symbol_node_id: + cmd = ['%s'%(set_symbols_tool), '--objcopy', '%s/msp430-objcopy'%(binutils_path), '--objdump', '%s/msp430-objdump'%(binutils_path), '--target', 'ihex', imagepath, binpath, '%s=%s'%(symbol_node_id, node_id), 'ActiveMessageAddressC$addr=%s'%(node_id), 'ActiveMessageAddressC__addr=%s'%(node_id)] + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + rs = p.wait() + if rs != 0: + logger.error("Error %d returned from %s" % (rs, set_symbols_tool)) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Could not set node ID %s for target image %s" %(str(node_id), str(tgimage_key))) + else: + logger.debug("Set symbols and converted file to ihex.") + # Remove the temporary exe file + os.remove("%s.exe"%imagepath) + #logger.debug("Removed intermediate image %s.exe"%(str(imagepath))) + except OSError as err: + msg = "Error in subprocess: tried calling %s. Error was: %s"%(str(cmd), str(err)) + logger.error(msg) + errors.append(msg) + removeimage = False + else: + cmd = ['%s/msp430-objcopy'%(binutils_path), '--output-target', 'ihex', imagepath, binpath] + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + rs = p.wait() + if rs != 0: + logger.error("Error %d returned from msp430-objcopy" %rs) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Could not convert target image %s to ihex" %str(tgimage_key)) + else: + logger.debug("Converted file to ihex.") + except OSError as err: + msg = "Error in subprocess: tried calling %s. 
Error was: %s"%(str(cmd), str(err)) + logger.error(msg) + errors.append(msg) + removeimage = False + elif (arch == 'arm'): + if (platname == 'dpp'): + imgformat = 'ihex' + binpath = "%s.ihex"%binpath + else: + imgformat = 'binary' + binpath = "%s.bin"%binpath + # Set library path for arm-binutils: + arm_binutils_path = config.get('dispatcher', 'binutils_arm') + arm_env = os.environ + if 'LD_LIBRARY_PATH' not in arm_env: + arm_env['LD_LIBRARY_PATH'] = '' + arm_env['LD_LIBRARY_PATH'] += ':%s/%s'%(arm_binutils_path, "usr/x86_64-linux-gnu/arm-linux-gnu/lib") + if symbol_node_id: + cmd = ['%s'%(set_symbols_tool), '--objcopy', '%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objcopy"), '--objdump', '%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objdump"), '--target', imgformat, imagepath, binpath, '%s=%s'%(symbol_node_id, node_id), 'ActiveMessageAddressC$addr=%s'%(node_id), 'ActiveMessageAddressC__addr=%s'%(node_id)] + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=arm_env) + rs = p.wait() + if rs != 0: + logger.error("Error %d returned from %s" % (rs, set_symbols_tool)) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Could not set node ID %s for target image %s" %(str(node_id), str(tgimage_key))) + else: + logger.debug("Set symbols and converted file to bin.") + except OSError as err: + msg = "Error in subprocess: tried calling %s. Error was: %s"%(str(cmd), str(err)) + logger.error(msg) + errors.append(msg) + removeimage = False + else: + cmd = ['%s/%s'%(arm_binutils_path, "usr/bin/arm-linux-gnu-objcopy"), '--output-target', imgformat, imagepath, binpath] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=arm_env) + rs = p.wait() + if rs != 0: + logger.error("Error %d returned from arm-linux-gnu-objcopy" %rs) + logger.error("Tried to execute %s"%str(cmd)) + errors.append("Could not convert target image %s to bin" %str(tgimage_key)) + else: + logger.debug("Converted file to bin.") + else: + msg = "Unknown architecture %s found. The original target image (ID %s) file will be used without modification." %(arch, str(tgimage_key)) + errors.append(msg) + logger.error(msg) + orig = open(imagepath, "r+b") + binfile = open(binpath, "w+b") + binfile.write(orig.read()) + orig.close() + binfile.close() + logger.debug("Copied image to binary file without modification.") + + # Remove the original file which is not used anymore: + if removeimage: + os.remove(imagepath) + #logger.debug("Removed image %s"%(str(imagepath))) + else: + logger.warn("Image %s has not been removed."%(str(imagepath))) + + + # Slot detection --- + # Find out which slot number to use on the observer. 
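One detail in the ARM branch just above: arm_env = os.environ binds a reference to the live process environment rather than a copy, so the LD_LIBRARY_PATH suffix is appended again for every ARM image processed in the loop and leaks into all later subprocess calls. Copying the environment avoids both effects; a minimal sketch, with the path value as a placeholder that the real code reads from the config:

import os

arm_binutils_path = '/path/to/arm-binutils'   # placeholder for config.get('dispatcher', 'binutils_arm')

arm_env = os.environ.copy()                   # private copy; os.environ stays untouched
extra = '%s/usr/x86_64-linux-gnu/arm-linux-gnu/lib' % arm_binutils_path
arm_env['LD_LIBRARY_PATH'] = ':'.join(p for p in (arm_env.get('LD_LIBRARY_PATH', ''), extra) if p)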
+ #logger.debug("Detecting adapter for %s on observer ID %s" %(platname, obs_id)) + ret = flocklab.get_slot(cur, int(obs_fk), platname) + if ret in range(1,5): + slot = ret + logger.debug("Found adapter for %s on observer ID %s in slot %d"%(platname, obs_id, slot)) + elif ret == 0: + slot = None + msg = "Could not find an adapter for %s on observer ID %s" %(platname, obs_id) + errors.append(msg) + logger.error(msg) + else: + slot = None + msg = "Error when detecting adapter for %s on observer ID %s: function returned %d" %(platname, obs_id, ret) + errors.append(msg) + logger.error(msg) + + # Write the dictionary for the image: + if not obs_fk in imagedict_key: + imagedict_key[obs_fk] = [] + imagedict_key[obs_fk].append((binpath, slot, platname, osname, 0.0, core)) + + logger.info("Processed all target images from database.") + + # XML processing --- + # Get the XML config from the database and generate a separate file for every observer used: + cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) + ret = cur.fetchone() + if not ret: + msg = "No XML found in database for testid %d." %testid + errors.append(msg) + logger.error(msg) + else: + parser = etree.XMLParser(remove_comments=True) + tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) + ns = {'d': config.get('xml', 'namespace')} + logger.debug("Got XML from database.") + # Create XML files --- + # Create an empty XML config file for every observer used and organize them in a dictionary: + xmldict_key = {} + for obs_key, obs_id, obs_ether in obsdict_key.values(): + (fd, xmlpath) = tempfile.mkstemp() + xmlfhand = os.fdopen(fd, 'w+') + xmldict_key[obs_key] = (xmlpath, xmlfhand) + xmlfhand.write('<?xml version="1.0" encoding="UTF-8"?>\n\n<obsConf>\n\n') + # Go through the blocks of the XML file and write the configs to the affected observer XML configs: + # targetConf --- + targetconfs = tree.xpath('//d:targetConf', namespaces=ns) + if not targetconfs: + msg = "no <targetConf> element found in XML config (wrong namespace?)" + errors.append(msg) + logger.error(msg) + for targetconf in targetconfs: + obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() + ret = targetconf.xpath('d:voltage', namespaces=ns) + if ret: + voltage = ret[0].text.strip() + else: + voltage = str(config.get("dispatcher", "default_tg_voltage")) + ret = targetconf.xpath('d:noImage', namespaces=ns) + if ret: + noImageSlot = ret[0].text.strip() + else: + noImageSlot = None + for obsid in obsids: + obsid = int(obsid) + obskey = obsdict_id[obsid][0] + xmldict_key[obskey][1].write("<obsTargetConf>\n") + xmldict_key[obskey][1].write("\t<voltage>%s</voltage>\n"%voltage) + if noImageSlot: + slot = noImageSlot + xmldict_key[obskey][1].write("\t<slotnr>%s</slotnr>\n"%(slot)) + else: + xmldict_key[obskey][1].write("\t<firmware>%s</firmware>\n"%(imagedict_key[obskey][0][4])) + for coreimage in imagedict_key[obskey]: + xmldict_key[obskey][1].write("\t<image core=\"%d\">%s%d/%s</image>\n"%(coreimage[5], config.get("observer", "testconfigfolder"),testid, os.path.basename(coreimage[0]))) + xmldict_key[obskey][1].write("\t<slotnr>%s</slotnr>\n"%(imagedict_key[obskey][0][1])) + xmldict_key[obskey][1].write("\t<platform>%s</platform>\n"%(imagedict_key[obskey][0][2])) + xmldict_key[obskey][1].write("\t<os>%s</os>\n"%(imagedict_key[obskey][0][3])) + slot = imagedict_key[obskey][0][1] + xmldict_key[obskey][1].write("\t<FlockDAQ>%s</FlockDAQ>\n"%FlockDAQ) + 
xmldict_key[obskey][1].write("</obsTargetConf>\n\n") + #logger.debug("Wrote obsTargetConf XML for observer ID %s" %obsid) + # update test_image mapping with slot information + cur.execute("UPDATE `tbl_serv_map_test_observer_targetimages` SET `slot` = %s WHERE `observer_fk` = %d AND `test_fk`=%d" % (slot, obskey, testid)) + cn.commit() + + + # serialConf --- + srconfs = tree.xpath('//d:serialConf', namespaces=ns) + serialProxyUsed = False + if srconfs: + # only use serialproxy if remote IP specified in xml + if tree.xpath('//d:serialConf/d:remoteIp', namespaces=ns): + serialProxyUsed = True + for srconf in srconfs: + obsids = srconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() + xmlblock = "<obsSerialConf>\n" + port = srconf.xpath('d:port', namespaces=ns) + if port: + port = srconf.xpath('d:port', namespaces=ns)[0].text.strip() + xmlblock += "\t<port>%s</port>\n" %port + baudrate = srconf.xpath('d:baudrate', namespaces=ns) + if baudrate: + baudrate = srconf.xpath('d:baudrate', namespaces=ns)[0].text.strip() + xmlblock += "\t<baudrate>%s</baudrate>\n" %baudrate + mode = srconf.xpath('d:mode', namespaces=ns) + if mode: + mode = srconf.xpath('d:mode', namespaces=ns)[0].text.strip() + xmlblock += "\t<mode>%s</mode>\n" %mode + xmlblock += "</obsSerialConf>\n\n" + for obsid in obsids: + obsid = int(obsid) + obskey = obsdict_id[obsid][0] + xmldict_key[obskey][1].write(xmlblock) + #logger.debug("Wrote obsSerialConf XML for observer ID %s" %obsid) + else: + logger.debug("No <serialConf> found, not using serial service") + + # gpioTracingConf --- + gmconfs = tree.xpath('//d:gpioTracingConf', namespaces=ns) + for gmconf in gmconfs: + obsids = gmconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() + pinconfs = gmconf.xpath('d:pinConf', namespaces=ns) + xmlblock = "<obsGpioMonitorConf>\n" + for pinconf in pinconfs: + pin = pinconf.xpath('d:pin', namespaces=ns)[0].text.strip() + edge = pinconf.xpath('d:edge', namespaces=ns)[0].text.strip() + mode = pinconf.xpath('d:mode', namespaces=ns)[0].text.strip() + xmlblock += "\t<pinConf>\n\t\t<pin>%s</pin>\n\t\t<edge>%s</edge>\n\t\t<mode>%s</mode>\n" %(pin, edge, mode) + cb_gs_add = pinconf.xpath('d:callbackGpioActAdd', namespaces=ns) + if cb_gs_add: + pin = cb_gs_add[0].xpath('d:pin', namespaces=ns)[0].text.strip() + level = cb_gs_add[0].xpath('d:level', namespaces=ns)[0].text.strip() + offsets = cb_gs_add[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip() + offsetms = cb_gs_add[0].xpath('d:offsetMicrosecs', namespaces=ns)[0].text.strip() + xmlblock += "\t\t<callbackGpioSetAdd>\n\t\t\t<pin>%s</pin>\n\t\t\t<level>%s</level>\n\t\t\t<offsetSecs>%s</offsetSecs>\n\t\t\t<offsetMicrosecs>%s</offsetMicrosecs>\n\t\t</callbackGpioSetAdd>\n" %(pin, level, offsets, offsetms) + cb_pp_add = pinconf.xpath('d:callbackPowerProfAdd', namespaces=ns) + if cb_pp_add: + duration = cb_pp_add[0].xpath('d:durationMillisecs', namespaces=ns)[0].text.strip() + offsets = cb_pp_add[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip() + offsetms = cb_pp_add[0].xpath('d:offsetMicrosecs', namespaces=ns)[0].text.strip() + xmlblock += "\t\t<callbackPowerprofAdd>\n\t\t\t<duration>%s</duration>\n\t\t\t<offsetSecs>%s</offsetSecs>\n\t\t\t<offsetMicrosecs>%s</offsetMicrosecs>\n\t\t</callbackPowerprofAdd>\n" %(duration, offsets, offsetms) + xmlblock += "\t</pinConf>\n" + xmlblock += "</obsGpioMonitorConf>\n\n" + for obsid in obsids: + obsid = int(obsid) + obskey = obsdict_id[obsid][0] + xmldict_key[obskey][1].write(xmlblock) + #logger.debug("Wrote obsGpioMonitorConf XML for 
observer ID %s" %obsid) + + + # gpioActuationConf --- + # Create 2 pin settings for every observer used in the test: + # 1) Pull reset pin of target low when test is to start + # 2) Pull reset pin of target high when test is to stop + xmlblock = "<obsGpioSettingConf>\n" + startdatetime = starttime.strftime(config.get("observer", "timeformat")) + startmicrosecs = starttime.microsecond + xmlblock += "\t<pinConf>\n\t\t<pin>RST</pin>\n\t\t<level>low</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%d</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>0</intervalMicrosecs>\n\t\t<count>1</count>\n\t</pinConf>\n" %(startdatetime, startmicrosecs) + stopdatetime = stoptime.strftime(config.get("observer", "timeformat")) + stopmicrosecs = stoptime.microsecond + xmlblock += "\t<pinConf>\n\t\t<pin>RST</pin>\n\t\t<level>high</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%d</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>0</intervalMicrosecs>\n\t\t<count>1</count>\n\t</pinConf>\n" %(stopdatetime, stopmicrosecs) + for obskey in obsdict_key.keys(): + xmldict_key[obskey][1].write(xmlblock) + # Now write the per-observer config: + gsconfs = tree.xpath('//d:gpioActuationConf', namespaces=ns) + for gsconf in gsconfs: + xmlblock = "" + obsids = gsconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() + pinconfs = gsconf.xpath('d:pinConf', namespaces=ns) + for pinconf in pinconfs: + pin = pinconf.xpath('d:pin', namespaces=ns)[0].text.strip() + level = pinconf.xpath('d:level', namespaces=ns)[0].text.strip() + abs_tim = pinconf.xpath('d:absoluteTime', namespaces=ns) + if abs_tim: + absdatetime = absolute2absoluteUTC_time(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()) + ret = abs_tim[0].xpath('d:absoluteMicrosecs', namespaces=ns) + if ret: + absmicrosec = int(ret[0].text.strip()) + else: + absmicrosec = 0 + rel_tim = pinconf.xpath('d:relativeTime', namespaces=ns) + if rel_tim: + relsec = int(rel_tim[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip()) + ret = rel_tim[0].xpath('d:offsetMicrosecs', namespaces=ns) + if ret: + relmicrosec = int(ret[0].text.strip()) + else: + relmicrosec = 0 + # Relative times need to be converted into absolute times: + absmicrosec, absdatetime = relative2absolute_time(starttime, relsec, relmicrosec) + periodic = pinconf.xpath('d:periodic', namespaces=ns) + if periodic: + interval = int(periodic[0].xpath('d:intervalMicrosecs', namespaces=ns)[0].text.strip()) + count = int(periodic[0].xpath('d:count', namespaces=ns)[0].text.strip()) + else: + interval = 0 + count = 1 + xmlblock += "\t<pinConf>\n\t\t<pin>%s</pin>\n\t\t<level>%s</level>\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%s</absoluteMicrosecs>\n\t\t</absoluteTime>\n\t\t<intervalMicrosecs>%i</intervalMicrosecs>\n\t\t<count>%i</count>\n\t</pinConf>\n" %(pin, level, absdatetime, absmicrosec, interval, count) + for obsid in obsids: + obsid = int(obsid) + obskey = obsdict_id[obsid][0] + xmldict_key[obskey][1].write(xmlblock) + #logger.debug("Wrote obsGpioSettingConf XML for observer ID %s" %obsid) + xmlblock = "</obsGpioSettingConf>\n\n" + for obskey in obsdict_key.keys(): + xmldict_key[obskey][1].write(xmlblock) + + # powerProfilingConf --- + ppconfs = tree.xpath('//d:powerProfilingConf', namespaces=ns) + for ppconf in ppconfs: + obsids = ppconf.xpath('d:obsIds', namespaces=ns)[0].text.strip().split() + profconfs = 
ppconf.xpath('d:profConf', namespaces=ns) + xmlblock = "<obsPowerprofConf>\n" + for profconf in profconfs: + duration = profconf.xpath('d:durationMillisecs', namespaces=ns)[0].text.strip() + xmlblock += "\t<profConf>\n\t\t<duration>%s</duration>" %duration + abs_tim = profconf.xpath('d:absoluteTime', namespaces=ns) + if abs_tim: + absdatetime = absolute2absoluteUTC_time(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()) # parse xml date + ret = abs_tim[0].xpath('d:absoluteMicrosecs', namespaces=ns) + if ret: + absmicrosec = ret[0].text.strip() + else: + absmicrosec = 0 + rel_tim = profconf.xpath('d:relativeTime', namespaces=ns) + if rel_tim: + relsec = int(rel_tim[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip()) + ret = rel_tim[0].xpath('d:offsetMicrosecs', namespaces=ns) + if ret: + relmicrosec = int(ret[0].text.strip()) + else: + relmicrosec = 0 + # Relative times need to be converted into absolute times: + absmicrosec, absdatetime = relative2absolute_time(starttime, relsec, relmicrosec) + xmlblock += "\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%s</absoluteMicrosecs>\n\t\t</absoluteTime>" %(absdatetime, absmicrosec) + samplingdivider = profconf.xpath('d:samplingDivider', namespaces=ns) + if samplingdivider: + samplingdivider = samplingdivider[0].text.strip() + else: + samplingdivider = config.get('dispatcher', 'default_sampling_divider') + xmlblock += "\n\t\t<samplingDivider>%s</samplingDivider>"%samplingdivider + xmlblock += "\n\t</profConf>\n" + xmlblock += "</obsPowerprofConf>\n\n" + for obsid in obsids: + obsid = int(obsid) + obskey = obsdict_id[obsid][0] + xmldict_key[obskey][1].write(xmlblock) + #logger.debug("Wrote obsPowerprofConf XML for observer ID %s" %obsid) + logger.debug("Wrote all observer XML configs.") + + # Close XML files --- + for xmlpath, xmlfhand in xmldict_key.values(): + xmlfhand.write("</obsConf>\n") + xmlfhand.close() + #logger.debug("Closed observer XML config %s"%xmlpath) + #logger.debug("Closed all observer XML configs.") + + # Upload configs to observers and start test --- + if len(errors) == 0: + if not db_register_activity(testid, cur, cn, 'start', iter(obsdict_key.keys())): + msg = "Could not access all needed observers for testid %d." %testid + errors.append(msg) + logger.error(msg) + if len(errors) == 0: + # -- START OF CRITICAL SECTION where dispatcher accesses used observers + # Start a thread for each observer which uploads the config and calls the test start script on the observer + thread_list = [] + errors_queue = queue.Queue() + for obskey in obsdict_key.keys(): + thread = StartTestThread(obskey, obsdict_key, xmldict_key, imagedict_key, errors_queue, FlockDAQ,testid) + thread_list.append((thread, obskey)) + thread.start() + #DEBUG logger.debug("Started thread for test start on observer ID %s" %(str(obsdict_key[obskey][1]))) + # Wait for all threads to finish: + for (thread, obskey) in thread_list: + # Wait max 75% of the setuptime: + thread.join(timeout=(config.getint('tests','setuptime')*0.75*60)) + if thread.isAlive(): + # Timeout occurred. Signal the thread to abort: + logger.error("Telling thread for test start on observer ID %s to abort..." %(str(obsdict_key[obskey][1]))) + thread.abort() + # Wait again for the aborted threads: + for (thread, obskey) in thread_list: + thread.join(timeout=10) + if thread.isAlive(): + msg = "Thread for test start on observer ID %s is still alive but should be aborted now." 
%(str(obsdict_key[obskey][1])) + errors.append(msg) + logger.error(msg) + # -- END OF CRITICAL SECTION where dispatcher accesses used observers + db_unregister_activity(testid, cur, cn, 'start') + + # Get all errors (if any). Observers which return errors are not regarded as a general error. In this + # case, the test is just started without the faulty observers if there is at least 1 observer that succeeded: + obs_error = [] + if not errors_queue.empty(): + logger.error("Queue with errors from test start thread is not empty. Getting errors...") + while not errors_queue.empty(): + errs = errors_queue.get() + for err in errs[1]: + logger.error("Error from test start thread for observer %s: %s" %(str(err[2]), str(err[0]))) + obs_error.append(err[2]) + warnings.append(err[0]) + # Check if there is at least 1 observer which succeeded: + if len(obs_error) > 0: + if (len(obsdict_id) == len(set(obs_error))): + msg = "None of the requested observers could successfully start the test." + errors.append(msg) + logger.error(msg) + + # Start proxy for serial service --- + if len(errors) == 0: + if serialProxyUsed: + # Start serial proxy: + cmd = [config.get("dispatcher", "serialproxyscript"), "--notify"] + if debug: + cmd.append("--debug") + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + rs = p.wait() + if (rs != 0): + msg = "Serial proxy for test ID %d could not be started. Serial proxy returned %d."%(testid, rs) + errors.append(msg) + logger.error(msg) + logger.debug("Executed command was: %s"%(str(cmd))) + else: + logger.debug("Started serial proxy.") + + # Start obsdbfetcher --- + if len(errors) == 0: + logger.debug("Starting DB fetcher...") + cmd = [config.get("dispatcher", "fetcherscript"), "--testid=%d"%testid] + if debug: + cmd.append("--debug") + p = subprocess.Popen(cmd) + rs = p.wait() + if rs != 0: + msg = "Could not start database fetcher for test ID %d. Fetcher returned error %d"%(testid, rs) + errors.append(msg) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) - # check if we're still in time - # - now = time.strftime(config.get("database", "timeformat"), time.gmtime()) - cur.execute("SELECT `serv_tests_key` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d AND `time_start_wish` <= '%s'" % (testid, now)) - if cur.fetchone() is not None: - msg = "Setup for test ID %d took too much time."%(testid) - errors.append(msg) - logger.error(msg) + # check if we're still in time + # + now = time.strftime(config.get("database", "timeformat"), time.gmtime()) + cur.execute("SELECT `serv_tests_key` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d AND `time_start_wish` <= '%s'" % (testid, now)) + if cur.fetchone() is not None: + msg = "Setup for test ID %d took too much time."%(testid) + errors.append(msg) + logger.error(msg) - # Update DB status, set start time --- - if len(errors) == 0: - logger.debug("Setting test status in DB to running...") - flocklab.set_test_status(cur, cn, testid, 'running') - cur.execute("UPDATE `tbl_serv_tests` SET `time_start_act` = `time_start_wish` WHERE `serv_tests_key` = %d" %testid) - cn.commit() - else: - logger.debug("Setting test status in DB to aborting...") - flocklab.set_test_status(cur, cn, testid, 'aborting') - cur.execute("UPDATE `tbl_serv_tests` SET `time_start_act` = `time_start_wish`, `time_end_act` = UTC_TIMESTAMP() WHERE `serv_tests_key` = %d" %testid) - cn.commit() - logger.debug("At end of start_test(). 
Returning...") - - # Set a time for the scheduler to check for the test to stop --- - # This is done using the 'at' command: - if len(errors) == 0: - lag = 5 - # avoid scheduling a scheduler around full minute +/- 5s - if (stoptime.second+lag) % 60 < 5: - lag = lag + 5 - ((stoptime.second+lag) % 60) - elif (stoptime.second+lag) % 60 > 55: - lag = lag + 60 - ((stoptime.second+lag) % 60) + 5 - # Only schedule scheduler if it's the only one at that time - cmd = ['atq'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - rs = p.returncode - if rs == 0: - #logger.debug("Output of atq is: %s"%(out)) - stopTimeString = str(stoptime).split()[1] - if not out or stopTimeString not in out: - logger.debug("Scheduling scheduler for %s +%ds using at command..."%(stoptime, lag)) - (fd, tmppath) = tempfile.mkstemp() - tmpfile = os.fdopen(fd, 'w') - # The at command can only schedule with a minute resolution. Thus let the script sleep for the time required and add some slack: - tmpfile.write("sleep %d;\n"%(stoptime.second+lag)) - tmpfile.write("%s "%(config.get("dispatcher", "schedulerscript"))) - if debug: - tmpfile.write("--debug ") - tmpfile.write(">> /dev/null 2>&1\n") - tmpfile.close() - # Register the command: - cmd = ['at', '-M', '-t', stoptime.strftime('%Y%m%d%H%M'), '-f', tmppath] - p = subprocess.Popen(cmd, stderr=subprocess.PIPE) - rs = p.wait() - # Delete the temp script: - os.unlink(tmppath) - if rs != 0: - msg = "Could not schedule scheduler for test ID %d. at command returned error %d"%(testid, rs) - warnings.append(msg) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - else: - logger.debug("Scheduled scheduler.") + # Update DB status, set start time --- + if len(errors) == 0: + logger.debug("Setting test status in DB to running...") + flocklab.set_test_status(cur, cn, testid, 'running') + cur.execute("UPDATE `tbl_serv_tests` SET `time_start_act` = `time_start_wish` WHERE `serv_tests_key` = %d" %testid) + cn.commit() + else: + logger.debug("Setting test status in DB to aborting...") + flocklab.set_test_status(cur, cn, testid, 'aborting') + cur.execute("UPDATE `tbl_serv_tests` SET `time_start_act` = `time_start_wish`, `time_end_act` = UTC_TIMESTAMP() WHERE `serv_tests_key` = %d" %testid) + cn.commit() + logger.debug("At end of start_test(). Returning...") + + # Set a time for the scheduler to check for the test to stop --- + # This is done using the 'at' command: + if len(errors) == 0: + lag = 5 + # avoid scheduling a scheduler around full minute +/- 5s + if (stoptime.second+lag) % 60 < 5: + lag = lag + 5 - ((stoptime.second+lag) % 60) + elif (stoptime.second+lag) % 60 > 55: + lag = lag + 60 - ((stoptime.second+lag) % 60) + 5 + # Only schedule scheduler if it's the only one at that time + cmd = ['atq'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + rs = p.returncode + if rs == 0: + #logger.debug("Output of atq is: %s"%(out)) + stopTimeString = str(stoptime).split()[1] + if not out or stopTimeString not in out: + logger.debug("Scheduling scheduler for %s +%ds using at command..."%(stoptime, lag)) + (fd, tmppath) = tempfile.mkstemp() + tmpfile = os.fdopen(fd, 'w') + # The at command can only schedule with a minute resolution. 
Thus let the script sleep for the time required and add some slack: + tmpfile.write("sleep %d;\n"%(stoptime.second+lag)) + tmpfile.write("%s "%(config.get("dispatcher", "schedulerscript"))) + if debug: + tmpfile.write("--debug ") + tmpfile.write(">> /dev/null 2>&1\n") + tmpfile.close() + # Register the command: + cmd = ['at', '-M', '-t', stoptime.strftime('%Y%m%d%H%M'), '-f', tmppath] + p = subprocess.Popen(cmd, stderr=subprocess.PIPE) + rs = p.wait() + # Delete the temp script: + os.unlink(tmppath) + if rs != 0: + msg = "Could not schedule scheduler for test ID %d. at command returned error %d"%(testid, rs) + warnings.append(msg) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + else: + logger.debug("Scheduled scheduler.") - else: - logger.debug("Already scheduler scheduled for %s"%stoptime) - else: - logger.debug("Could not execute atq, continue") + else: + logger.debug("Already scheduler scheduled for %s"%stoptime) + else: + logger.debug("Could not execute atq, continue") - return (errors, warnings) - except Exception: - msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) - print(msg) - logger.warn(msg) - raise + return (errors, warnings) + except Exception: + msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) + print(msg) + logger.warn(msg) + raise ### END start_test() @@ -875,118 +875,118 @@ def start_test(testid, cur, cn, obsdict_key, obsdict_id): # ############################################################################## def stop_test(testid, cur, cn, obsdict_key, obsdict_id, abort=False): - errors = [] - warnings = [] - - try: - logger.info("Stopping test %d..."%testid) - - # Update DB status --- - if abort: - status = 'aborting' - else: - status = 'cleaning up' - logger.debug("Setting test status in DB to %s..." %status) - flocklab.set_test_status(cur, cn, testid, status) - - # Stop serial proxy --- - # Get the XML config from the database and check if the serial service was used in the test: - cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) - ret = cur.fetchone() - if not ret: - msg = "No XML found in database for testid %d." %testid - errors.append(msg) - logger.error(msg) - else: - parser = etree.XMLParser(remove_comments=True) - tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) - ns = {'d': config.get('xml', 'namespace')} - logger.debug("Got XML from database.") - # only stop serialproxy if remote IP specified in xml - if tree.xpath('//d:serialConf/d:remoteIp', namespaces=ns): - # Serial service was used. Thus stop the serial proxy: - logger.debug("Usage of serial service detected. Stopping serial proxy...") - cmd = [config.get("dispatcher", "serialproxyscript"), "--notify"] - if debug: - cmd.append("--debug") - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - rs = p.wait() - if (rs != 0): - msg = "Serial proxy for test ID %d could not be stopped. Serial proxy returned %d."%(testid, rs) - errors.append(msg) - logger.error(msg) - logger.debug("Executed command was: %s"%(str(cmd))) - else: - logger.debug("Stopped serial proxy.") - - # Stop test on observers --- - if not db_register_activity(testid, cur, cn, 'stop', iter(obsdict_key.keys())): - msg = "Some observers were occupied while stopping test." 
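A minimal sketch (not part of the patch, names are illustrative) of the `at` scheduling trick used in start_test() above: since `at` only resolves to the minute, the generated job first sleeps for the remaining seconds plus some slack before invoking the scheduler command.

    import os, subprocess, tempfile

    def schedule_with_at(command, when, slack_secs=5):
        """Run `command` shortly after the datetime `when` with second precision."""
        fd, path = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write("sleep %d;\n" % (when.second + slack_secs))   # bridge the sub-minute part
            f.write("%s >> /dev/null 2>&1\n" % command)
        # -M suppresses mail, -t takes the start time in YYYYMMDDhhmm format
        rc = subprocess.call(['at', '-M', '-t', when.strftime('%Y%m%d%H%M'), '-f', path])
        os.unlink(path)                                           # temp job file no longer needed
        return rc == 0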
- logger.warn(msg) - warnings.append(msg) - # Start a thread for each observer which calls the test stop script on the observer - logger.info("Stopping test on observers...") - thread_list = [] - errors_queue = queue.Queue() - for obskey in obsdict_key.keys(): - thread = StopTestThread(obskey, obsdict_key, errors_queue,testid) - thread_list.append((thread, obskey)) - thread.start() - logger.debug("Started thread for test stop on observer ID %s" %(str(obsdict_key[obskey][1]))) - # Wait for all threads to finish: - for (thread, obskey) in thread_list: - thread.join(timeout=(config.getint('tests','cleanuptime')*0.75*60)) - if thread.isAlive(): - # Timeout occurred. Signal the thread to abort: - msg = "Telling thread for test stop on observer ID %s to abort..." %(str(obsdict_key[obskey][1])) - logger.error(msg) - warnings.append(msg) - thread.abort() - # Wait again for the aborted threads: - for (thread, obskey) in thread_list: - thread.join(timeout=10) - if thread.isAlive(): - msg = "Thread for test stop on observer ID %s is still alive but should be aborted now." %(str(obsdict_key[obskey][1])) - errors.append(msg) - logger.error(msg) - db_unregister_activity(testid, cur, cn, 'stop') - # cleanup resource allocation - now = time.strftime(config.get("database", "timeformat"), time.gmtime()) - cur.execute("DELETE FROM tbl_serv_resource_allocation where `time_end` < '%s' OR `test_fk` = %d" % (now, testid)) - cn.commit() - # Stop fetcher --- - # This has to be done regardless of previous errors. - logger.info("Stopping fetcher...") - cmd = [config.get("dispatcher", "fetcherscript"),"--testid=%d"%testid, "--stop"] - if debug: - cmd.append("--debug") - p = subprocess.Popen(cmd) - rs = p.wait() - if rs not in (SUCCESS, errno.ENOPKG): # SUCCESS (0) is successful stop, ENOPKG (65) means the service was not running. - msg = "Could not stop database fetcher for test ID %d. Fetcher returned error %d"%(testid, rs) - errors.append(msg) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - - # Get all errors (if any). Observers which return errors are not regarded as a general error. - if not errors_queue.empty(): - logger.error("Queue with errors from test stop thread is not empty. Getting errors...") - while not errors_queue.empty(): - errs = errors_queue.get() - for err in errs[1]: - logger.error("Error from test stop thread: %s" %(str(err[0]))) - warnings.append(err[0]) - - # Set stop time in DB --- - cur.execute("UPDATE `tbl_serv_tests` SET `time_end_act` = UTC_TIMESTAMP() WHERE `serv_tests_key` = %d" %testid) - cn.commit() - - return (errors, warnings) - except Exception: - msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) - print(msg) - logger.warn(msg) - raise + errors = [] + warnings = [] + + try: + logger.info("Stopping test %d..."%testid) + + # Update DB status --- + if abort: + status = 'aborting' + else: + status = 'cleaning up' + logger.debug("Setting test status in DB to %s..." %status) + flocklab.set_test_status(cur, cn, testid, status) + + # Stop serial proxy --- + # Get the XML config from the database and check if the serial service was used in the test: + cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) + ret = cur.fetchone() + if not ret: + msg = "No XML found in database for testid %d." 
%testid + errors.append(msg) + logger.error(msg) + else: + parser = etree.XMLParser(remove_comments=True) + tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) + ns = {'d': config.get('xml', 'namespace')} + logger.debug("Got XML from database.") + # only stop serialproxy if remote IP specified in xml + if tree.xpath('//d:serialConf/d:remoteIp', namespaces=ns): + # Serial service was used. Thus stop the serial proxy: + logger.debug("Usage of serial service detected. Stopping serial proxy...") + cmd = [config.get("dispatcher", "serialproxyscript"), "--notify"] + if debug: + cmd.append("--debug") + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + rs = p.wait() + if (rs != 0): + msg = "Serial proxy for test ID %d could not be stopped. Serial proxy returned %d."%(testid, rs) + errors.append(msg) + logger.error(msg) + logger.debug("Executed command was: %s"%(str(cmd))) + else: + logger.debug("Stopped serial proxy.") + + # Stop test on observers --- + if not db_register_activity(testid, cur, cn, 'stop', iter(obsdict_key.keys())): + msg = "Some observers were occupied while stopping test." + logger.warn(msg) + warnings.append(msg) + # Start a thread for each observer which calls the test stop script on the observer + logger.info("Stopping test on observers...") + thread_list = [] + errors_queue = queue.Queue() + for obskey in obsdict_key.keys(): + thread = StopTestThread(obskey, obsdict_key, errors_queue,testid) + thread_list.append((thread, obskey)) + thread.start() + logger.debug("Started thread for test stop on observer ID %s" %(str(obsdict_key[obskey][1]))) + # Wait for all threads to finish: + for (thread, obskey) in thread_list: + thread.join(timeout=(config.getint('tests','cleanuptime')*0.75*60)) + if thread.isAlive(): + # Timeout occurred. Signal the thread to abort: + msg = "Telling thread for test stop on observer ID %s to abort..." %(str(obsdict_key[obskey][1])) + logger.error(msg) + warnings.append(msg) + thread.abort() + # Wait again for the aborted threads: + for (thread, obskey) in thread_list: + thread.join(timeout=10) + if thread.isAlive(): + msg = "Thread for test stop on observer ID %s is still alive but should be aborted now." %(str(obsdict_key[obskey][1])) + errors.append(msg) + logger.error(msg) + db_unregister_activity(testid, cur, cn, 'stop') + # cleanup resource allocation + now = time.strftime(config.get("database", "timeformat"), time.gmtime()) + cur.execute("DELETE FROM tbl_serv_resource_allocation where `time_end` < '%s' OR `test_fk` = %d" % (now, testid)) + cn.commit() + # Stop fetcher --- + # This has to be done regardless of previous errors. + logger.info("Stopping fetcher...") + cmd = [config.get("dispatcher", "fetcherscript"),"--testid=%d"%testid, "--stop"] + if debug: + cmd.append("--debug") + p = subprocess.Popen(cmd) + rs = p.wait() + if rs not in (SUCCESS, errno.ENOPKG): # SUCCESS (0) is successful stop, ENOPKG (65) means the service was not running. + msg = "Could not stop database fetcher for test ID %d. Fetcher returned error %d"%(testid, rs) + errors.append(msg) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + + # Get all errors (if any). Observers which return errors are not regarded as a general error. + if not errors_queue.empty(): + logger.error("Queue with errors from test stop thread is not empty. 
Getting errors...") + while not errors_queue.empty(): + errs = errors_queue.get() + for err in errs[1]: + logger.error("Error from test stop thread: %s" %(str(err[0]))) + warnings.append(err[0]) + + # Set stop time in DB --- + cur.execute("UPDATE `tbl_serv_tests` SET `time_end_act` = UTC_TIMESTAMP() WHERE `serv_tests_key` = %d" %testid) + cn.commit() + + return (errors, warnings) + except Exception: + msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) + print(msg) + logger.warn(msg) + raise ### END stop_test() @@ -996,66 +996,66 @@ def stop_test(testid, cur, cn, obsdict_key, obsdict_id, abort=False): # ############################################################################## def prepare_testresults(testid, cur): - """ This function prepares testresults for the user. It calls the archiver. - If several instances of the archiver - are running, it may take a long time for this function to finish as it will wait - for these functions to succeed. - """ + """ This function prepares testresults for the user. It calls the archiver. + If several instances of the archiver + are running, it may take a long time for this function to finish as it will wait + for these functions to succeed. + """ - errors = [] - - logger.debug("Preparing testresults...") - - # Check if user wants test results as email --- - logger.debug("Check if user wants testresults as email...") - emailResults = False - # Get the XML config from the database: - cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) - ret = cur.fetchone() - if ret: - parser = etree.XMLParser(remove_comments=True) - tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) - ns = {'d': config.get('xml', 'namespace')} - logger.debug("Got XML from database.") - # Check if user wants results as email - ret = tree.xpath('//d:generalConf/d:emailResults', namespaces=ns) - if not ret: - logger.debug("Could not get relevant XML value <emailResults>, thus not emailing results to user.") - else: - if (ret[0].text.lower() == 'yes'): - emailResults = True - if not emailResults: - logger.debug("User does not want test results as email.") - else: - logger.debug("User wants test results as email. Will trigger the email.") - - - # Archive test results --- - cmd = [config.get('dispatcher', 'archiverscript'),"--testid=%d"%testid] - if emailResults: - cmd.append("--email") - if debug: - cmd.append("--debug") - # Call the script until it succeeds: - waittime = config.getint('dispatcher', 'archiver_waittime') - rs = errno.EUSERS - while rs == errno.EUSERS: - p = subprocess.Popen(cmd) - rs = p.wait() - if rs not in (SUCCESS, errno.EUSERS): # SUCCESS (0) is successful stop, EUSERS (87) means the maximum number of allowed instances is reached. - msg = "Could not trigger archiver. Archiver returned error %d"%(rs) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - errors.append(msg) - return errors - if rs == errno.EUSERS: - # Maximum number of instances is reached. Wait some time before calling again. - logger.info("Archiver returned EUSERS. 
Wait for %d s before trying again..."%waittime) - time.sleep(waittime) - logger.debug("Call to archiver successful.") - - logger.debug("Prepared testresults.") - return errors + errors = [] + + logger.debug("Preparing testresults...") + + # Check if user wants test results as email --- + logger.debug("Check if user wants testresults as email...") + emailResults = False + # Get the XML config from the database: + cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) + ret = cur.fetchone() + if ret: + parser = etree.XMLParser(remove_comments=True) + tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) + ns = {'d': config.get('xml', 'namespace')} + logger.debug("Got XML from database.") + # Check if user wants results as email + ret = tree.xpath('//d:generalConf/d:emailResults', namespaces=ns) + if not ret: + logger.debug("Could not get relevant XML value <emailResults>, thus not emailing results to user.") + else: + if (ret[0].text.lower() == 'yes'): + emailResults = True + if not emailResults: + logger.debug("User does not want test results as email.") + else: + logger.debug("User wants test results as email. Will trigger the email.") + + + # Archive test results --- + cmd = [config.get('dispatcher', 'archiverscript'),"--testid=%d"%testid] + if emailResults: + cmd.append("--email") + if debug: + cmd.append("--debug") + # Call the script until it succeeds: + waittime = config.getint('dispatcher', 'archiver_waittime') + rs = errno.EUSERS + while rs == errno.EUSERS: + p = subprocess.Popen(cmd) + rs = p.wait() + if rs not in (SUCCESS, errno.EUSERS): # SUCCESS (0) is successful stop, EUSERS (87) means the maximum number of allowed instances is reached. + msg = "Could not trigger archiver. Archiver returned error %d"%(rs) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + errors.append(msg) + return errors + if rs == errno.EUSERS: + # Maximum number of instances is reached. Wait some time before calling again. + logger.info("Archiver returned EUSERS. 
Wait for %d s before trying again..."%waittime) + time.sleep(waittime) + logger.debug("Call to archiver successful.") + + logger.debug("Prepared testresults.") + return errors ### END prepare_testresults() @@ -1065,22 +1065,22 @@ def prepare_testresults(testid, cur): # ############################################################################## def evalute_linkmeasurement(testid, cur): - errors = [] - # if link measurement, evaluate data - cur.execute("SELECT `username` FROM `tbl_serv_tests` LEFT JOIN `tbl_serv_users` ON (`serv_users_key`=`owner_fk`) WHERE (`serv_tests_key` = %s)" %testid) - ret = cur.fetchone() - if ret and ret[0]==config.get('linktests', 'user'): - logger.debug("Evaluating link measurements.") - cmd = [config.get('dispatcher', 'testtolinkmapscript')] - p = subprocess.Popen(cmd) - rs = p.wait() - if rs != SUCCESS: - msg = "Error %s returned from testtolinkmap script" % str(rs) - logger.error(msg) - errors.append(msg) - else: - logger.debug("Link measurement evaluations finished.") - return errors + errors = [] + # if link measurement, evaluate data + cur.execute("SELECT `username` FROM `tbl_serv_tests` LEFT JOIN `tbl_serv_users` ON (`serv_users_key`=`owner_fk`) WHERE (`serv_tests_key` = %s)" %testid) + ret = cur.fetchone() + if ret and ret[0]==config.get('linktests', 'user'): + logger.debug("Evaluating link measurements.") + cmd = [config.get('dispatcher', 'testtolinkmapscript')] + p = subprocess.Popen(cmd) + rs = p.wait() + if rs != SUCCESS: + msg = "Error %s returned from testtolinkmap script" % str(rs) + logger.error(msg) + errors.append(msg) + else: + logger.debug("Link measurement evaluations finished.") + return errors ### END evalute_linkmeasurement() @@ -1090,59 +1090,59 @@ def evalute_linkmeasurement(testid, cur): # ############################################################################## def inform_user(testid, cur, job, errors, warnings): - if len(errors) != 0: - subj = "Error notification" - if job == 'start': - msg = "The test with ID %d could not be started as planned because of the following errors:\n\n" %testid - elif job == 'stop': - msg = "The test with ID %d could not be stopped as planned because of the following errors:\n\n" %testid - elif job == 'abort': - msg = "The test with ID %d could not be aborted as requested because of the following errors:\n\n" %testid - for error in errors: - msg += "\t * %s\n" %error - for warn in warnings: - msg += "\t * %s\n" %warn - ret = errno.EPERM - elif len(warnings) != 0: - if job == 'start': - subj = "Test %d starting with warnings" %testid - msg = "Your test has been prepared and is going to start as planned, but consider the following warnings:\n\n" - elif job == 'stop': - subj = "Test %d stopped with warnings" %testid - msg = "Your test has been stopped as planned and the results will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/\nConsider the following warnings:\n\n" - elif job == 'abort': - subj = "Test %d aborted with warnings" %testid - msg = "Your test has been aborted as requested and the results (if any) will be available on the website soon\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/\nConsider the following warnings:\n\n" - for warn in warnings: - msg += "\t * %s\n" %warn - ret = SUCCESS - else: - ret = SUCCESS - """ - if job == 'start': - subj = "Test %d starting as planned" %testid - msg = "Your test has been prepared and is going to start as planned." 
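The archiver call in prepare_testresults() above relies on a simple retry loop: as long as the archiver exits with EUSERS (maximum number of allowed instances reached), the dispatcher waits and calls it again. A minimal sketch of that pattern, using a hypothetical run_archiver() helper:

    import errno, subprocess, time

    def run_archiver(cmd, waittime):
        rs = errno.EUSERS
        while rs == errno.EUSERS:
            rs = subprocess.call(cmd)       # blocks until the archiver process exits
            if rs == errno.EUSERS:
                time.sleep(waittime)        # all archiver slots are busy, try again later
        return rs                           # 0 on success, anything else is an error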
- elif job == 'stop': - subj = "Test %d stopped as planned" %testid - msg = "Your test has been stopped as planned. The results will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/" - elif job == 'abort': - subj = "Test %d aborted as requested" %testid - msg = "Your test has been aborted as requested. The results (if any) will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/" - """ - - rs = flocklab.get_test_owner(cur, testid) - if isinstance(rs, tuple): - owner_email = rs[4] - disable_infomails = int(rs[5]) - # Only send email to test owner if she didn't disable reception of info mails or if there were warnings/errors: - if ((len(warnings) != 0) or (len(errors) != 0) or (disable_infomails != 1)): - flocklab.send_mail(subject="[FlockLab %s] %s"%(name, subj), message=msg, recipients=owner_email) - else: - msg = "Error %s returned when trying to get test owner information" % str(rs) - logger.error(msg) - errors.append(msg) - - return ret + if len(errors) != 0: + subj = "Error notification" + if job == 'start': + msg = "The test with ID %d could not be started as planned because of the following errors:\n\n" %testid + elif job == 'stop': + msg = "The test with ID %d could not be stopped as planned because of the following errors:\n\n" %testid + elif job == 'abort': + msg = "The test with ID %d could not be aborted as requested because of the following errors:\n\n" %testid + for error in errors: + msg += "\t * %s\n" %error + for warn in warnings: + msg += "\t * %s\n" %warn + ret = errno.EPERM + elif len(warnings) != 0: + if job == 'start': + subj = "Test %d starting with warnings" %testid + msg = "Your test has been prepared and is going to start as planned, but consider the following warnings:\n\n" + elif job == 'stop': + subj = "Test %d stopped with warnings" %testid + msg = "Your test has been stopped as planned and the results will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/\nConsider the following warnings:\n\n" + elif job == 'abort': + subj = "Test %d aborted with warnings" %testid + msg = "Your test has been aborted as requested and the results (if any) will be available on the website soon\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/\nConsider the following warnings:\n\n" + for warn in warnings: + msg += "\t * %s\n" %warn + ret = SUCCESS + else: + ret = SUCCESS + """ + if job == 'start': + subj = "Test %d starting as planned" %testid + msg = "Your test has been prepared and is going to start as planned." + elif job == 'stop': + subj = "Test %d stopped as planned" %testid + msg = "Your test has been stopped as planned. The results will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/" + elif job == 'abort': + subj = "Test %d aborted as requested" %testid + msg = "Your test has been aborted as requested. 
The results (if any) will be available on the website soon.\nTest results are also accessible using webdav: webdavs://www.flocklab.ethz.ch/user/webdav/" + """ + + rs = flocklab.get_test_owner(cur, testid) + if isinstance(rs, tuple): + owner_email = rs[4] + disable_infomails = int(rs[5]) + # Only send email to test owner if she didn't disable reception of info mails or if there were warnings/errors: + if ((len(warnings) != 0) or (len(errors) != 0) or (disable_infomails != 1)): + flocklab.send_mail(subject="[FlockLab %s] %s"%(name, subj), message=msg, recipients=owner_email) + else: + msg = "Error %s returned when trying to get test owner information" % str(rs) + logger.error(msg) + errors.append(msg) + + return ret ### END inform_user() @@ -1152,27 +1152,27 @@ def inform_user(testid, cur, job, errors, warnings): # ############################################################################## def write_errwarn(testid, cur, cn, errors, warnings): - for warn in warnings: - logger.warn(warn) - flocklab.write_errorlog(cursor=cur, conn=cn, testid=testid, message=warn) - for err in errors: - logger.error(err) - flocklab.write_errorlog(cursor=cur, conn=cn, testid=testid, message=err) + for warn in warnings: + logger.warn(warn) + flocklab.write_errorlog(cursor=cur, conn=cn, testid=testid, message=warn) + for err in errors: + logger.error(err) + flocklab.write_errorlog(cursor=cur, conn=cn, testid=testid, message=err) ### END write_errwarn() ############################################################################## # # relative2absolute_time - Convert a relative time from the XML config into -# an absolute time by adding it to the starttime of the test +# an absolute time by adding it to the starttime of the test # ############################################################################## def relative2absolute_time(starttime, relative_secs, relative_microsecs): - tempdatetime = starttime + datetime.timedelta(seconds=relative_secs, microseconds=relative_microsecs) - absolute_microsecs = tempdatetime.microsecond - absolute_datetime = tempdatetime.strftime(config.get("observer", "timeformat")) - - return (absolute_microsecs, absolute_datetime) + tempdatetime = starttime + datetime.timedelta(seconds=relative_secs, microseconds=relative_microsecs) + absolute_microsecs = tempdatetime.microsecond + absolute_datetime = tempdatetime.strftime(config.get("observer", "timeformat")) + + return (absolute_microsecs, absolute_datetime) ### END relative2absolute_time() ############################################################################## @@ -1189,37 +1189,37 @@ def absolute2absoluteUTC_time(timestring): ### END relative2absolute_time() def db_register_activity(testid, cur, cn, action, obskeys): - pid = os.getpid() - register_ok = True - spin = True - while spin: - spin = False - try: - # remove obsolete values, just in case there was something going wrong.. 
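A worked example (assumed start time and an assumed observer time format) of the conversion performed by relative2absolute_time() above: the relative offset from the XML config is added to the test start time and split into a microsecond part plus a formatted date/time string.

    import datetime

    starttime = datetime.datetime(2013, 5, 7, 12, 0, 0)             # assumed test start (UTC)
    t = starttime + datetime.timedelta(seconds=90, microseconds=250000)
    absolute_microsecs = t.microsecond                              # 250000
    absolute_datetime = t.strftime('%Y-%m-%d %H:%M:%S')             # assumed observer timeformat
    # relative2absolute_time() would return (250000, '2013-05-07 12:01:30') here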
- sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`time_start` < date_add(NOW(), interval - %d minute))' % (max((config.getint('tests','setuptime'),config.getint('tests','cleanuptime'))) * 2) - cur.execute(sql) - for obskey in obskeys: - sql = 'INSERT INTO tbl_serv_dispatcher_activity (`pid`,`action`,`observer_fk`,`test_fk`,`time_start`) VALUES (%d,"%s",%d,%d,NOW())' % (pid,action,obskey,testid) - cur.execute(sql) - cn.commit() - except MySQLdb.IntegrityError: - sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`pid` = %d AND `action`="%s" AND `test_fk` = %d)' % (pid,action,testid) - cur.execute(sql) - cn.commit() - register_ok = False - except MySQLdb.OperationalError as e: # retry if deadlock - if e.args[0] == MySQLErrors.LOCK_DEADLOCK: - time.sleep(1) - spin = True - else: - raise - return register_ok - + pid = os.getpid() + register_ok = True + spin = True + while spin: + spin = False + try: + # remove obsolete values, just in case there was something going wrong.. + sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`time_start` < date_add(NOW(), interval - %d minute))' % (max((config.getint('tests','setuptime'),config.getint('tests','cleanuptime'))) * 2) + cur.execute(sql) + for obskey in obskeys: + sql = 'INSERT INTO tbl_serv_dispatcher_activity (`pid`,`action`,`observer_fk`,`test_fk`,`time_start`) VALUES (%d,"%s",%d,%d,NOW())' % (pid,action,obskey,testid) + cur.execute(sql) + cn.commit() + except MySQLdb.IntegrityError: + sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`pid` = %d AND `action`="%s" AND `test_fk` = %d)' % (pid,action,testid) + cur.execute(sql) + cn.commit() + register_ok = False + except MySQLdb.OperationalError as e: # retry if deadlock + if e.args[0] == MySQLErrors.LOCK_DEADLOCK: + time.sleep(1) + spin = True + else: + raise + return register_ok + def db_unregister_activity(testid, cur, cn, action): - pid = os.getpid() - sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`pid` = %d AND `action`="%s" AND `test_fk` = %d)' % (pid,action,testid) - cur.execute(sql) - cn.commit() + pid = os.getpid() + sql = 'DELETE FROM tbl_serv_dispatcher_activity WHERE (`pid` = %d AND `action`="%s" AND `test_fk` = %d)' % (pid,action,testid) + cur.execute(sql) + cn.commit() ############################################################################## # @@ -1227,13 +1227,13 @@ def db_unregister_activity(testid, cur, cn, action): # ############################################################################## def usage(): - print("Usage: %s --testid=<int> [--start] [--stop] [--abort] [--debug] [--help]" %scriptname) - print(" --testid=<int>\t\tTest ID of test dispatch.") - print(" --start\t\t\tOptional. Tell dispatcher to start the test. Either --start, --stop or --aborted has to be specified.") - print(" --stop\t\t\tOptional. Tell dispatcher to stop the test. Either --start, --stop or --aborted has to be specified.") - print(" --abort\t\t\tOptional. Tell dispatcher to abort the test. Either --start, --stop or --aborted has to be specified.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s --testid=<int> [--start] [--stop] [--abort] [--debug] [--help]" %scriptname) + print(" --testid=<int>\t\tTest ID of test dispatch.") + print(" --start\t\t\tOptional. Tell dispatcher to start the test. Either --start, --stop or --aborted has to be specified.") + print(" --stop\t\t\tOptional. Tell dispatcher to stop the test. 
Either --start, --stop or --aborted has to be specified.") + print(" --abort\t\t\tOptional. Tell dispatcher to abort the test. Either --start, --stop or --aborted has to be specified.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -1243,262 +1243,262 @@ def usage(): # ############################################################################## def main(argv): - global PIDFILE - global logger - global debug - global config - testid = None - action = None - errors = [] - warnings = [] - - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file: - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - PIDFILE = "%s/%s" %(config.get("tests", "pidfolder"), "flocklab_dispatcher.pid") - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "seadht:", ["start", "stop", "abort", "debug", "help", "testid="]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - for opt, arg in opts: - if opt in ("-s", "--start"): - action = 'start' - elif opt in ("-e", "--stop"): - action = 'stop' - elif opt in ("-a", "--abort"): - action = 'abort' - elif opt in ("-d", "--debug"): - debug = True - logger.setLevel(logging.DEBUG) - logger.debug("Detected debug flag.") - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-t", "--testid"): - try: - testid = int(arg) - if testid <= 0: - raise Error - except: - logger.warn("Wrong API usage: testid has to be a positive number") - sys.exit(errno.EINVAL) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) + global PIDFILE + global logger + global debug + global config + testid = None + action = None + errors = [] + warnings = [] + + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file: + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." 
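db_register_activity() earlier in this file retries the whole registration when MySQL reports a lock deadlock. A minimal sketch of that retry idiom, assuming a MySQLdb connection/cursor pair and the numeric deadlock error code 1213 (ER_LOCK_DEADLOCK):

    import time
    import MySQLdb

    def execute_with_deadlock_retry(cur, cn, sql, pause=1.0):
        while True:
            try:
                cur.execute(sql)
                cn.commit()
                return
            except MySQLdb.OperationalError as e:
                if e.args[0] == 1213:       # ER_LOCK_DEADLOCK: back off and retry
                    time.sleep(pause)
                    continue
                raise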
+ flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + PIDFILE = "%s/%s" %(config.get("tests", "pidfolder"), "flocklab_dispatcher.pid") + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "seadht:", ["start", "stop", "abort", "debug", "help", "testid="]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + for opt, arg in opts: + if opt in ("-s", "--start"): + action = 'start' + elif opt in ("-e", "--stop"): + action = 'stop' + elif opt in ("-a", "--abort"): + action = 'abort' + elif opt in ("-d", "--debug"): + debug = True + logger.setLevel(logging.DEBUG) + logger.debug("Detected debug flag.") + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-t", "--testid"): + try: + testid = int(arg) + if testid <= 0: + raise Error + except: + logger.warn("Wrong API usage: testid has to be a positive number") + sys.exit(errno.EINVAL) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) - # Check if the necessary parameters are set: testid and either start, stop or abort has to be specified but not all. - if ((not testid) or (action == None)): - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) + # Check if the necessary parameters are set: testid and either start, stop or abort has to be specified but not all. + if ((not testid) or (action == None)): + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) - try: - #logger.info("Called dispatcher with test ID %d" %testid) - - # Add testid to logger name - logger.name += " (Test %d)"%testid - - # Get PID of process and write it to pid file: - if not os.path.isdir(os.path.dirname(PIDFILE)): - shutil.rmtree(PIDFILE, ignore_errors=True) - if not os.path.exists(os.path.dirname(PIDFILE)): - os.makedirs(os.path.dirname(PIDFILE)) - open(PIDFILE,'w').write("%d"%(os.getpid())) - #logger.debug("Wrote pid %d into file %s" %(os.getpid(), PIDFILE)) - - # Connect to the database: - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Connected to database") - - # Check test ID: - ret = flocklab.check_test_id(cur, testid) - if (ret != 0): - cur.close() - cn.close() - try: - os.remove(PIDFILE) - except OSError: - pass - if ret == 3: - msg = "Test ID %d does not exist in database." %testid - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - else: - msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EIO, name, logger, config) - else: - logger.debug("Checking test ID %d passed"%testid) - - # Build obsdict_key, obsdict_id --- - # Get all observers which are used in the test and build a dictionary out of them: - sql = """ SELECT `a`.serv_observer_key, `a`.observer_id, `a`.ethernet_address - FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `b` - ON `a`.serv_observer_key = `b`.observer_fk - WHERE `b`.test_fk = %d; - """ - cur.execute(sql%testid) - ret = cur.fetchall() - if not ret: - logger.debug("No used observers found in database for test ID %d. Exiting..." 
%testid) - logger.debug("Setting test status in DB to 'failed'...") - status = 'failed' - flocklab.set_test_status(cur, cn, testid, status) - cur.close() - cn.close() - try: - os.remove(PIDFILE) - except OSError: - pass - sys.exit(errno.EINVAL) - obsdict_key = {} - obsdict_id = {} - for obs in ret: - # Dict searchable by serv_observer_key: - obsdict_key[obs[0]] = (obs[0], obs[1], obs[2]) - # Dict searchable by observer_id: - obsdict_id[obs[1]] = (obs[0], obs[1], obs[2]) - - - # Start/stop/abort test --- - if (action == 'start'): - # Try to start test: - starttime = time.time() - errors, warnings = start_test(testid, cur, cn, obsdict_key, obsdict_id) - # Record time needed to set up test for statistics in DB: - time_needed = time.time() - starttime - sql = """ UPDATE `tbl_serv_tests` - SET `setuptime` = %d - WHERE `serv_tests_key` = %d; - """ - cur.execute(sql%(int(time_needed), testid)) - cn.commit() - if len(errors) != 0: - # Test start failed. Make it abort: - logger.warn("Going to abort test because of errors when trying to start it.") - # Write errors and warnings to DB: - write_errwarn(testid, cur, cn, errors, warnings) - # Inform user: - ret = inform_user(testid, cur, action, errors, warnings) - elif ((action == 'stop') or (action == 'abort')): - # Stop test: - if action == 'abort': - abort = True - else: - abort = False - starttime = time.time() - errors, warnings = stop_test(testid, cur, cn, obsdict_key, obsdict_id, abort) - # Record time needed to set up test for statistics in DB: - time_needed = time.time() - starttime - sql = """ UPDATE `tbl_serv_tests` - SET `cleanuptime` = %d - WHERE `serv_tests_key` = %d; - """ - cur.execute(sql%(int(time_needed), testid)) - cn.commit() - # Inform user: - ret = inform_user(testid, cur, action, errors, warnings) - # Write errors and warnings to DB: - write_errwarn(testid, cur, cn, errors, warnings) - # Wait until test has status synced or no more fetcher is running: - status = flocklab.get_test_status(cur, cn, testid) - while (status not in ('synced', 'finished', 'failed')): - logger.debug("Fetcher has not yet set test status to 'synced', 'finished' or 'failed' (currently in status '%s'). 
Going to sleep 5s..."%(status)) - # Disconnect from database (important to avoid timeout for longer processing) - try: - cur.close() - cn.close() - except: - pass - time.sleep(5) - # Reconnect to the database: - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - continue # try to connect again in 5s - status = flocklab.get_test_status(cur, cn, testid) - if (flocklab.get_fetcher_pid(testid) < 0): - # no fetcher is running: set test status to failed - status = 'failed' - break - logger.debug("Fetcher has set test status to '%s'."%status) - - # Check the actual runtime: if < 0, test failed - cur.execute("SELECT TIME_TO_SEC(TIMEDIFF(`time_end_act`, `time_start_act`)) FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" % testid) - test_runtime = int(cur.fetchone()[0]) - if test_runtime < 0: - logger.info("Negative runtime detected, marking test as 'failed'.") - - # Prepare testresults: - if (len(errors) == 0) and (test_runtime > 0): - err = prepare_testresults(testid, cur) - for e in err: - errors.append(e) - # Evaluate link measurement: - err = evalute_linkmeasurement(testid, cur) - for e in err: - errors.append(e) - # Update DB status and statistics: - if (len(errors) == 0) and (test_runtime > 0): - status = 'finished' - else: - status = 'failed' - logger.debug("Setting test status in DB to '%s'..."%status) - flocklab.set_test_status(cur, cn, testid, status) - logger.info("Test %d is stopped."%testid) - - # Close db connection --- - try: - cur.close() - cn.close() - except: - pass - - # Inform admins of errors and exit --- - if ((len(errors) > 0) or (len(warnings) > 0)): - msg = "The test %s with ID %d reported the following errors/warnings:\n\n" %(action, testid) - for error in errors: - msg = msg + "\t * ERROR: %s\n" %(str(error)) - for warn in warnings: - msg = msg + "\t * WARNING: %s\n" %(str(warn)) - logger.debug("Finished with %d errors and %d warnings"%(len(errors), len(warnings))) - flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) - except Exception: - msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) - print(msg) - flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) - sys.exit(SUCCESS) - + try: + #logger.info("Called dispatcher with test ID %d" %testid) + + # Add testid to logger name + logger.name += " (Test %d)"%testid + + # Get PID of process and write it to pid file: + if not os.path.isdir(os.path.dirname(PIDFILE)): + shutil.rmtree(PIDFILE, ignore_errors=True) + if not os.path.exists(os.path.dirname(PIDFILE)): + os.makedirs(os.path.dirname(PIDFILE)) + open(PIDFILE,'w').write("%d"%(os.getpid())) + #logger.debug("Wrote pid %d into file %s" %(os.getpid(), PIDFILE)) + + # Connect to the database: + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Connected to database") + + # Check test ID: + ret = flocklab.check_test_id(cur, testid) + if (ret != 0): + cur.close() + cn.close() + try: + os.remove(PIDFILE) + except OSError: + pass + if ret == 3: + msg = "Test ID %d does not exist in database." 
%testid + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + else: + msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EIO, name, logger, config) + else: + logger.debug("Checking test ID %d passed"%testid) + + # Build obsdict_key, obsdict_id --- + # Get all observers which are used in the test and build a dictionary out of them: + sql = """ SELECT `a`.serv_observer_key, `a`.observer_id, `a`.ethernet_address + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `b` + ON `a`.serv_observer_key = `b`.observer_fk + WHERE `b`.test_fk = %d; + """ + cur.execute(sql%testid) + ret = cur.fetchall() + if not ret: + logger.debug("No used observers found in database for test ID %d. Exiting..." %testid) + logger.debug("Setting test status in DB to 'failed'...") + status = 'failed' + flocklab.set_test_status(cur, cn, testid, status) + cur.close() + cn.close() + try: + os.remove(PIDFILE) + except OSError: + pass + sys.exit(errno.EINVAL) + obsdict_key = {} + obsdict_id = {} + for obs in ret: + # Dict searchable by serv_observer_key: + obsdict_key[obs[0]] = (obs[0], obs[1], obs[2]) + # Dict searchable by observer_id: + obsdict_id[obs[1]] = (obs[0], obs[1], obs[2]) + + + # Start/stop/abort test --- + if (action == 'start'): + # Try to start test: + starttime = time.time() + errors, warnings = start_test(testid, cur, cn, obsdict_key, obsdict_id) + # Record time needed to set up test for statistics in DB: + time_needed = time.time() - starttime + sql = """ UPDATE `tbl_serv_tests` + SET `setuptime` = %d + WHERE `serv_tests_key` = %d; + """ + cur.execute(sql%(int(time_needed), testid)) + cn.commit() + if len(errors) != 0: + # Test start failed. Make it abort: + logger.warn("Going to abort test because of errors when trying to start it.") + # Write errors and warnings to DB: + write_errwarn(testid, cur, cn, errors, warnings) + # Inform user: + ret = inform_user(testid, cur, action, errors, warnings) + elif ((action == 'stop') or (action == 'abort')): + # Stop test: + if action == 'abort': + abort = True + else: + abort = False + starttime = time.time() + errors, warnings = stop_test(testid, cur, cn, obsdict_key, obsdict_id, abort) + # Record time needed to set up test for statistics in DB: + time_needed = time.time() - starttime + sql = """ UPDATE `tbl_serv_tests` + SET `cleanuptime` = %d + WHERE `serv_tests_key` = %d; + """ + cur.execute(sql%(int(time_needed), testid)) + cn.commit() + # Inform user: + ret = inform_user(testid, cur, action, errors, warnings) + # Write errors and warnings to DB: + write_errwarn(testid, cur, cn, errors, warnings) + # Wait until test has status synced or no more fetcher is running: + status = flocklab.get_test_status(cur, cn, testid) + while (status not in ('synced', 'finished', 'failed')): + logger.debug("Fetcher has not yet set test status to 'synced', 'finished' or 'failed' (currently in status '%s'). 
Going to sleep 5s..."%(status)) + # Disconnect from database (important to avoid timeout for longer processing) + try: + cur.close() + cn.close() + except: + pass + time.sleep(5) + # Reconnect to the database: + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + continue # try to connect again in 5s + status = flocklab.get_test_status(cur, cn, testid) + if (flocklab.get_fetcher_pid(testid) < 0): + # no fetcher is running: set test status to failed + status = 'failed' + break + logger.debug("Fetcher has set test status to '%s'."%status) + + # Check the actual runtime: if < 0, test failed + cur.execute("SELECT TIME_TO_SEC(TIMEDIFF(`time_end_act`, `time_start_act`)) FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" % testid) + test_runtime = int(cur.fetchone()[0]) + if test_runtime < 0: + logger.info("Negative runtime detected, marking test as 'failed'.") + + # Prepare testresults: + if (len(errors) == 0) and (test_runtime > 0): + err = prepare_testresults(testid, cur) + for e in err: + errors.append(e) + # Evaluate link measurement: + err = evalute_linkmeasurement(testid, cur) + for e in err: + errors.append(e) + # Update DB status and statistics: + if (len(errors) == 0) and (test_runtime > 0): + status = 'finished' + else: + status = 'failed' + logger.debug("Setting test status in DB to '%s'..."%status) + flocklab.set_test_status(cur, cn, testid, status) + logger.info("Test %d is stopped."%testid) + + # Close db connection --- + try: + cur.close() + cn.close() + except: + pass + + # Inform admins of errors and exit --- + if ((len(errors) > 0) or (len(warnings) > 0)): + msg = "The test %s with ID %d reported the following errors/warnings:\n\n" %(action, testid) + for error in errors: + msg = msg + "\t * ERROR: %s\n" %(str(error)) + for warn in warnings: + msg = msg + "\t * WARNING: %s\n" %(str(warn)) + logger.debug("Finished with %d errors and %d warnings"%(len(errors), len(warnings))) + flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) + except Exception: + msg = "Unexpected error: %s: %s\n%s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) + print(msg) + flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) + sys.exit(SUCCESS) + ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_fetcher.py b/testmanagementserver/flocklab_fetcher.py index ff26181d245811bae91b99c0e78ef9b9a15ac33d..93489e49aab3318752343020b8f438142dd43235 100755 --- a/testmanagementserver/flocklab_fetcher.py +++ b/testmanagementserver/flocklab_fetcher.py @@ -1,8 +1,8 @@ #! 
/usr/bin/env python3 -__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" +__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" -__license__ = "GPL" +__license__ = "GPL" import os, sys, getopt, traceback, MySQLdb, signal, random, time, errno, multiprocessing, subprocess, re, logging, __main__, threading, struct, types, queue, math, shutil @@ -20,23 +20,23 @@ scriptpath = os.path.dirname(os.path.abspath(sys.argv[0])) name = "Fetcher" ### -logger = None -debug = False -testid = None -errors = [] -FetchObsThread_list = [] -FetchObsThread_stopEvent = None -FetchObsThread_queue = None -config = None -obsfiledir = None -testresultsdir = None -testresultsfile_dict = {} -mainloop_stop = False -owner_fk = None -pindict = None -obsdict_byid = None -servicedict = None -serialdict = None +logger = None +debug = False +testid = None +errors = [] +FetchObsThread_list = [] +FetchObsThread_stopEvent = None +FetchObsThread_queue = None +config = None +obsfiledir = None +testresultsdir = None +testresultsfile_dict = {} +mainloop_stop = False +owner_fk = None +pindict = None +obsdict_byid = None +servicedict = None +serialdict = None ITEM_TO_PROCESS = 0 ITEM_PROCESSED = 1 @@ -47,17 +47,17 @@ ITEM_PROCESSED = 1 # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass class DbFileEof(Exception): - pass + pass class DbFileReadError(Exception): - def __init__(self, expectedSize, actualSize, fpos): - self.expectedSize = expectedSize - self.actualSize = actualSize - self.fpos = fpos + def __init__(self, expectedSize, actualSize, fpos): + self.expectedSize = expectedSize + self.actualSize = actualSize + self.fpos = fpos ### END Error classes @@ -68,21 +68,21 @@ class DbFileReadError(Exception): # ############################################################################## class ServiceInfo(): - def __init__(self, servicename): - self.servicename = servicename - self.files = [] - self.pattern = "^%s_[0-9]*\.db$" % servicename - - def matchFileName(self, filename): - return re.search(self.pattern, os.path.basename(filename)) is not None - - def addFile(self, filename): - self.files.append(filename) - - def stripFileList(self, removelast=True): - self.files.sort() - if ((len(self.files) > 0) and removelast): - self.files.pop() + def __init__(self, servicename): + self.servicename = servicename + self.files = [] + self.pattern = "^%s_[0-9]*\.db$" % servicename + + def matchFileName(self, filename): + return re.search(self.pattern, os.path.basename(filename)) is not None + + def addFile(self, filename): + self.files.append(filename) + + def stripFileList(self, removelast=True): + self.files.sort() + if ((len(self.files) > 0) and removelast): + self.files.pop() ### END ServiceInfo @@ -93,40 +93,40 @@ class ServiceInfo(): # ############################################################################## def sigterm_handler(signum, frame): - """If the program is terminated by sending it the signal SIGTERM - (e.g. 
by executing 'kill') or SIGINT (pressing ctrl-c), - this signal handler is invoked for cleanup.""" - - global mainloop_stop - global FetchObsThread_stopEvent - - logger.info("Process received SIGTERM or SIGINT signal") - - # Signal all observer fetcher threads to stop: - logger.debug("Stopping observer fetcher threads...") - shutdown_timeout = config.getint("fetcher", "shutdown_timeout") - try: - FetchObsThread_stopEvent.set() - except: - pass - for thread in FetchObsThread_list: - try: - thread.join(shutdown_timeout) - except: - logger.warn("Fetcher thread did not stop within %d seconds." % shutdown_timeout) - # Set DB status: - logger.debug("Setting test status in DB to 'syncing'...") - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - flocklab.set_test_status(cur, cn, testid, 'syncing') - cur.close() - cn.close() - except: - logger.warn("Could not connect to database") - - # Tell the main loop to stop: - mainloop_stop = True - logger.debug("Set stop signal for main loop.") + """If the program is terminated by sending it the signal SIGTERM + (e.g. by executing 'kill') or SIGINT (pressing ctrl-c), + this signal handler is invoked for cleanup.""" + + global mainloop_stop + global FetchObsThread_stopEvent + + logger.info("Process received SIGTERM or SIGINT signal") + + # Signal all observer fetcher threads to stop: + logger.debug("Stopping observer fetcher threads...") + shutdown_timeout = config.getint("fetcher", "shutdown_timeout") + try: + FetchObsThread_stopEvent.set() + except: + pass + for thread in FetchObsThread_list: + try: + thread.join(shutdown_timeout) + except: + logger.warn("Fetcher thread did not stop within %d seconds." % shutdown_timeout) + # Set DB status: + logger.debug("Setting test status in DB to 'syncing'...") + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + flocklab.set_test_status(cur, cn, testid, 'syncing') + cur.close() + cn.close() + except: + logger.warn("Could not connect to database") + + # Tell the main loop to stop: + mainloop_stop = True + logger.debug("Set stop signal for main loop.") ### END sigterm_handler @@ -137,23 +137,23 @@ def sigterm_handler(signum, frame): # ############################################################################## def parse_gpio_setting(buf): - _data = struct.unpack("<Iiiiii",buf) #unsigned int gpio;int value;struct timeval time_planned;struct timeval time_executed; - return (_data[0], str(_data[1]), "%i.%06i"%(_data[2],_data[3]), "%i.%06i"%(_data[4],_data[5])) + _data = struct.unpack("<Iiiiii",buf) #unsigned int gpio;int value;struct timeval time_planned;struct timeval time_executed; + return (_data[0], str(_data[1]), "%i.%06i"%(_data[2],_data[3]), "%i.%06i"%(_data[4],_data[5])) def parse_gpio_monitor(buf): - _data = str(buf).split(";") - logger.debug("BUFFER CONTENT: %s" %str(buf)) - return (_data[0], _data[1], _data[2]) - #_data = struct.unpack("<Iiii",buf) #unsigned int gpio;enum en_edge edge;struct timeval timestamp; - #return (_data[0], str(_data[1]), "%i.%06i"%(_data[2],_data[3])) + _data = str(buf).split(";") + logger.debug("BUFFER CONTENT: %s" %str(buf)) + return (_data[0], _data[1], _data[2]) + #_data = struct.unpack("<Iiii",buf) #unsigned int gpio;enum en_edge edge;struct timeval timestamp; + #return (_data[0], str(_data[1]), "%i.%06i"%(_data[2],_data[3])) def parse_serial(buf): - _data = struct.unpack("iii%ds" % (len(buf) - 12),buf) #int service; struct timeval timestamp;char * data - return (_data[0], _data[3], "%i.%06i"%(_data[1],_data[2])) - + _data = struct.unpack("iii%ds" % (len(buf) - 
12),buf) #int service; struct timeval timestamp;char * data + return (_data[0], _data[3], "%i.%06i"%(_data[1],_data[2])) + def parse_error_log(buf): - _data = struct.unpack("<iii%ds" % (len(buf) - 12),buf) #struct timeval timestamp; int service_fk; char errormessage[1024]; - return (str(_data[2]), _data[3], "%i.%06i"%(_data[0],_data[1])) + _data = struct.unpack("<iii%ds" % (len(buf) - 12),buf) #struct timeval timestamp; int service_fk; char errormessage[1024]; + return (str(_data[2]), _data[3], "%i.%06i"%(_data[0],_data[1])) @@ -163,16 +163,16 @@ def parse_error_log(buf): # ############################################################################## def convert_gpio_setting(obsdata, observer_id, node_id): - return "%s,%s,%s,%s,%s,%s\n" %(obsdata[2], obsdata[3], observer_id, node_id, pindict[obsdata[0]][0], obsdata[1]) + return "%s,%s,%s,%s,%s,%s\n" %(obsdata[2], obsdata[3], observer_id, node_id, pindict[obsdata[0]][0], obsdata[1]) def convert_gpio_monitor(obsdata, observer_id, node_id): - return "%s,%s,%s,%s,%s\n" %(obsdata[1], observer_id, node_id, obsdata[0], obsdata[2]) + return "%s,%s,%s,%s,%s\n" %(obsdata[1], observer_id, node_id, obsdata[0], obsdata[2]) def convert_serial(obsdata, observer_id, node_id): - return "%s,%s,%s,%s,%s\n" %(obsdata[2], observer_id, node_id, serialdict[obsdata[0]], obsdata[1]) + return "%s,%s,%s,%s,%s\n" %(obsdata[2], observer_id, node_id, serialdict[obsdata[0]], obsdata[1]) def convert_error_log(obsdata, observer_id, node_id): - return "%s,%s,%s,%s\n" %(obsdata[2], observer_id, node_id, obsdata[1]) + return "%s,%s,%s,%s\n" %(obsdata[2], observer_id, node_id, obsdata[1]) @@ -180,20 +180,20 @@ def convert_error_log(obsdata, observer_id, node_id): # # read_from_db_file: Read from an open DB file from an observer # -############################################################################## +############################################################################## def read_from_db_file(dbfile): - _buf = dbfile.read(4) - if len(_buf) < 4: - dbfile.close() - raise DbFileEof() - else: - _size = struct.unpack("<I",_buf) - _buf = dbfile.read(_size[0]) - if len(_buf) != _size[0]: - _fpos = dbfile.tell() - 4 - len(_buf) - dbfile.close() - raise DbFileReadError(_size[0], len(_buf), _fpos) - return _buf + _buf = dbfile.read(4) + if len(_buf) < 4: + dbfile.close() + raise DbFileEof() + else: + _size = struct.unpack("<I",_buf) + _buf = dbfile.read(_size[0]) + if len(_buf) != _size[0]: + _fpos = dbfile.tell() - 4 - len(_buf) + dbfile.close() + raise DbFileReadError(_size[0], len(_buf), _fpos) + return _buf ### END read_from_db_file @@ -201,89 +201,89 @@ def read_from_db_file(dbfile): ############################################################################## # # worker_convert_and_aggregate: Worker function for multiprocessing pools. -# Parses observer DB files for all services, converts values (if needed) -# and aggregates them into single test result files. +# Parses observer DB files for all services, converts values (if needed) +# and aggregates them into single test result files. 
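Before the aggregation worker below, a note on the on-disk format it consumes: read_from_db_file() above implies that every observer DB record is a 4-byte little-endian length prefix followed by the payload of that length. A minimal sketch of that framing (hypothetical helper names; the writer side is an assumption for illustration, not part of this patch):

import struct

def iter_db_records(path):
    # Generator equivalent of read_from_db_file(): yields one payload per record
    # until a clean EOF, raising on a truncated record.
    with open(path, "rb") as dbfile:
        while True:
            header = dbfile.read(4)
            if len(header) < 4:
                return                                  # clean end of file
            (size,) = struct.unpack("<I", header)       # little-endian record length
            payload = dbfile.read(size)
            if len(payload) != size:
                raise IOError("truncated record at offset %d" % (dbfile.tell() - 4 - len(payload)))
            yield payload

def write_db_record(fileobj, payload):
    # Assumed writer-side counterpart: length prefix first, then the raw payload.
    fileobj.write(struct.pack("<I", len(payload)))
    fileobj.write(payload)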
# ############################################################################## def worker_convert_and_aggregate(queueitem=None, nodeid=None, resultfile_path=None, resultfile_lock=None, commitsize=1, vizimgdir=None, parse_f=None, convert_f=None, viz_f=None, logqueue=None): - try: - _errors = [] - cur_p = multiprocessing.current_process() - (itemtype, obsid, fdir, f, workerstate) = queueitem - obsdbfile_path = "%s/%s"%(fdir,f) - loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) - #logqueue.put_nowait((loggername, logging.DEBUG, "Import file %s"%obsdbfile_path)) - # Open file: - dbfile = open(obsdbfile_path, 'rb') - rows = 0 - viz_values = [] - conv_values = [] - while not dbfile.closed: - # Process DB file line by line: - try: - # Parse one line: - buf = read_from_db_file(dbfile) - obsdata = parse_f(buf) - viz_values.append(obsdata) - # Convert data if needed: - if convert_f != None: - conv_data = convert_f(obsdata, obsid, nodeid) - conv_values.append(conv_data) - rows += 1 - # Visualize data: - if (commitsize > 0) & (rows >= commitsize): - if viz_f != None: - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) - viz_f(testid, owner_fk, viz_values, obsid, vizimgdir, logger) - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) - # Write data to file: - #logqueue.put_nowait((loggername, logging.DEBUG, "Opening file %s for writing..."%(resultfile_path))) - resultfile_lock.acquire() - f = open(resultfile_path, 'a') - f.writelines(conv_values) - f.close() - resultfile_lock.release() - logqueue.put_nowait((loggername, logging.DEBUG, "Committed results to %s after %d rows"%(resultfile_path, rows))) - rows = 0 - conv_values = [] - viz_values = [] - except DbFileEof: - # logqueue.put_nowait((loggername, logging.DEBUG, "DbFileEof has occurred.")) - break # dbfile has been closed in parser (most likely because EOF was reached) - except DbFileReadError as err: - msg = "%s: Packet size (%i) did not match payload size (%i) @ %d." %(obsdbfile_path, err.expectedSize, err.actualSize, err.fpos) - _errors.append((msg, errno.EIO, obsid)) - logqueue.put_nowait((loggername, logging.ERROR, msg)) - except: - raise - try: - if (len(conv_values) > 0): - # There is still data left. 
Do a last commit: - if (viz_f != None) and (len(viz_values) > 0): - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) - viz_f(testid, owner_fk, viz_values, obsid, vizimgdir, logger) - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) - # Write data to file: - #logqueue.put_nowait((loggername, logging.DEBUG, "Opening file %s for final writing..."%(resultfile_path))) - resultfile_lock.acquire() - f = open(resultfile_path, 'a') - f.writelines(conv_values) - f.close() - resultfile_lock.release() - logqueue.put_nowait((loggername, logging.DEBUG, "Committed final results to %s after %d rows"%(resultfile_path, rows))) - # Remove processed file: - #logqueue.put_nowait((loggername, logging.DEBUG, "Remove %s"%(obsdbfile_path))) - os.unlink(obsdbfile_path) - except: - raise - except: - msg = "Error in worker process: %s: %s\n%s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) - _errors.append((msg, errno.ECOMM, obsid)) - logqueue.put_nowait((loggername, logging.ERROR, msg)) - finally: - processeditem = list(queueitem) - processeditem[0] = ITEM_PROCESSED - return (_errors, tuple(processeditem)) + try: + _errors = [] + cur_p = multiprocessing.current_process() + (itemtype, obsid, fdir, f, workerstate) = queueitem + obsdbfile_path = "%s/%s"%(fdir,f) + loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) + #logqueue.put_nowait((loggername, logging.DEBUG, "Import file %s"%obsdbfile_path)) + # Open file: + dbfile = open(obsdbfile_path, 'rb') + rows = 0 + viz_values = [] + conv_values = [] + while not dbfile.closed: + # Process DB file line by line: + try: + # Parse one line: + buf = read_from_db_file(dbfile) + obsdata = parse_f(buf) + viz_values.append(obsdata) + # Convert data if needed: + if convert_f != None: + conv_data = convert_f(obsdata, obsid, nodeid) + conv_values.append(conv_data) + rows += 1 + # Visualize data: + if (commitsize > 0) & (rows >= commitsize): + if viz_f != None: + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) + viz_f(testid, owner_fk, viz_values, obsid, vizimgdir, logger) + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) + # Write data to file: + #logqueue.put_nowait((loggername, logging.DEBUG, "Opening file %s for writing..."%(resultfile_path))) + resultfile_lock.acquire() + f = open(resultfile_path, 'a') + f.writelines(conv_values) + f.close() + resultfile_lock.release() + logqueue.put_nowait((loggername, logging.DEBUG, "Committed results to %s after %d rows"%(resultfile_path, rows))) + rows = 0 + conv_values = [] + viz_values = [] + except DbFileEof: + # logqueue.put_nowait((loggername, logging.DEBUG, "DbFileEof has occurred.")) + break # dbfile has been closed in parser (most likely because EOF was reached) + except DbFileReadError as err: + msg = "%s: Packet size (%i) did not match payload size (%i) @ %d." %(obsdbfile_path, err.expectedSize, err.actualSize, err.fpos) + _errors.append((msg, errno.EIO, obsid)) + logqueue.put_nowait((loggername, logging.ERROR, msg)) + except: + raise + try: + if (len(conv_values) > 0): + # There is still data left. 
Do a last commit: + if (viz_f != None) and (len(viz_values) > 0): + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) + viz_f(testid, owner_fk, viz_values, obsid, vizimgdir, logger) + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) + # Write data to file: + #logqueue.put_nowait((loggername, logging.DEBUG, "Opening file %s for final writing..."%(resultfile_path))) + resultfile_lock.acquire() + f = open(resultfile_path, 'a') + f.writelines(conv_values) + f.close() + resultfile_lock.release() + logqueue.put_nowait((loggername, logging.DEBUG, "Committed final results to %s after %d rows"%(resultfile_path, rows))) + # Remove processed file: + #logqueue.put_nowait((loggername, logging.DEBUG, "Remove %s"%(obsdbfile_path))) + os.unlink(obsdbfile_path) + except: + raise + except: + msg = "Error in worker process: %s: %s\n%s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) + _errors.append((msg, errno.ECOMM, obsid)) + logqueue.put_nowait((loggername, logging.ERROR, msg)) + finally: + processeditem = list(queueitem) + processeditem[0] = ITEM_PROCESSED + return (_errors, tuple(processeditem)) ### END worker_convert_and_aggregate @@ -330,38 +330,38 @@ def worker_gpiotracing(queueitem=None, nodeid=None, resultfile_path=None, slotca ############################################################################## # # worker_powerprof: Worker function for converting and aggregating power -# profiling data. Unlike for the other services, this function works on -# whole observer DB files. +# profiling data. Unlike for the other services, this function works on +# whole observer DB files. # ############################################################################## def worker_powerprof(queueitem=None, nodeid=None, resultfile_path=None, slotcalib_factor=1, slotcalib_offset=0, vizimgdir=None, viz_f=None, logqueue=None, PpStatsQueue=None): - try: - _errors = [] - cur_p = multiprocessing.current_process() - (itemtype, obsid, fdir, f, workerstate) = queueitem - obsdbfile_path = "%s/%s"%(fdir,f) - loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) - - with open(resultfile_path, "a") as outfile: - infile = open(obsdbfile_path, "r") - for line in infile: - outfile.write("%s,%s,%s" % (obsid, nodeid, str(line))) - infile.close() - - os.remove(obsdbfile_path) - logger.debug("Finished with powerprof collection.") - processeditem = list(queueitem) - processeditem[0] = ITEM_PROCESSED - - return (_errors, processeditem) - except: - msg = "Error in powerprof worker process: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - _errors.append((msg, errno.ECOMM, obsid)) - logqueue.put_nowait((loggername, logging.ERROR, msg)) - finally: - processeditem = list(queueitem) - processeditem[0] = ITEM_PROCESSED - return (_errors, tuple(processeditem)) + try: + _errors = [] + cur_p = multiprocessing.current_process() + (itemtype, obsid, fdir, f, workerstate) = queueitem + obsdbfile_path = "%s/%s"%(fdir,f) + loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) + + with open(resultfile_path, "a") as outfile: + infile = open(obsdbfile_path, "r") + for line in infile: + outfile.write("%s,%s,%s" % (obsid, nodeid, str(line))) + infile.close() + + os.remove(obsdbfile_path) + logger.debug("Finished with powerprof collection.") + processeditem = list(queueitem) + processeditem[0] = ITEM_PROCESSED + + return (_errors, processeditem) + except: + msg = "Error in powerprof worker process: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + _errors.append((msg, 
errno.ECOMM, obsid)) + logqueue.put_nowait((loggername, logging.ERROR, msg)) + finally: + processeditem = list(queueitem) + processeditem[0] = ITEM_PROCESSED + return (_errors, tuple(processeditem)) ### END worker_powerprof # @@ -376,87 +376,87 @@ def worker_powerprof(queueitem=None, nodeid=None, resultfile_path=None, slotcali ############################################################################## # worker_args = [obsid, nodeid, obsdbfilepath, testresultsfile_dict['gpiotracing'][0], testresultsfile_dict['gpioactuation'][0], testresultsfile_dict['powerprofiling'][0], testresultsfile_dict['errorlog'][0], obsdict_byid[obsid][1][1], obsdict_byid[obsid][1][0], epoch_teststarttime, epoch_eteststoptime, vizimgdir, None, logqueue, PpStatsQueue] def worker_flockdaq(queueitem=None, nodeid=None, tracingresults_path=None, actuationresults_path=None, powerprofresults_path=None, error_path=None, slotcalib_factor=1, slotcalib_offset=0, test_start_time=0, test_stop_time=-1, vizimgdir=None, viz_f_pp=None, viz_f_tr=None, logqueue=None, PpStatsQueue=None): - try: - _errors = [] - cur_p = multiprocessing.current_process() - (itemtype, obsid, fdir, f, workerstate) = queueitem - obsdbfile_path = "%s/%s"%(fdir,f) - loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) - if workerstate is None: - pin_level_prev = 1 - p_sample_list = () - p_current_second = 0 - pp_start_sec = 0 - p_start_500ns = 0 - t_current_second = 0 - else: - (pin_level_prev, p_sample_list, p_current_second, pp_start_sec, p_start_500ns, t_current_second) = workerstate - # Rename file: - logqueue.put_nowait((loggername, logging.DEBUG, "Import file %s"%obsdbfile_path)) - - # Use fast C-implementation to fetch values: - try: - #logqueue.put_nowait((loggername, logging.DEBUG, "cResultfetcher started. 
obsid: %d, nodeid: %d, slotcalib_factor: %f, slotcalib_offset: %f"%(int(obsid), int(nodeid), slotcalib_factor, slotcalib_offset))) - # arguments: - # obsid: Observer ID integer - # nodeid: Node ID integer - # obsdbfilepath: Database file string - # tracingresults: Output file for tracing results string - # actuationresults: Output file for actuation results string - # powerprofresults: Output file for powerprof results string - # errorlog: Output file for error messages string - # slotcalib_factor: Calibration factor double - # slotcalib_offset: Calibration offset double - # start_test_epoch: Start-Time of Test (UNIX timestamp in seconds) integer - # stop_test_epoch: Stop-Time of Test (UNIX timestamp in seconds) integer - - - ret = cResultfetcher.getdaqresults(obsid=int(obsid), nodeid=int(nodeid), obsdbfilepath=obsdbfile_path, tracingresults=tracingresults_path, actuationresults=actuationresults_path, powerprofresults=powerprofresults_path, errorlog=error_path,slotcalib_factor=slotcalib_factor, slotcalib_offset=slotcalib_offset, start_test_epoch=test_start_time, stop_test_epoch=test_stop_time, pin_level_prev=pin_level_prev, p_sample_list=p_sample_list, p_current_second=p_current_second, pp_start_sec=pp_start_sec, p_start_500ns=p_start_500ns, t_current_second=t_current_second) - #logqueue.put_nowait((loggername, logging.DEBUG, "cResultfetcher done.")) - except: - raise - - logger.debug("Done converting FlockDAQ results...") - - # Do vizualisation: - workerstate = None - if ((type(ret) == list) and (len(ret) > 0)): - #logger.debug("return value had len %d"%len(ret)) - if viz_f_pp != None: - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) - viz_f_pp(testid, owner_fk, ret[0:2], obsid, vizimgdir, logger) - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) - if viz_f_tr != None: - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) - viz_f_tr(testid, owner_fk, ret[2], obsid, vizimgdir, logger) - #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) - ppstats = PpStatsQueue.get() - (totalavg, totalcount) = ppstats[obsid] - newcount = totalcount + ret[4] - if (newcount != 0): - avg = float(totalcount) / newcount * totalavg + float(ret[4]) / newcount * ret[3] - ppstats[obsid] = (avg,newcount) - PpStatsQueue.put(ppstats) - #logqueue.put_nowait((loggername, logging.DEBUG, "AVG values: %f %d + (%f %d) -> %f %d" % (totalavg, totalcount, ret[3], ret[4], avg, newcount))) - workerstate = tuple(ret[5:11]) - - # Remove processed file: - os.unlink(obsdbfile_path) - - #if workerstate is None: - # logger.debug("workerstate is None") - #else: - # logger.debug("workerstate is %s" % str(workerstate)) - - except: - msg = "Error in flockdaq worker process: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - _errors.append((msg, errno.ECOMM, obsid)) - logqueue.put_nowait((loggername, logging.ERROR, msg)) - finally: - processeditem = list(queueitem) - processeditem[0] = ITEM_PROCESSED - processeditem[4] = workerstate - return (_errors, tuple(processeditem)) + try: + _errors = [] + cur_p = multiprocessing.current_process() + (itemtype, obsid, fdir, f, workerstate) = queueitem + obsdbfile_path = "%s/%s"%(fdir,f) + loggername = "(%s).(Observer %d)"%(cur_p.name, obsid) + if workerstate is None: + pin_level_prev = 1 + p_sample_list = () + p_current_second = 0 + pp_start_sec = 0 + p_start_500ns = 0 + t_current_second = 0 + else: + (pin_level_prev, p_sample_list, p_current_second, pp_start_sec, p_start_500ns, t_current_second) = workerstate + # Rename file: + 
logqueue.put_nowait((loggername, logging.DEBUG, "Import file %s"%obsdbfile_path)) + + # Use fast C-implementation to fetch values: + try: + #logqueue.put_nowait((loggername, logging.DEBUG, "cResultfetcher started. obsid: %d, nodeid: %d, slotcalib_factor: %f, slotcalib_offset: %f"%(int(obsid), int(nodeid), slotcalib_factor, slotcalib_offset))) + # arguments: + # obsid: Observer ID integer + # nodeid: Node ID integer + # obsdbfilepath: Database file string + # tracingresults: Output file for tracing results string + # actuationresults: Output file for actuation results string + # powerprofresults: Output file for powerprof results string + # errorlog: Output file for error messages string + # slotcalib_factor: Calibration factor double + # slotcalib_offset: Calibration offset double + # start_test_epoch: Start-Time of Test (UNIX timestamp in seconds) integer + # stop_test_epoch: Stop-Time of Test (UNIX timestamp in seconds) integer + + + ret = cResultfetcher.getdaqresults(obsid=int(obsid), nodeid=int(nodeid), obsdbfilepath=obsdbfile_path, tracingresults=tracingresults_path, actuationresults=actuationresults_path, powerprofresults=powerprofresults_path, errorlog=error_path,slotcalib_factor=slotcalib_factor, slotcalib_offset=slotcalib_offset, start_test_epoch=test_start_time, stop_test_epoch=test_stop_time, pin_level_prev=pin_level_prev, p_sample_list=p_sample_list, p_current_second=p_current_second, pp_start_sec=pp_start_sec, p_start_500ns=p_start_500ns, t_current_second=t_current_second) + #logqueue.put_nowait((loggername, logging.DEBUG, "cResultfetcher done.")) + except: + raise + + logger.debug("Done converting FlockDAQ results...") + + # Do vizualisation: + workerstate = None + if ((type(ret) == list) and (len(ret) > 0)): + #logger.debug("return value had len %d"%len(ret)) + if viz_f_pp != None: + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) + viz_f_pp(testid, owner_fk, ret[0:2], obsid, vizimgdir, logger) + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) + if viz_f_tr != None: + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz started...")) + viz_f_tr(testid, owner_fk, ret[2], obsid, vizimgdir, logger) + #logqueue.put_nowait((loggername, logging.DEBUG, "Viz done.")) + ppstats = PpStatsQueue.get() + (totalavg, totalcount) = ppstats[obsid] + newcount = totalcount + ret[4] + if (newcount != 0): + avg = float(totalcount) / newcount * totalavg + float(ret[4]) / newcount * ret[3] + ppstats[obsid] = (avg,newcount) + PpStatsQueue.put(ppstats) + #logqueue.put_nowait((loggername, logging.DEBUG, "AVG values: %f %d + (%f %d) -> %f %d" % (totalavg, totalcount, ret[3], ret[4], avg, newcount))) + workerstate = tuple(ret[5:11]) + + # Remove processed file: + os.unlink(obsdbfile_path) + + #if workerstate is None: + # logger.debug("workerstate is None") + #else: + # logger.debug("workerstate is %s" % str(workerstate)) + + except: + msg = "Error in flockdaq worker process: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + _errors.append((msg, errno.ECOMM, obsid)) + logqueue.put_nowait((loggername, logging.ERROR, msg)) + finally: + processeditem = list(queueitem) + processeditem[0] = ITEM_PROCESSED + processeditem[4] = workerstate + return (_errors, tuple(processeditem)) ### END worker_flockdaq @@ -464,24 +464,24 @@ def worker_flockdaq(queueitem=None, nodeid=None, tracingresults_path=None, actua ############################################################################## # # worker_callback: Callback function which reports errors from worker processes -# 
back to the main process +# back to the main process # ############################################################################## def worker_callback(result): - global errors - global FetchObsThread_queue - - if len(result[0]) > 0: - for (err, eno, obsid) in result: - msg = "Error %d when processing results for Observer ID %s: %s"%(eno, obsid, err) - errors.append(msg) - - try: - FetchObsThread_queue.put(item=result[1], block=True, timeout=10) - except queue.Full: - msg = "Queue full after processing element" - logger.error(msg) - return 0 + global errors + global FetchObsThread_queue + + if len(result[0]) > 0: + for (err, eno, obsid) in result: + msg = "Error %d when processing results for Observer ID %s: %s"%(eno, obsid, err) + errors.append(msg) + + try: + FetchObsThread_queue.put(item=result[1], block=True, timeout=10) + except queue.Full: + msg = "Queue full after processing element" + logger.error(msg) + return 0 ### END worker_callback @@ -492,27 +492,27 @@ def worker_callback(result): # ############################################################################## class LogQueueThread(threading.Thread): - """ Thread which logs from queue to logfile. - """ - def __init__(self, logqueue, logger, stopEvent): - threading.Thread.__init__(self) - self._logger = logger - self._stopEvent = stopEvent - self._logqueue = logqueue - - def run(self): - self._logger.info("LogQueueThread started") - - # Let thread run until someone calls terminate() on it: - while not self._stopEvent.is_set(): - try: - (loggername, loglevel, msg) = self._logqueue.get(block=True, timeout=1) - self._logger.log(loglevel, loggername + msg) - except queue.Empty: - pass - - # Stop the process: - self._logger.info("LogQueueThread stopped") + """ Thread which logs from queue to logfile. + """ + def __init__(self, logqueue, logger, stopEvent): + threading.Thread.__init__(self) + self._logger = logger + self._stopEvent = stopEvent + self._logqueue = logqueue + + def run(self): + self._logger.info("LogQueueThread started") + + # Let thread run until someone calls terminate() on it: + while not self._stopEvent.is_set(): + try: + (loggername, loglevel, msg) = self._logqueue.get(block=True, timeout=1) + self._logger.log(loglevel, loggername + msg) + except queue.Empty: + pass + + # Stop the process: + self._logger.info("LogQueueThread stopped") ### END LogQueueThread @@ -523,136 +523,136 @@ class LogQueueThread(threading.Thread): # ############################################################################## class FetchObsThread(threading.Thread): - """ Thread which downloads database files from an observer to the server. - """ - def __init__(self, obsid, obsethernet, dirname, debugdirname, config, logger, workQueue, stopEvent): - threading.Thread.__init__(self) - self._obsid = obsid - self._obsethernet = obsethernet - self._obsfiledir = dirname - self._obsfiledebugdir = debugdirname - self._workQueue = workQueue - self._stopEvent = stopEvent - self._logger = logger - - self._min_sleep = config.getint("fetcher", "min_sleeptime") - self._max_randsleep = config.getint("fetcher", "max_rand_sleeptime") - self._obsdbfolder = "%s/%d" % (config.get("observer", "obsdbfolder"), testid) - - def run(self): - try: - self._loggerprefix = "(FetchObsThread).(Observer %d): "%self._obsid - self._logger.info(self._loggerprefix + "FetchObsThread starting...") - removelast = True - - # Let thread run until someone calls terminate() on it: - while removelast == True: - """ Get data from the observer over SCP. 
- Then request data from the observer and store it in the server's filesystem. - Then sleep some random time before fetching data again. - """ - # Wait for some random time: - waittime =self._min_sleep + random.randrange(0,self._max_randsleep) - #DEBUG self._logger.debug(self._loggerprefix + "Going to wait for %d seconds" %(waittime)) - self._stopEvent.wait(waittime) # The wait will be interrupted if the stop signal has been set causing the thread to download all remaining files - if self._stopEvent.is_set(): - removelast = False - #self._logger.debug(self._loggerprefix + "Woke up") - # Get list of available files - cmd = ['ssh' ,'%s'%(self._obsethernet), "ls %s/" % self._obsdbfolder] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # universal_newlines makes sure that a string is returned instead of a byte object - out, err = p.communicate(None) - rs = p.returncode - if (rs == SUCCESS): - services = {} - for servicename in [ "gpio_setting","gpio_monitor","powerprofiling","serial","flockdaq"]: - services[servicename] = ServiceInfo(servicename) - services["error_%s"%servicename] = ServiceInfo("error_%s"%servicename) - # Read filenames - for dbfile in out.split(): - # Check name and append to corresponding list - for service in services.values(): - if service.matchFileName(dbfile): - service.addFile("%s/%s" % (self._obsdbfolder, dbfile)) - break - copyfilelist = [] - # Remove latest from each list as the observer might still be writing into it (unless stop event has been set). - for service in services.values(): - service.stripFileList(removelast) - for dbfile in service.files: - copyfilelist.append(dbfile) - #if (len(service.files) > 0): - # self._logger.debug(self._loggerprefix + "Will process files %s for service %s" % (service.files, service.servicename)) - - if len(copyfilelist) > 0: - # Download the database files: - self._logger.debug(self._loggerprefix + "Downloading database files...") - cmd = ['scp', '-q' ] - cmd.extend(["%s:%s"%(self._obsethernet, x) for x in copyfilelist]) - cmd.append("%s/"%self._obsfiledir) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate(None) - rs = p.wait() - if rs != 0: - self._logger.debug(self._loggerprefix + "Could not download all DB files from observer. Dataloss occurred for this observer.") - self._logger.debug(self._loggerprefix + "Tried to execute %s, result was %d, stdout: %s, error: %s."%(str(cmd), rs, out, err)) - else: - #self._logger.debug("Downloaded all observer DB files from observer.") - # put a copy to the debug directory - for f in copyfilelist: - fname = os.path.basename(f) - shutil.copyfile("%s/%s"%(self._obsfiledir, fname), "%s/%s"%(self._obsfiledebugdir, fname)) - # Tell the fetcher to start working on the files: - for f in copyfilelist: - fname = os.path.basename(f) - try: - self._workQueue.put(item=(ITEM_TO_PROCESS, self._obsid, self._obsfiledir, fname, None), block=True, timeout=10) - except queue.Full: - # Make sure the file is downloaded again at a later point: - copyfilelist.remove(f) - os.unlink("%s/%s"%(self._obsfiledir, fname)) - self._logger.warn(self._loggerprefix + "FetchObsThread queue is full. 
Cannot put %s/%s on it."%(self._obsfiledir, fname)) - #DEBUG self._logger.debug(self._loggerprefix + "Put all files onto queue.") - # Remove remote files if any are left: - if (len(copyfilelist) > 0): - cmd = ['ssh' ,'%s'%(self._obsethernet), "cd %s;"%self._obsdbfolder, "rm"] - cmd.extend(copyfilelist) - self._logger.debug(self._loggerprefix + "Removing files on observer...") - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate(None) - rs = p.wait() - if (rs != SUCCESS): - self._logger.error(self._loggerprefix + "Could not remove files on observer, result was %d, stdout: %s, error: %s."%(rs, out, err)) - else: - self._logger.debug(self._loggerprefix + "No files left to delete on observer.") - else: - self._logger.debug(self._loggerprefix + "No files to download from observer.") - - if removelast == False: # this is the last execution of the while loop - cmd = ['ssh' ,'%s'%(self._obsethernet), "rm -rf %s" % self._obsdbfolder] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate(None) - rs = p.wait() - if (rs != SUCCESS): - self._logger.error(self._loggerprefix + "Could not remove db directory from observer, result was %d, stdout: %s, error: %s."%(rs, out, err)) - else: - cause = "unknown error" - abort = False - if (rs == 255): - cause = "observer offline" - elif (rs == 1): - if ("No such file or directory" in str(err)): - cause = "SD card not mounted" - abort = True - self._logger.error(self._loggerprefix + "SSH to observer did not succeed (%s), result was %d, stdout: %s, error: %s."%(cause, rs, out, err)) - if abort: - break - - except: - logger.error(self._loggerprefix + "FetchObsThread crashed: %s, %s\n%s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) - - # Stop the process: - self._logger.info(self._loggerprefix + "FetchObsThread stopped") + """ Thread which downloads database files from an observer to the server. + """ + def __init__(self, obsid, obsethernet, dirname, debugdirname, config, logger, workQueue, stopEvent): + threading.Thread.__init__(self) + self._obsid = obsid + self._obsethernet = obsethernet + self._obsfiledir = dirname + self._obsfiledebugdir = debugdirname + self._workQueue = workQueue + self._stopEvent = stopEvent + self._logger = logger + + self._min_sleep = config.getint("fetcher", "min_sleeptime") + self._max_randsleep = config.getint("fetcher", "max_rand_sleeptime") + self._obsdbfolder = "%s/%d" % (config.get("observer", "obsdbfolder"), testid) + + def run(self): + try: + self._loggerprefix = "(FetchObsThread).(Observer %d): "%self._obsid + self._logger.info(self._loggerprefix + "FetchObsThread starting...") + removelast = True + + # Let thread run until someone calls terminate() on it: + while removelast == True: + """ Get data from the observer over SCP. + Then request data from the observer and store it in the server's filesystem. + Then sleep some random time before fetching data again. 
+ """ + # Wait for some random time: + waittime =self._min_sleep + random.randrange(0,self._max_randsleep) + #DEBUG self._logger.debug(self._loggerprefix + "Going to wait for %d seconds" %(waittime)) + self._stopEvent.wait(waittime) # The wait will be interrupted if the stop signal has been set causing the thread to download all remaining files + if self._stopEvent.is_set(): + removelast = False + #self._logger.debug(self._loggerprefix + "Woke up") + # Get list of available files + cmd = ['ssh' ,'%s'%(self._obsethernet), "ls %s/" % self._obsdbfolder] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # universal_newlines makes sure that a string is returned instead of a byte object + out, err = p.communicate(None) + rs = p.returncode + if (rs == SUCCESS): + services = {} + for servicename in [ "gpio_setting","gpio_monitor","powerprofiling","serial","flockdaq"]: + services[servicename] = ServiceInfo(servicename) + services["error_%s"%servicename] = ServiceInfo("error_%s"%servicename) + # Read filenames + for dbfile in out.split(): + # Check name and append to corresponding list + for service in services.values(): + if service.matchFileName(dbfile): + service.addFile("%s/%s" % (self._obsdbfolder, dbfile)) + break + copyfilelist = [] + # Remove latest from each list as the observer might still be writing into it (unless stop event has been set). + for service in services.values(): + service.stripFileList(removelast) + for dbfile in service.files: + copyfilelist.append(dbfile) + #if (len(service.files) > 0): + # self._logger.debug(self._loggerprefix + "Will process files %s for service %s" % (service.files, service.servicename)) + + if len(copyfilelist) > 0: + # Download the database files: + self._logger.debug(self._loggerprefix + "Downloading database files...") + cmd = ['scp', '-q' ] + cmd.extend(["%s:%s"%(self._obsethernet, x) for x in copyfilelist]) + cmd.append("%s/"%self._obsfiledir) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate(None) + rs = p.wait() + if rs != 0: + self._logger.debug(self._loggerprefix + "Could not download all DB files from observer. Dataloss occurred for this observer.") + self._logger.debug(self._loggerprefix + "Tried to execute %s, result was %d, stdout: %s, error: %s."%(str(cmd), rs, out, err)) + else: + #self._logger.debug("Downloaded all observer DB files from observer.") + # put a copy to the debug directory + for f in copyfilelist: + fname = os.path.basename(f) + shutil.copyfile("%s/%s"%(self._obsfiledir, fname), "%s/%s"%(self._obsfiledebugdir, fname)) + # Tell the fetcher to start working on the files: + for f in copyfilelist: + fname = os.path.basename(f) + try: + self._workQueue.put(item=(ITEM_TO_PROCESS, self._obsid, self._obsfiledir, fname, None), block=True, timeout=10) + except queue.Full: + # Make sure the file is downloaded again at a later point: + copyfilelist.remove(f) + os.unlink("%s/%s"%(self._obsfiledir, fname)) + self._logger.warn(self._loggerprefix + "FetchObsThread queue is full. 
Cannot put %s/%s on it."%(self._obsfiledir, fname)) + #DEBUG self._logger.debug(self._loggerprefix + "Put all files onto queue.") + # Remove remote files if any are left: + if (len(copyfilelist) > 0): + cmd = ['ssh' ,'%s'%(self._obsethernet), "cd %s;"%self._obsdbfolder, "rm"] + cmd.extend(copyfilelist) + self._logger.debug(self._loggerprefix + "Removing files on observer...") + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate(None) + rs = p.wait() + if (rs != SUCCESS): + self._logger.error(self._loggerprefix + "Could not remove files on observer, result was %d, stdout: %s, error: %s."%(rs, out, err)) + else: + self._logger.debug(self._loggerprefix + "No files left to delete on observer.") + else: + self._logger.debug(self._loggerprefix + "No files to download from observer.") + + if removelast == False: # this is the last execution of the while loop + cmd = ['ssh' ,'%s'%(self._obsethernet), "rm -rf %s" % self._obsdbfolder] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate(None) + rs = p.wait() + if (rs != SUCCESS): + self._logger.error(self._loggerprefix + "Could not remove db directory from observer, result was %d, stdout: %s, error: %s."%(rs, out, err)) + else: + cause = "unknown error" + abort = False + if (rs == 255): + cause = "observer offline" + elif (rs == 1): + if ("No such file or directory" in str(err)): + cause = "SD card not mounted" + abort = True + self._logger.error(self._loggerprefix + "SSH to observer did not succeed (%s), result was %d, stdout: %s, error: %s."%(cause, rs, out, err)) + if abort: + break + + except: + logger.error(self._loggerprefix + "FetchObsThread crashed: %s, %s\n%s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) + + # Stop the process: + self._logger.info(self._loggerprefix + "FetchObsThread stopped") ### END FetchObsThread @@ -663,74 +663,74 @@ class FetchObsThread(threading.Thread): # ############################################################################## def start_fetcher(): - global obsfiledir - global FetchObsThread_list - global FetchObsThread_queue - global FetchObsThread_stopEvent - global obsfetcher_dict - - # Daemonize the process --- - daemon.daemonize(None, closedesc=False) - logger.info("Daemon started") - logger.info("Going to fetch data for test ID %d" %testid) - - # Get needed metadata from database --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - try: - cur.execute(""" SELECT `a`.observer_id, `a`.ethernet_address - FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `b` ON `a`.serv_observer_key = `b`.observer_fk - WHERE `b`.test_fk = %d GROUP BY `a`.observer_id; - """ %testid) - except MySQLdb.Error as err: - msg = str(err) - flocklab.error_logandexit(msg, errno.EIO, name, logger, config) - except: - logger.warn("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - rs = cur.fetchall() - cur.close() - cn.close() - logger.debug("Got list of FlockLab observers from database.") - if not rs: - logger.info("No observers found for this test. 
Nothing has to be done, thus exiting...") - return errno.ENODATA - - # Start fetcher threads --- - # Create a directory structure to store the downloaded files from the DB: - obsfiledir = "%s/%d" %(config.get('fetcher', 'obsfile_dir'), testid) - if not os.path.exists(obsfiledir): - os.makedirs(obsfiledir) - obsfiledebugdir = "%s/%d" %(config.get('fetcher', 'obsfile_debug_dir'), testid) - if not os.path.exists(obsfiledebugdir): - os.makedirs(obsfiledebugdir) - #DEBUG logger.debug("Created %s"%obsfiledir) - # Start one fetching thread per observer - FetchObsThread_stopEvent = threading.Event() - FetchObsThread_queue = queue.Queue(maxsize=10000) - for observer in rs: - obsid = int(observer[0]) - # Create needed directories: - dirname = "%s/%d"%(obsfiledir, obsid) - if (not os.path.exists(dirname)): - os.makedirs(dirname) - debugdirname = "%s/%d"%(obsfiledebugdir, obsid) - if (not os.path.exists(debugdirname)): - os.makedirs(debugdirname) - # Start thread: - try: - thread = FetchObsThread(obsid, observer[1], dirname, debugdirname, config, logger, FetchObsThread_queue, FetchObsThread_stopEvent) - FetchObsThread_list.append(thread) - thread.start() - logger.debug("Started fetcher thread for observer %d" %(obsid)) - except: - logger.warn("Error when starting fetcher thread for observer %d: %s: %s" %(obsid, str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - continue - - return SUCCESS + global obsfiledir + global FetchObsThread_list + global FetchObsThread_queue + global FetchObsThread_stopEvent + global obsfetcher_dict + + # Daemonize the process --- + daemon.daemonize(None, closedesc=False) + logger.info("Daemon started") + logger.info("Going to fetch data for test ID %d" %testid) + + # Get needed metadata from database --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + cur.execute(""" SELECT `a`.observer_id, `a`.ethernet_address + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_map_test_observer_targetimages` AS `b` ON `a`.serv_observer_key = `b`.observer_fk + WHERE `b`.test_fk = %d GROUP BY `a`.observer_id; + """ %testid) + except MySQLdb.Error as err: + msg = str(err) + flocklab.error_logandexit(msg, errno.EIO, name, logger, config) + except: + logger.warn("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + rs = cur.fetchall() + cur.close() + cn.close() + logger.debug("Got list of FlockLab observers from database.") + if not rs: + logger.info("No observers found for this test. 
Nothing has to be done, thus exiting...") + return errno.ENODATA + + # Start fetcher threads --- + # Create a directory structure to store the downloaded files from the DB: + obsfiledir = "%s/%d" %(config.get('fetcher', 'obsfile_dir'), testid) + if not os.path.exists(obsfiledir): + os.makedirs(obsfiledir) + obsfiledebugdir = "%s/%d" %(config.get('fetcher', 'obsfile_debug_dir'), testid) + if not os.path.exists(obsfiledebugdir): + os.makedirs(obsfiledebugdir) + #DEBUG logger.debug("Created %s"%obsfiledir) + # Start one fetching thread per observer + FetchObsThread_stopEvent = threading.Event() + FetchObsThread_queue = queue.Queue(maxsize=10000) + for observer in rs: + obsid = int(observer[0]) + # Create needed directories: + dirname = "%s/%d"%(obsfiledir, obsid) + if (not os.path.exists(dirname)): + os.makedirs(dirname) + debugdirname = "%s/%d"%(obsfiledebugdir, obsid) + if (not os.path.exists(debugdirname)): + os.makedirs(debugdirname) + # Start thread: + try: + thread = FetchObsThread(obsid, observer[1], dirname, debugdirname, config, logger, FetchObsThread_queue, FetchObsThread_stopEvent) + FetchObsThread_list.append(thread) + thread.start() + logger.debug("Started fetcher thread for observer %d" %(obsid)) + except: + logger.warn("Error when starting fetcher thread for observer %d: %s: %s" %(obsid, str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + continue + + return SUCCESS ### END start_fetcher @@ -741,44 +741,44 @@ def start_fetcher(): # ############################################################################## def stop_fetcher(): - # Get oldest running instance of the fetcher for the selected test ID which is the main process and send it the terminate signal: - try: - pid = flocklab.get_fetcher_pid(testid) - # Signal the process to stop: - if (pid > 0): - # Do not stop this instance if it is the only one running: - if (pid == os.getpid()): - raise Error - logger.debug("Sending SIGTERM signal to process %d" %pid) - try: - os.kill(pid, signal.SIGTERM) - # wait for process to finish (timeout..) - shutdown_timeout = config.getint("fetcher", "shutdown_timeout") - pidpath = "/proc/%d"%pid - while os.path.exists(pidpath) & (shutdown_timeout>0): - time.sleep(1) - shutdown_timeout = shutdown_timeout - 1 - if os.path.exists(pidpath): - logger.warn("Fetcher is still running.") - except: - pass - else: - raise Error - except (ValueError, Error): - logger.debug("Fetcher daemon was not running, thus it cannot be stopped.") - # Set DB status in order to allow dispatcher and scheduler to go on.: - logger.debug("Setting test status in DB to 'synced'...") - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - flocklab.set_test_status(cur, cn, testid, 'synced') - cur.close() - cn.close() - except: - logger.warn("Could not connect to database") - - return errno.ENOPKG - - return SUCCESS + # Get oldest running instance of the fetcher for the selected test ID which is the main process and send it the terminate signal: + try: + pid = flocklab.get_fetcher_pid(testid) + # Signal the process to stop: + if (pid > 0): + # Do not stop this instance if it is the only one running: + if (pid == os.getpid()): + raise Error + logger.debug("Sending SIGTERM signal to process %d" %pid) + try: + os.kill(pid, signal.SIGTERM) + # wait for process to finish (timeout..) 
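The shutdown path in stop_fetcher() boils down to: send SIGTERM to the oldest running fetcher instance, then poll /proc/<pid> once per second until the process disappears or the configured timeout runs out. A standalone sketch of that wait loop (hypothetical helper; note the original spells the condition with the bitwise & operator, which happens to work on these boolean operands, though `and` is the clearer choice):

import os, time

def wait_for_exit(pid, timeout_s):
    # Poll /proc/<pid> once per second until the process is gone or the timeout is used up.
    while os.path.exists("/proc/%d" % pid) and timeout_s > 0:
        time.sleep(1)
        timeout_s -= 1
    return not os.path.exists("/proc/%d" % pid)    # True if the process has exited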
+ shutdown_timeout = config.getint("fetcher", "shutdown_timeout") + pidpath = "/proc/%d"%pid + while os.path.exists(pidpath) & (shutdown_timeout>0): + time.sleep(1) + shutdown_timeout = shutdown_timeout - 1 + if os.path.exists(pidpath): + logger.warn("Fetcher is still running.") + except: + pass + else: + raise Error + except (ValueError, Error): + logger.debug("Fetcher daemon was not running, thus it cannot be stopped.") + # Set DB status in order to allow dispatcher and scheduler to go on.: + logger.debug("Setting test status in DB to 'synced'...") + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + flocklab.set_test_status(cur, cn, testid, 'synced') + cur.close() + cn.close() + except: + logger.warn("Could not connect to database") + + return errno.ENOPKG + + return SUCCESS ### END stop_fetcher @@ -788,51 +788,51 @@ def stop_fetcher(): # ############################################################################## class WorkManager(): - def __init__(self): - self.worklist = {} - self.pattern = re.compile("_[0-9].*") - self.workcount = 0 - - def _next_item_with_state(self, service, obsid): - stateitem = list(self.worklist[service][obsid][1][0]) - stateitem[4] = self.worklist[service][obsid][0] - return tuple(stateitem) - - def add(self, item): - service = self.pattern.sub("",item[3]) - obsid = item[1] - if service not in self.worklist: - self.worklist[service] = {} - if obsid not in self.worklist[service]: - self.worklist[service][obsid] = [None, []] # workerstate / worklist - # if list is empty, we're good to process, otherwise just append it and return None - if len(self.worklist[service][obsid][1]) == 0: - self.worklist[service][obsid][1].append(item) - self.workcount = self.workcount + 1 - return self._next_item_with_state(service, obsid) - else: - self.worklist[service][obsid][1].append(item) - self.workcount = self.workcount + 1 - return None - - def done(self, item): - service = self.pattern.sub("",item[3]) - obsid = item[1] - if item[1:-1] == self.worklist[service][obsid][1][0][1:-1]: - self.worklist[service][obsid][0] = item[4] # save state - self.worklist[service][obsid][1].pop(0) - self.workcount = self.workcount - 1 - else: - logger.error("work done for item that was not enqueued: %s" % str(item)) - # if there is more work to do, return next item - if len(self.worklist[service][obsid][1]) > 0: - return self._next_item_with_state(service, obsid) - else: - return None - - def finished(self): - return self.workcount == 0 - + def __init__(self): + self.worklist = {} + self.pattern = re.compile("_[0-9].*") + self.workcount = 0 + + def _next_item_with_state(self, service, obsid): + stateitem = list(self.worklist[service][obsid][1][0]) + stateitem[4] = self.worklist[service][obsid][0] + return tuple(stateitem) + + def add(self, item): + service = self.pattern.sub("",item[3]) + obsid = item[1] + if service not in self.worklist: + self.worklist[service] = {} + if obsid not in self.worklist[service]: + self.worklist[service][obsid] = [None, []] # workerstate / worklist + # if list is empty, we're good to process, otherwise just append it and return None + if len(self.worklist[service][obsid][1]) == 0: + self.worklist[service][obsid][1].append(item) + self.workcount = self.workcount + 1 + return self._next_item_with_state(service, obsid) + else: + self.worklist[service][obsid][1].append(item) + self.workcount = self.workcount + 1 + return None + + def done(self, item): + service = self.pattern.sub("",item[3]) + obsid = item[1] + if item[1:-1] == 
self.worklist[service][obsid][1][0][1:-1]: + self.worklist[service][obsid][0] = item[4] # save state + self.worklist[service][obsid][1].pop(0) + self.workcount = self.workcount - 1 + else: + logger.error("work done for item that was not enqueued: %s" % str(item)) + # if there is more work to do, return next item + if len(self.worklist[service][obsid][1]) > 0: + return self._next_item_with_state(service, obsid) + else: + return None + + def finished(self): + return self.workcount == 0 + ### END WorkManager @@ -842,12 +842,12 @@ class WorkManager(): # ############################################################################## def usage(): - print("Usage: %s --testid=<int> [--stop] [--debug] [--help]" %scriptname) - print("Options:") - print(" --testid=<int>\t\tTest ID of test to which incoming data belongs.") - print(" --stop\t\t\tOptional. Causes the program to stop a possibly running instance of the fetcher.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s --testid=<int> [--stop] [--debug] [--help]" %scriptname) + print("Options:") + print(" --testid=<int>\t\tTest ID of test to which incoming data belongs.") + print(" --stop\t\t\tOptional. Causes the program to stop a possibly running instance of the fetcher.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -858,467 +858,467 @@ def usage(): # ############################################################################## def main(argv): - - ### Get global variables ### - global logger - global debug - global testid - global config - global testresultsdir - global testresultsfile_dict - global owner_fk - global pindict - global obsdict_byid - global servicedict - global serialdict - - stop = False - - # Set timezone to UTC --- - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file --- - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." 
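Referring back to the WorkManager class above: it serializes queue items per (service, observer) pair, handing out at most one item at a time and threading the saved worker state through slot 4 of the item tuple. A hypothetical usage sketch, assuming WorkManager, ITEM_TO_PROCESS and ITEM_PROCESSED are in scope (observer ID and paths are illustrative):

wm = WorkManager()
first  = (ITEM_TO_PROCESS, 7, "/obsfiles/7", "powerprofiling_0001.db", None)
second = (ITEM_TO_PROCESS, 7, "/obsfiles/7", "powerprofiling_0002.db", None)

item = wm.add(first)             # first item for (powerprofiling, obs 7): returned for processing
assert item[4] is None           # no saved worker state yet
assert wm.add(second) is None    # same service/observer is busy, so it is only queued

done = (ITEM_PROCESSED, 7, "/obsfiles/7", "powerprofiling_0001.db", ("carried", "state"))
item = wm.done(done)             # pops the finished item and returns the queued one...
assert item[3] == "powerprofiling_0002.db"
assert item[4] == ("carried", "state")   # ...with the worker state carried over
assert not wm.finished()         # one item is still in flight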
- flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get command line parameters --- - try: - opts, args = getopt.getopt(argv, "hedt:", ["help", "stop", "debug", "testid="]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - for opt, arg in opts: - if opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-d", "--debug"): - debug = True - logger.debug("Detected debug flag.") - logger.setLevel(logging.DEBUG) - elif opt in ("-t", "--testid"): - try: - testid = int(arg) - except ValueError: - err = "Wrong API usage: testid has to be integer" - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - elif opt in ("-e", "--stop"): - stop = True - else: - print("Wrong API usage") - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Check if the necessary parameters are set --- - if not testid: - print("Wrong API usage") - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Check if the Test ID exists in the database --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - rs = flocklab.check_test_id(cur, testid) - cur.close() - cn.close() - if rs != 0: - if rs == 3: - msg = "Test ID %d does not exist in database." %testid - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - else: - msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EIO, name, logger, config) - - # Add Test ID to logger name --- - logger.name += " (Test %d)"%testid - - # Start / stop the fetcher --- - ret = SUCCESS - if stop: - ret = stop_fetcher() - logger.info("FlockLab fetcher stopped.") - else: - # Start the fetcher processes which download data from the observers: - ret = start_fetcher() - if ret == SUCCESS: - logger.info("FlockLab fetcher started.") - else: - msg = "Start function returned error. Exiting..." 
- os.kill(os.getpid(), signal.SIGTERM) - rs = flocklab.error_logandexit(msg, ret, name, logger, config) - - # Get needed metadata --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - rs = flocklab.get_test_owner(cur, testid) - if isinstance(rs, tuple): - owner_fk = rs[0] - else: - owner_fk = None - rs = flocklab.get_pinmappings(cur) - if isinstance(rs, dict): - pindict = rs - else: - pindict = None - rs = flocklab.get_test_obs(cur, testid) - if isinstance(rs, tuple): - obsdict_byid = rs[1] - else: - obsdict_byid = None - # Dict for serial service: 'r' means reader (data read from the target), 'w' means writer (data written to the target): - serialdict = {0: 'r', 1: 'w'} - # Get calibration data for used slots and add it to obsdict --- - ppstats={} - for (obsid, (obskey, nodeid)) in obsdict_byid.items(): - ppstats[obsid]=(0.0,0) - rs = flocklab.get_slot_calib(cur, int(obskey), testid) - if isinstance(rs, tuple): - obsdict_byid[obsid] = (nodeid, rs) - else: - obsdict_byid = None - break - rs = flocklab.get_servicemappings(cur) - if isinstance(rs, dict): - servicedict = rs - else: - servicedict = None - - #find out the start and stoptime of the test - cur.execute("SELECT `time_start_wish`, `time_end_wish` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" %testid) - # Times are going to be of datetime type: - ret = cur.fetchone() - teststarttime = ret[0] - teststoptime = ret[1] - FlockDAQ = False - - # Find out which services are used to allocate working threads later on --- - # Get the XML config from the database and check which services are used in the test. - servicesUsed_dict = {'gpiotracing': 'gpioTracingConf', 'gpioactuation': 'gpioActuationConf', 'powerprofiling': 'powerProfilingConf', 'serial': 'serialConf'} - cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) - ret = cur.fetchone() - if not ret: - msg = "No XML found in database for testid %d." %testid - errors.append(msg) - logger.error(msg) - for service, xmlname in servicesUsed_dict.items(): - servicesUsed_dict[service] = True - else: - try: - logger.debug("Got XML from database.") - parser = etree.XMLParser(remove_comments=True) - tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) - ns = {'d': config.get('xml', 'namespace')} - for service, xmlname in servicesUsed_dict.items(): - if tree.xpath('//d:%s'%xmlname, namespaces=ns): - servicesUsed_dict[service] = True - logger.debug("Usage of %s detected."%service) - else: - servicesUsed_dict[service] = False - except: - msg = "XML parsing failed: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])) - errors.append(msg) - logger.error(msg) - - # check if user is allowed to use daq-services - cur.execute("SELECT `use_daq` FROM `tbl_serv_users` WHERE (`serv_users_key` = %s)" %owner_fk) - ret = cur.fetchone() - if ret[0] == 1: - FlockDAQ = "true" - logger.debug("Test is using FlockDAQ services") - else: - FlockDAQ = "false" - cur.close() - cn.close() - if ((owner_fk==None) or (pindict==None) or (obsdict_byid==None) or (servicedict==None)): - msg = "Error when getting metadata.\n" - msg += "owner_fk: %s\npindict: %s\nobsdict_byid: %s\nservicedict: %s\n"%(str(owner_fk), str(pindict), str(obsdict_byid), str(servicedict)) - msg += "Exiting..." 
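The service-detection step above pulls the test's XML configuration out of the database and checks, per service, whether the corresponding element occurs anywhere in the document. A self-contained sketch of that check (the namespace URI and XML snippet are made up for illustration; the script reads the real namespace from config.get('xml', 'namespace')):

from lxml import etree

XML = b"""<testConf xmlns="http://example.org/flocklab">
            <serialConf/>
            <gpioTracingConf/>
          </testConf>"""

parser = etree.XMLParser(remove_comments=True)
tree = etree.fromstring(XML, parser)
ns = {'d': 'http://example.org/flocklab'}          # real value comes from the config file

services = {'gpiotracing': 'gpioTracingConf', 'gpioactuation': 'gpioActuationConf',
            'powerprofiling': 'powerProfilingConf', 'serial': 'serialConf'}
used = {svc: bool(tree.xpath('//d:%s' % tag, namespaces=ns)) for svc, tag in services.items()}
# -> {'gpiotracing': True, 'gpioactuation': False, 'powerprofiling': False, 'serial': True}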
- logger.debug(msg) - os.kill(os.getpid(), signal.SIGTERM) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - else: - logger.debug("Got all needed metadata.") - - # Start aggregating processes --- - """ There is an infinite loop which gets files to process from the fetcher threads which download data from the observers. - Downloaded data is then assigned to a worker process for the corresponding service and in the worker process parsed, - converted (if needed) and aggregated into a single file per service. - The loop is stopped if the program receives a stop signal. In this case, the loops runs until no more database files - are there to be processed. - """ - if __name__ == '__main__': - # Create directory and files needed for test results: - testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid) - if not os.path.exists(testresultsdir): - os.makedirs(testresultsdir) - logger.debug("Created %s"%testresultsdir) - manager = multiprocessing.Manager() - for service in ('errorlog', 'gpiotracing', 'gpioactuation', 'powerprofiling', 'serial', 'powerprofilingstats'): - path = "%s/%s.csv" %(testresultsdir, service) - lock = manager.Lock() - testresultsfile_dict[service] = (path, lock) - # Create file and write header: - if service == 'errorlog': - header = '# timestamp,observer_id,node_id,errormessage\n' - elif service == 'gpiotracing': - header = 'observer_id,node_id,pin_name,# timestamp,value\n' - elif service == 'gpioactuation': - header = '# timestamp_planned,timestamp_executed,observer_id,node_id,pin_name,value\n' - elif service == 'powerprofiling': - header = 'This is a RocketLogger File with additional information about the observer id and the node id.\n' - elif service == 'serial': - header = '# timestamp,observer_id,node_id,direction,output\n' - elif service == 'powerprofilingstats': - header = '# observer_id,node_id,mean_mA\n' - lock.acquire() - f = open(path, 'w') - f.write(header) - f.close() - lock.release() - # Start logging thread: - logqueue = manager.Queue(maxsize=10000) - LogQueueThread_stopEvent = threading.Event() - try: - thread = LogQueueThread(logqueue, logger, LogQueueThread_stopEvent) - thread.start() - except: - logger.warn("Error when starting log queue thread: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - - PpStatsQueue = manager.Queue(maxsize=1) - PpStatsQueue.put(ppstats) - # Determine the number of CPU's to be used for each aggregating process. If a service is not used, its CPUs are assigned to other services - cpus_free = 0 - cpus_errorlog = config.getint('fetcher', 'cpus_errorlog') - # CPUs for serial service: - if servicesUsed_dict['serial'] == True: - cpus_serial = config.getint('fetcher', 'cpus_serial') - else: - cpus_serial = 0 - cpus_free = cpus_free + config.getint('fetcher', 'cpus_serial') - # CPUs for GPIO actuation. If the service is not used, assign a CPU anyhow since FlockLab always uses this service to determine start and stop times of a test. 
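Review note (not part of the patch): the per-service result files above are created once with a header and then shared with the worker processes together with a Manager lock, so that concurrent appends do not interleave. A minimal sketch of that setup, with example paths and only two of the headers:

# Sketch only -- not part of the patch.
import multiprocessing
import os

HEADERS = {
    'errorlog': '# timestamp,observer_id,node_id,errormessage\n',
    'serial':   '# timestamp,observer_id,node_id,direction,output\n',
}

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    testresultsdir = '/tmp/flocklab_results_demo'       # example path
    os.makedirs(testresultsdir, exist_ok=True)
    testresultsfile_dict = {}
    for service, header in HEADERS.items():
        path = os.path.join(testresultsdir, service + '.csv')
        lock = manager.Lock()
        testresultsfile_dict[service] = (path, lock)
        with lock:                      # workers reuse the same lock to append later
            with open(path, 'w') as f:
                f.write(header)
    print(testresultsfile_dict)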
- if FlockDAQ != "true": - cpus_flockdaq = 0 - #cpus_errorlog = config.getint('fetcher', 'cpus_errorlog') - if servicesUsed_dict['gpioactuation'] == True: - cpus_gpiosetting = config.getint('fetcher', 'cpus_gpiosetting') - else: - cpus_gpiosetting = 1 - cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiosetting') - cpus_gpiosetting - # CPUs for GPIO tracing: - if servicesUsed_dict['gpiotracing'] == True: - cpus_gpiomonitoring = config.getint('fetcher', 'cpus_gpiomonitoring') - else: - cpus_gpiomonitoring = 0 - cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiomonitoring') - # CPUs for powerprofiling: - if servicesUsed_dict['powerprofiling'] == True: - cpus_powerprofiling = config.getint('fetcher', 'cpus_powerprofiling') - else: - cpus_powerprofiling = 0 - cpus_free = cpus_free + config.getint('fetcher', 'cpus_powerprofiling') - # If there are free CPUs left, give them to GPIO tracing and power profiling evenly as these services need the most CPU power: - if cpus_free > 0: - if (cpus_powerprofiling > 0) and (cpus_gpiomonitoring > 0): - # Both services are used, distribute the free CPUS evenly: - cpus_powerprofiling = cpus_powerprofiling + int(math.ceil(float(cpus_free)/2)) - cpus_gpiomonitoring = cpus_gpiomonitoring + int(math.floor(float(cpus_free)/2)) - elif cpus_powerprofiling > 0: - # GPIO monitoring/tracing is not used, so give all CPUs to powerprofiling: - cpus_powerprofiling = cpus_powerprofiling + cpus_free - elif cpus_gpiomonitoring > 0: - # Powerprofiling is not used, so give all CPUs to GPIO monitoring/tracing: - cpus_gpiomonitoring = cpus_gpiomonitoring + cpus_free - else: - # Neither of the services is used, so give it to one of the other services: - if cpus_serial > 0: - cpus_serial = cpus_serial + cpus_free - elif cpus_gpiosetting > 0: - cpus_gpiosetting = cpus_gpiosetting + cpus_free - cpus_total = cpus_errorlog + cpus_serial + cpus_gpiosetting + cpus_gpiomonitoring + cpus_powerprofiling - else: - cpus_gpiomonitoring = 0 - cpus_powerprofiling = 0 - cpus_gpiosetting = 0 - cpus_flockdaq = config.getint('fetcher', 'cpus_gpiosetting') + config.getint('fetcher', 'cpus_gpiomonitoring') + config.getint('fetcher', 'cpus_powerprofiling') + cpus_free - cpus_total = cpus_flockdaq + cpus_serial + cpus_errorlog - - service_pools_dict = {'errorlog': cpus_errorlog, 'serial': cpus_serial, 'gpioactuation': cpus_gpiosetting, 'gpiotracing': cpus_gpiomonitoring, 'powerprofiling': cpus_powerprofiling, 'flockdaq': cpus_flockdaq} - if (cpus_total > multiprocessing.cpu_count()): - logger.warn("Number of requested CPUs for all aggregating processes (%d) is higher than number of available CPUs (%d) on system."%(cpus_total, multiprocessing.cpu_count())) - - # Start a worker process pool for every service: - for service, cpus in service_pools_dict.items(): - if cpus > 0: - pool = multiprocessing.Pool(processes=cpus) - logger.debug("Created pool for %s workers with %d processes"%(service, cpus)) - service_pools_dict[service] = pool - else: - service_pools_dict[service] = None - logger.debug("Created all worker pools for services.") - # Catch kill signals --- - signal.signal(signal.SIGTERM, sigterm_handler) - signal.signal(signal.SIGINT, sigterm_handler) - # Loop through the folders and assign work to the worker processes: - vizimgdir = config.get('viz','imgdir') - commitsize = config.getint('fetcher', 'commitsize') - enableviz = config.getint('viz','enablepreview') - loggerprefix = "(Mainloop): " - workmanager = WorkManager() - # Main loop --- - while 1: - if mainloop_stop and 
workmanager.finished() and FetchObsThread_queue.empty(): - # exit main loop - logger.debug("work manager has nothing more to do, finishing up..") - break - # Wait for FetchObsThreads to put items on queue: - try: - item = FetchObsThread_queue.get(block=True, timeout=5) - (itemtype, obsid, fdir, f) = item[:4] - logger.debug(loggerprefix + "Got element from queue: %d, %s, %s/%s"%(itemtype, str(obsid), fdir, f)) - except queue.Empty: - # No one put any data onto the queue. - # In normal operation, just ignore the error and try again: - continue - if itemtype == ITEM_TO_PROCESS: - nextitem = workmanager.add(item) - else: # type is ITEM_PROCESSED - nextitem = workmanager.done(item) - if nextitem is None: - logger.debug(loggerprefix + "Next item is None.") - continue - (itemtype, obsid, fdir, f, workerstate) = nextitem - logger.debug(loggerprefix + "Next item is %s, %s/%s" % (str(obsid), fdir, f)) - nodeid = obsdict_byid[obsid][0] - callback_f = worker_callback - worker_f = worker_convert_and_aggregate - quick_test = False - # Match the filename against the patterns and schedule an appropriate worker function: - if (re.search("^gpio_setting_[0-9]{14}\.db$", f) != None): - #logger.debug(loggerprefix + "File %s contains GPIO setting results"%f) - pool = service_pools_dict['gpioactuation'] - worker_args = [nextitem, nodeid, testresultsfile_dict['gpioactuation'][0], testresultsfile_dict['gpioactuation'][1], commitsize, vizimgdir, parse_gpio_setting, convert_gpio_setting, None, logqueue] - elif (re.search("^gpio_monitor_[0-9]{14}\.db$", f) != None): - #logger.debug(loggerprefix + "File %s contains GPIO monitoring results"%f) - pool = service_pools_dict['gpiotracing'] - logger.debug(loggerprefix + "resultfile_path: %s" % str(testresultsfile_dict['gpiotracing'][0])) - logger.debug(loggerprefix + "queue item: %s" % str(nextitem)) - logger.debug(loggerprefix + "node id: %s" % str(nodeid)) - worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], obsdict_byid[obsid][1][1], obsdict_byid[obsid][1][0], vizimgdir, None, logqueue] - worker_f = worker_gpiotracing - quick_test = True - if (enableviz == 1): - worker_args[6] = flocklab.viz_gpio_monitor - elif (re.search("^powerprofiling_[0-9]{14}\.db$", f) != None): - #logger.debug(loggerprefix + "File %s contains power profiling results"%f) - # Power profiling has a special worker function which parses the whole file in a C module: - pool = service_pools_dict['powerprofiling'] - worker_args = [nextitem, nodeid, testresultsfile_dict['powerprofiling'][0], obsdict_byid[obsid][1][1], obsdict_byid[obsid][1][0], vizimgdir, None, logqueue, PpStatsQueue] - worker_f = worker_powerprof - if (enableviz == 1): - worker_args[6] = flocklab.viz_powerprofiling - elif (re.search("^serial_[0-9]{14}\.db$", f) != None): - #logger.debug(loggerprefix + "File %s contains serial service results"%f) - pool = service_pools_dict['serial'] - worker_args = [nextitem, nodeid, testresultsfile_dict['serial'][0], testresultsfile_dict['serial'][1], commitsize, vizimgdir, parse_serial, convert_serial, None, logqueue] - elif (re.search("^error_.*_[0-9]{14}\.db$", f) != None): - logger.debug(loggerprefix + "File %s contains error logs"%f) - pool = service_pools_dict['errorlog'] - worker_args = [nextitem, nodeid, testresultsfile_dict['errorlog'][0], testresultsfile_dict['errorlog'][1], commitsize, vizimgdir, parse_error_log, convert_error_log, None, logqueue] - # flockdaq service - elif (re.search("^flockdaq_[0-9]{14}\.db$", f) != None): - pool = service_pools_dict['flockdaq'] - # 
teststarttime and teststoptime: see line 892 - epoch_teststarttime = int(time.mktime(time.strptime(str(teststarttime), config.get('database', 'timeformat')))) - epoch_teststoptime = int(time.mktime(time.strptime(str(teststoptime), config.get('database', 'timeformat')))) - worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], testresultsfile_dict['gpioactuation'][0], testresultsfile_dict['powerprofiling'][0], testresultsfile_dict['errorlog'][0], float(obsdict_byid[int(obsid)][1][1]), float(obsdict_byid[int(obsid)][1][0]), epoch_teststarttime, epoch_teststoptime, vizimgdir, None, None, logqueue, PpStatsQueue] - worker_f = worker_flockdaq - if (enableviz == 1): - worker_args[11] = flocklab.viz_powerprofiling - worker_args[12] = flocklab.viz_gpio_monitor - else: - logger.warn(loggerprefix + "DB file %s/%s from observer %s did not match any of the known patterns" %(fdir, f, obsid)) - continue - # Schedule worker function from the service's pool. The result will be reported to the callback function. - if quick_test: - logger.debug("GPIO MONITOR GETS STARTED") - else: - logger.debug("OTHER SERVICE DATA GETS COLLECTED") - - pool.apply_async(func=worker_f, args=tuple(worker_args), callback=callback_f) - # Stop signal for main loop has been set --- - # Stop worker pool: - for service, pool in service_pools_dict.items(): - if pool: - logger.debug("Closing pool for %s..."%service) - pool.close() - for service, pool in service_pools_dict.items(): - if pool: - logger.debug("Waiting for pool for %s to close..."%service) - pool.join() - logger.debug("Closed all pools.") - # Write pp stats - ppstats = PpStatsQueue.get() - f = open(testresultsfile_dict['powerprofilingstats'][0], 'a') - for (obsid, (avg, count)) in ppstats.items(): - nodeid = obsdict_byid[obsid][0] - f.write("%d,%d,%0.6f\n" % (obsid, nodeid, avg)) - f.close() - - # Stop logging: - logger.debug("Stopping log queue thread...") - LogQueueThread_stopEvent.set() - - # Set DB status: - logger.debug("Setting test status in DB to 'synced'...") - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - flocklab.set_test_status(cur, cn, testid, 'synced') - cur.close() - cn.close() - except: - logger.warn("Could not connect to database") - - - # Delete the obsfile directories as they are not needed anymore: - if ((obsfiledir != None) and (os.path.exists(obsfiledir))): - shutil.rmtree(obsfiledir) - # Delete old debug files - if os.path.exists(config.get('fetcher', 'obsfile_debug_dir')): - for d in [fn for fn in os.listdir(config.get('fetcher', 'obsfile_debug_dir')) if os.stat("%s/%s" % (config.get('fetcher', 'obsfile_debug_dir'), fn)).st_mtime < int(time.time()) - int(config.get('fetcher', 'obsfile_debug_dir_max_age_days')) * 24 * 3600]: - shutil.rmtree("%s/%s" % (config.get('fetcher', 'obsfile_debug_dir'),d)) - if len(errors) > 1: - msg = "" - for error in errors: - msg += error - flocklab.error_logandexit(msg, errno.EBADMSG, name, logger, config) - else: - ret = SUCCESS - sys.exit(ret) + + ### Get global variables ### + global logger + global debug + global testid + global config + global testresultsdir + global testresultsfile_dict + global owner_fk + global pindict + global obsdict_byid + global servicedict + global serialdict + + stop = False + + # Set timezone to UTC --- + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file --- + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not 
read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get command line parameters --- + try: + opts, args = getopt.getopt(argv, "hedt:", ["help", "stop", "debug", "testid="]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-d", "--debug"): + debug = True + logger.debug("Detected debug flag.") + logger.setLevel(logging.DEBUG) + elif opt in ("-t", "--testid"): + try: + testid = int(arg) + except ValueError: + err = "Wrong API usage: testid has to be integer" + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + elif opt in ("-e", "--stop"): + stop = True + else: + print("Wrong API usage") + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Check if the necessary parameters are set --- + if not testid: + print("Wrong API usage") + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Check if the Test ID exists in the database --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + rs = flocklab.check_test_id(cur, testid) + cur.close() + cn.close() + if rs != 0: + if rs == 3: + msg = "Test ID %d does not exist in database." %testid + flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + else: + msg = "Error when trying to get test ID from database: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EIO, name, logger, config) + + # Add Test ID to logger name --- + logger.name += " (Test %d)"%testid + + # Start / stop the fetcher --- + ret = SUCCESS + if stop: + ret = stop_fetcher() + logger.info("FlockLab fetcher stopped.") + else: + # Start the fetcher processes which download data from the observers: + ret = start_fetcher() + if ret == SUCCESS: + logger.info("FlockLab fetcher started.") + else: + msg = "Start function returned error. Exiting..." 
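Review note (not part of the patch): flocklab.check_test_id() is a project-internal helper; the code above only relies on its return-code convention (0 means the test exists, 3 means the ID is unknown, anything else is a database error). A sketch of that calling convention with a stand-in implementation:

# Sketch only -- not part of the patch. check_test_id_stub() is a hypothetical
# stand-in for flocklab.check_test_id(); only the return codes mirror the code above.
import errno
import sys

def check_test_id_stub(known_ids, testid):
    return 0 if testid in known_ids else 3

def ensure_test_exists(testid, known_ids=(42, 43)):
    rs = check_test_id_stub(known_ids, testid)
    if rs == 0:
        return
    if rs == 3:
        sys.exit(errno.EINVAL)   # test ID does not exist in the database
    sys.exit(errno.EIO)          # any other database error

if __name__ == "__main__":
    ensure_test_exists(42)
    print("test 42 exists")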
+ os.kill(os.getpid(), signal.SIGTERM) + rs = flocklab.error_logandexit(msg, ret, name, logger, config) + + # Get needed metadata --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + rs = flocklab.get_test_owner(cur, testid) + if isinstance(rs, tuple): + owner_fk = rs[0] + else: + owner_fk = None + rs = flocklab.get_pinmappings(cur) + if isinstance(rs, dict): + pindict = rs + else: + pindict = None + rs = flocklab.get_test_obs(cur, testid) + if isinstance(rs, tuple): + obsdict_byid = rs[1] + else: + obsdict_byid = None + # Dict for serial service: 'r' means reader (data read from the target), 'w' means writer (data written to the target): + serialdict = {0: 'r', 1: 'w'} + # Get calibration data for used slots and add it to obsdict --- + ppstats={} + for (obsid, (obskey, nodeid)) in obsdict_byid.items(): + ppstats[obsid]=(0.0,0) + rs = flocklab.get_slot_calib(cur, int(obskey), testid) + if isinstance(rs, tuple): + obsdict_byid[obsid] = (nodeid, rs) + else: + obsdict_byid = None + break + rs = flocklab.get_servicemappings(cur) + if isinstance(rs, dict): + servicedict = rs + else: + servicedict = None + + #find out the start and stoptime of the test + cur.execute("SELECT `time_start_wish`, `time_end_wish` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d" %testid) + # Times are going to be of datetime type: + ret = cur.fetchone() + teststarttime = ret[0] + teststoptime = ret[1] + FlockDAQ = False + + # Find out which services are used to allocate working threads later on --- + # Get the XML config from the database and check which services are used in the test. + servicesUsed_dict = {'gpiotracing': 'gpioTracingConf', 'gpioactuation': 'gpioActuationConf', 'powerprofiling': 'powerProfilingConf', 'serial': 'serialConf'} + cur.execute("SELECT `testconfig_xml` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) + ret = cur.fetchone() + if not ret: + msg = "No XML found in database for testid %d." %testid + errors.append(msg) + logger.error(msg) + for service, xmlname in servicesUsed_dict.items(): + servicesUsed_dict[service] = True + else: + try: + logger.debug("Got XML from database.") + parser = etree.XMLParser(remove_comments=True) + tree = etree.fromstring(bytes(bytearray(ret[0], encoding = 'utf-8')), parser) + ns = {'d': config.get('xml', 'namespace')} + for service, xmlname in servicesUsed_dict.items(): + if tree.xpath('//d:%s'%xmlname, namespaces=ns): + servicesUsed_dict[service] = True + logger.debug("Usage of %s detected."%service) + else: + servicesUsed_dict[service] = False + except: + msg = "XML parsing failed: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])) + errors.append(msg) + logger.error(msg) + + # check if user is allowed to use daq-services + cur.execute("SELECT `use_daq` FROM `tbl_serv_users` WHERE (`serv_users_key` = %s)" %owner_fk) + ret = cur.fetchone() + if ret[0] == 1: + FlockDAQ = "true" + logger.debug("Test is using FlockDAQ services") + else: + FlockDAQ = "false" + cur.close() + cn.close() + if ((owner_fk==None) or (pindict==None) or (obsdict_byid==None) or (servicedict==None)): + msg = "Error when getting metadata.\n" + msg += "owner_fk: %s\npindict: %s\nobsdict_byid: %s\nservicedict: %s\n"%(str(owner_fk), str(pindict), str(obsdict_byid), str(servicedict)) + msg += "Exiting..." 
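Review note (not part of the patch): the time_start_wish/time_end_wish values fetched above are later converted to epoch seconds for the flockdaq worker via time.strptime()/time.mktime(). A sketch of that conversion; the format string is an assumption, the real one comes from config.get('database', 'timeformat'):

# Sketch only -- not part of the patch.
import time

TIMEFORMAT = "%Y-%m-%d %H:%M:%S"     # assumed DB time format

def to_epoch(dt_value, fmt=TIMEFORMAT):
    # str() also covers datetime objects returned by the DB driver
    return int(time.mktime(time.strptime(str(dt_value), fmt)))

if __name__ == "__main__":
    start = to_epoch("2020-01-01 12:00:00")
    stop = to_epoch("2020-01-01 12:30:00")
    print(start, stop, stop - start)   # duration in seconds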
+ logger.debug(msg) + os.kill(os.getpid(), signal.SIGTERM) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + else: + logger.debug("Got all needed metadata.") + + # Start aggregating processes --- + """ There is an infinite loop which gets files to process from the fetcher threads which download data from the observers. + Downloaded data is then assigned to a worker process for the corresponding service and in the worker process parsed, + converted (if needed) and aggregated into a single file per service. + The loop is stopped if the program receives a stop signal. In this case, the loops runs until no more database files + are there to be processed. + """ + if __name__ == '__main__': + # Create directory and files needed for test results: + testresultsdir = "%s/%d" %(config.get('fetcher', 'testresults_dir'), testid) + if not os.path.exists(testresultsdir): + os.makedirs(testresultsdir) + logger.debug("Created %s"%testresultsdir) + manager = multiprocessing.Manager() + for service in ('errorlog', 'gpiotracing', 'gpioactuation', 'powerprofiling', 'serial', 'powerprofilingstats'): + path = "%s/%s.csv" %(testresultsdir, service) + lock = manager.Lock() + testresultsfile_dict[service] = (path, lock) + # Create file and write header: + if service == 'errorlog': + header = '# timestamp,observer_id,node_id,errormessage\n' + elif service == 'gpiotracing': + header = 'observer_id,node_id,pin_name,# timestamp,value\n' + elif service == 'gpioactuation': + header = '# timestamp_planned,timestamp_executed,observer_id,node_id,pin_name,value\n' + elif service == 'powerprofiling': + header = 'This is a RocketLogger File with additional information about the observer id and the node id.\n' + elif service == 'serial': + header = '# timestamp,observer_id,node_id,direction,output\n' + elif service == 'powerprofilingstats': + header = '# observer_id,node_id,mean_mA\n' + lock.acquire() + f = open(path, 'w') + f.write(header) + f.close() + lock.release() + # Start logging thread: + logqueue = manager.Queue(maxsize=10000) + LogQueueThread_stopEvent = threading.Event() + try: + thread = LogQueueThread(logqueue, logger, LogQueueThread_stopEvent) + thread.start() + except: + logger.warn("Error when starting log queue thread: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + + PpStatsQueue = manager.Queue(maxsize=1) + PpStatsQueue.put(ppstats) + # Determine the number of CPU's to be used for each aggregating process. If a service is not used, its CPUs are assigned to other services + cpus_free = 0 + cpus_errorlog = config.getint('fetcher', 'cpus_errorlog') + # CPUs for serial service: + if servicesUsed_dict['serial'] == True: + cpus_serial = config.getint('fetcher', 'cpus_serial') + else: + cpus_serial = 0 + cpus_free = cpus_free + config.getint('fetcher', 'cpus_serial') + # CPUs for GPIO actuation. If the service is not used, assign a CPU anyhow since FlockLab always uses this service to determine start and stop times of a test. 
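Review note (not part of the patch): the CPU accounting that starts here collects the CPUs of unused services in cpus_free and, in the block that follows, hands them to power profiling and GPIO tracing with a ceil/floor split. A sketch of just that redistribution step, with example counts rather than values from the real config file:

# Sketch only -- not part of the patch.
import math

def redistribute(cpus_powerprofiling, cpus_gpiomonitoring, cpus_free):
    if cpus_free <= 0:
        return cpus_powerprofiling, cpus_gpiomonitoring
    if cpus_powerprofiling > 0 and cpus_gpiomonitoring > 0:
        # both heavy services used: split the freed CPUs as evenly as possible
        cpus_powerprofiling += int(math.ceil(cpus_free / 2))
        cpus_gpiomonitoring += int(math.floor(cpus_free / 2))
    elif cpus_powerprofiling > 0:
        cpus_powerprofiling += cpus_free
    elif cpus_gpiomonitoring > 0:
        cpus_gpiomonitoring += cpus_free
    return cpus_powerprofiling, cpus_gpiomonitoring

if __name__ == "__main__":
    # e.g. serial unused (2 CPUs freed) while both heavy services are enabled:
    print(redistribute(2, 2, 2))   # -> (3, 3)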
+ if FlockDAQ != "true": + cpus_flockdaq = 0 + #cpus_errorlog = config.getint('fetcher', 'cpus_errorlog') + if servicesUsed_dict['gpioactuation'] == True: + cpus_gpiosetting = config.getint('fetcher', 'cpus_gpiosetting') + else: + cpus_gpiosetting = 1 + cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiosetting') - cpus_gpiosetting + # CPUs for GPIO tracing: + if servicesUsed_dict['gpiotracing'] == True: + cpus_gpiomonitoring = config.getint('fetcher', 'cpus_gpiomonitoring') + else: + cpus_gpiomonitoring = 0 + cpus_free = cpus_free + config.getint('fetcher', 'cpus_gpiomonitoring') + # CPUs for powerprofiling: + if servicesUsed_dict['powerprofiling'] == True: + cpus_powerprofiling = config.getint('fetcher', 'cpus_powerprofiling') + else: + cpus_powerprofiling = 0 + cpus_free = cpus_free + config.getint('fetcher', 'cpus_powerprofiling') + # If there are free CPUs left, give them to GPIO tracing and power profiling evenly as these services need the most CPU power: + if cpus_free > 0: + if (cpus_powerprofiling > 0) and (cpus_gpiomonitoring > 0): + # Both services are used, distribute the free CPUS evenly: + cpus_powerprofiling = cpus_powerprofiling + int(math.ceil(float(cpus_free)/2)) + cpus_gpiomonitoring = cpus_gpiomonitoring + int(math.floor(float(cpus_free)/2)) + elif cpus_powerprofiling > 0: + # GPIO monitoring/tracing is not used, so give all CPUs to powerprofiling: + cpus_powerprofiling = cpus_powerprofiling + cpus_free + elif cpus_gpiomonitoring > 0: + # Powerprofiling is not used, so give all CPUs to GPIO monitoring/tracing: + cpus_gpiomonitoring = cpus_gpiomonitoring + cpus_free + else: + # Neither of the services is used, so give it to one of the other services: + if cpus_serial > 0: + cpus_serial = cpus_serial + cpus_free + elif cpus_gpiosetting > 0: + cpus_gpiosetting = cpus_gpiosetting + cpus_free + cpus_total = cpus_errorlog + cpus_serial + cpus_gpiosetting + cpus_gpiomonitoring + cpus_powerprofiling + else: + cpus_gpiomonitoring = 0 + cpus_powerprofiling = 0 + cpus_gpiosetting = 0 + cpus_flockdaq = config.getint('fetcher', 'cpus_gpiosetting') + config.getint('fetcher', 'cpus_gpiomonitoring') + config.getint('fetcher', 'cpus_powerprofiling') + cpus_free + cpus_total = cpus_flockdaq + cpus_serial + cpus_errorlog + + service_pools_dict = {'errorlog': cpus_errorlog, 'serial': cpus_serial, 'gpioactuation': cpus_gpiosetting, 'gpiotracing': cpus_gpiomonitoring, 'powerprofiling': cpus_powerprofiling, 'flockdaq': cpus_flockdaq} + if (cpus_total > multiprocessing.cpu_count()): + logger.warn("Number of requested CPUs for all aggregating processes (%d) is higher than number of available CPUs (%d) on system."%(cpus_total, multiprocessing.cpu_count())) + + # Start a worker process pool for every service: + for service, cpus in service_pools_dict.items(): + if cpus > 0: + pool = multiprocessing.Pool(processes=cpus) + logger.debug("Created pool for %s workers with %d processes"%(service, cpus)) + service_pools_dict[service] = pool + else: + service_pools_dict[service] = None + logger.debug("Created all worker pools for services.") + # Catch kill signals --- + signal.signal(signal.SIGTERM, sigterm_handler) + signal.signal(signal.SIGINT, sigterm_handler) + # Loop through the folders and assign work to the worker processes: + vizimgdir = config.get('viz','imgdir') + commitsize = config.getint('fetcher', 'commitsize') + enableviz = config.getint('viz','enablepreview') + loggerprefix = "(Mainloop): " + workmanager = WorkManager() + # Main loop --- + while 1: + if mainloop_stop and 
workmanager.finished() and FetchObsThread_queue.empty(): + # exit main loop + logger.debug("work manager has nothing more to do, finishing up..") + break + # Wait for FetchObsThreads to put items on queue: + try: + item = FetchObsThread_queue.get(block=True, timeout=5) + (itemtype, obsid, fdir, f) = item[:4] + logger.debug(loggerprefix + "Got element from queue: %d, %s, %s/%s"%(itemtype, str(obsid), fdir, f)) + except queue.Empty: + # No one put any data onto the queue. + # In normal operation, just ignore the error and try again: + continue + if itemtype == ITEM_TO_PROCESS: + nextitem = workmanager.add(item) + else: # type is ITEM_PROCESSED + nextitem = workmanager.done(item) + if nextitem is None: + logger.debug(loggerprefix + "Next item is None.") + continue + (itemtype, obsid, fdir, f, workerstate) = nextitem + logger.debug(loggerprefix + "Next item is %s, %s/%s" % (str(obsid), fdir, f)) + nodeid = obsdict_byid[obsid][0] + callback_f = worker_callback + worker_f = worker_convert_and_aggregate + quick_test = False + # Match the filename against the patterns and schedule an appropriate worker function: + if (re.search("^gpio_setting_[0-9]{14}\.db$", f) != None): + #logger.debug(loggerprefix + "File %s contains GPIO setting results"%f) + pool = service_pools_dict['gpioactuation'] + worker_args = [nextitem, nodeid, testresultsfile_dict['gpioactuation'][0], testresultsfile_dict['gpioactuation'][1], commitsize, vizimgdir, parse_gpio_setting, convert_gpio_setting, None, logqueue] + elif (re.search("^gpio_monitor_[0-9]{14}\.db$", f) != None): + #logger.debug(loggerprefix + "File %s contains GPIO monitoring results"%f) + pool = service_pools_dict['gpiotracing'] + logger.debug(loggerprefix + "resultfile_path: %s" % str(testresultsfile_dict['gpiotracing'][0])) + logger.debug(loggerprefix + "queue item: %s" % str(nextitem)) + logger.debug(loggerprefix + "node id: %s" % str(nodeid)) + worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], obsdict_byid[obsid][1][1], obsdict_byid[obsid][1][0], vizimgdir, None, logqueue] + worker_f = worker_gpiotracing + quick_test = True + if (enableviz == 1): + worker_args[6] = flocklab.viz_gpio_monitor + elif (re.search("^powerprofiling_[0-9]{14}\.db$", f) != None): + #logger.debug(loggerprefix + "File %s contains power profiling results"%f) + # Power profiling has a special worker function which parses the whole file in a C module: + pool = service_pools_dict['powerprofiling'] + worker_args = [nextitem, nodeid, testresultsfile_dict['powerprofiling'][0], obsdict_byid[obsid][1][1], obsdict_byid[obsid][1][0], vizimgdir, None, logqueue, PpStatsQueue] + worker_f = worker_powerprof + if (enableviz == 1): + worker_args[6] = flocklab.viz_powerprofiling + elif (re.search("^serial_[0-9]{14}\.db$", f) != None): + #logger.debug(loggerprefix + "File %s contains serial service results"%f) + pool = service_pools_dict['serial'] + worker_args = [nextitem, nodeid, testresultsfile_dict['serial'][0], testresultsfile_dict['serial'][1], commitsize, vizimgdir, parse_serial, convert_serial, None, logqueue] + elif (re.search("^error_.*_[0-9]{14}\.db$", f) != None): + logger.debug(loggerprefix + "File %s contains error logs"%f) + pool = service_pools_dict['errorlog'] + worker_args = [nextitem, nodeid, testresultsfile_dict['errorlog'][0], testresultsfile_dict['errorlog'][1], commitsize, vizimgdir, parse_error_log, convert_error_log, None, logqueue] + # flockdaq service + elif (re.search("^flockdaq_[0-9]{14}\.db$", f) != None): + pool = service_pools_dict['flockdaq'] + # 
teststarttime and teststoptime: see line 892 + epoch_teststarttime = int(time.mktime(time.strptime(str(teststarttime), config.get('database', 'timeformat')))) + epoch_teststoptime = int(time.mktime(time.strptime(str(teststoptime), config.get('database', 'timeformat')))) + worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], testresultsfile_dict['gpioactuation'][0], testresultsfile_dict['powerprofiling'][0], testresultsfile_dict['errorlog'][0], float(obsdict_byid[int(obsid)][1][1]), float(obsdict_byid[int(obsid)][1][0]), epoch_teststarttime, epoch_teststoptime, vizimgdir, None, None, logqueue, PpStatsQueue] + worker_f = worker_flockdaq + if (enableviz == 1): + worker_args[11] = flocklab.viz_powerprofiling + worker_args[12] = flocklab.viz_gpio_monitor + else: + logger.warn(loggerprefix + "DB file %s/%s from observer %s did not match any of the known patterns" %(fdir, f, obsid)) + continue + # Schedule worker function from the service's pool. The result will be reported to the callback function. + if quick_test: + logger.debug("GPIO MONITOR GETS STARTED") + else: + logger.debug("OTHER SERVICE DATA GETS COLLECTED") + + pool.apply_async(func=worker_f, args=tuple(worker_args), callback=callback_f) + # Stop signal for main loop has been set --- + # Stop worker pool: + for service, pool in service_pools_dict.items(): + if pool: + logger.debug("Closing pool for %s..."%service) + pool.close() + for service, pool in service_pools_dict.items(): + if pool: + logger.debug("Waiting for pool for %s to close..."%service) + pool.join() + logger.debug("Closed all pools.") + # Write pp stats + ppstats = PpStatsQueue.get() + f = open(testresultsfile_dict['powerprofilingstats'][0], 'a') + for (obsid, (avg, count)) in ppstats.items(): + nodeid = obsdict_byid[obsid][0] + f.write("%d,%d,%0.6f\n" % (obsid, nodeid, avg)) + f.close() + + # Stop logging: + logger.debug("Stopping log queue thread...") + LogQueueThread_stopEvent.set() + + # Set DB status: + logger.debug("Setting test status in DB to 'synced'...") + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + flocklab.set_test_status(cur, cn, testid, 'synced') + cur.close() + cn.close() + except: + logger.warn("Could not connect to database") + + + # Delete the obsfile directories as they are not needed anymore: + if ((obsfiledir != None) and (os.path.exists(obsfiledir))): + shutil.rmtree(obsfiledir) + # Delete old debug files + if os.path.exists(config.get('fetcher', 'obsfile_debug_dir')): + for d in [fn for fn in os.listdir(config.get('fetcher', 'obsfile_debug_dir')) if os.stat("%s/%s" % (config.get('fetcher', 'obsfile_debug_dir'), fn)).st_mtime < int(time.time()) - int(config.get('fetcher', 'obsfile_debug_dir_max_age_days')) * 24 * 3600]: + shutil.rmtree("%s/%s" % (config.get('fetcher', 'obsfile_debug_dir'),d)) + if len(errors) > 1: + msg = "" + for error in errors: + msg += error + flocklab.error_logandexit(msg, errno.EBADMSG, name, logger, config) + else: + ret = SUCCESS + sys.exit(ret) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, 
errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_mmccheck.py b/testmanagementserver/flocklab_mmccheck.py index 821078869d5bda6439a5eb8038fb37d5cb913b39..d466ebc070ca8dd5258e15a69c5dbaea3a8f3f34 100755 --- a/testmanagementserver/flocklab_mmccheck.py +++ b/testmanagementserver/flocklab_mmccheck.py @@ -16,10 +16,10 @@ scriptname = os.path.basename(__main__.__file__) scriptpath = os.path.dirname(os.path.abspath(sys.argv[0])) name = "MMC check" ### -logger = None -debug = False -config = None -zabbixServer = "http://carrel.ethz.ch/zabbix/api_jsonrpc.php" +logger = None +debug = False +config = None +zabbixServer = "http://carrel.ethz.ch/zabbix/api_jsonrpc.php" @@ -29,8 +29,8 @@ zabbixServer = "http://carrel.ethz.ch/zabbix/api_jsonrpc.php" # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -41,191 +41,191 @@ class Error(Exception): # ############################################################################## class MmcCheckThread(threading.Thread): - """ Thread which calls MMC check script on an observer. - """ - def __init__(self, observer_ethernet, errors_queue, setupscriptpath): - threading.Thread.__init__(self) - self._observer_ethernet = observer_ethernet - self._setupscriptpath = setupscriptpath - self._errors_queue = errors_queue - self._abortEvent = threading.Event() - self._errors = [] - self._p = None - self._restore_logs = False - - def run(self): - try: - logger.debug("MmcCheckThread for %s starting..."%(self._observer_ethernet)) - # Download log data from observer because we want to restore it after the check: - logger.debug("Downloading log files from %s..." %(self._observer_ethernet)) - (self._logtar_fd, self._logtar_name) = tempfile.mkstemp(".tar.gz", "flocklab_") - cmd = ['ssh' ,'%s'%(self._observer_ethernet), "tar czf - /media/card/log/"] - self._p = subprocess.Popen(cmd, stdout=self._logtar_fd, stderr=subprocess.PIPE) - while self._p.returncode == None: - self._abortEvent.wait(1.0) - self._p.poll() - if self._abortEvent.is_set(): - self._p.kill() - else: - out, err = self._p.communicate() - os.close(self._logtar_fd) - rs = self._p.returncode - if (rs == SUCCESS): - logger.debug("Downloaded log files from %s to file %s" %(self._observer_ethernet, self._logtar_name)) - self._restore_logs = True - elif (rs == 255): - msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) - self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) - logger.error(msg) - raise Error - else: - msg = "Could not download log files from %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) - self._errors.append((msg, rs, self._observer_ethernet)) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - # continue without keeping old log files - - # Call the MMC check script on the observer: - logger.debug("Executing MMC check script on %s..." %(self._observer_ethernet)) - cmd = ['ssh' ,'%s'%(self._observer_ethernet), "mmc_check.sh"] - self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - while self._p.returncode == None: - self._abortEvent.wait(1.0) - self._p.poll() - if self._abortEvent.is_set(): - self._p.kill() - else: - out, err = self._p.communicate() - rs = self._p.returncode - if (rs == SUCCESS): - logger.debug("MMC check script on %s succeeded." 
%(self._observer_ethernet)) - elif (rs == 255): - msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) - self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) - logger.error(msg) - raise Error - else: - msg = "MMC check script on %s failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) - self._errors.append((msg, rs, self._observer_ethernet)) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - raise Error - - # Restore the media card on the observer: - logger.debug("Re-creating contents of MMC card on %s..." %(self._observer_ethernet)) - cmd = ["%s/setup_new_observer.sh"%self._setupscriptpath, self._observer_ethernet, "-sdsetuponly"] - self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self._setupscriptpath) - while self._p.returncode == None: - self._abortEvent.wait(1.0) - self._p.poll() - if self._abortEvent.is_set(): - self._p.kill() - else: - out, err = self._p.communicate() - rs = self._p.returncode - if (rs == SUCCESS): - logger.debug("MMC card re-created on %s." %(self._observer_ethernet)) - elif (rs == 255): - msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) - self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) - logger.error(msg) - raise Error - else: - msg = "Could not re-create MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) - self._errors.append((msg, rs, self._observer_ethernet)) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - raise Error - - # Upload log data to observer: - if self._restore_logs: - logger.debug("Restoring log files to MMC card on %s..." %(self._observer_ethernet)) - self._logtar_fd = open(self._logtar_name) - cmd = ['ssh', '%s'%(self._observer_ethernet), 'tar xzf - -C /'] - self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=self._logtar_fd) - while self._p.returncode == None: - self._abortEvent.wait(1.0) - self._p.poll() - if self._abortEvent.is_set(): - self._p.kill() - else: - out, err = self._p.communicate() - rs = self._p.returncode - self._logtar_fd.close() - if (rs == SUCCESS): - logger.debug("Log files restored to MMC card on %s." %(self._observer_ethernet)) - elif (rs == 255): - msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) - self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) - logger.error(msg) - raise Error - else: - msg = "Could not restore log files to MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) - self._errors.append((msg, rs, self._observer_ethernet)) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - raise Error - else: - logger.debug("Skipping restoration of log files to MMC card on %s..." %(self._observer_ethernet)) - - # Determine number of badblocks on MMC card: - logger.debug("Checking number of badblocks on MMC card on %s..." 
%(self._observer_ethernet)) - cmd = ['ssh' ,'%s'%(self._observer_ethernet), 'dumpe2fs -b /dev/mmcblk0p1 | wc -l'] - self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - while self._p.returncode == None: - self._abortEvent.wait(1.0) - self._p.poll() - if self._abortEvent.is_set(): - self._p.kill() - else: - out, err = self._p.communicate() - rs = self._p.returncode - if (rs == SUCCESS): - n = int(out) - logger.debug("Number of badblocks on MMC card on %s: %d" %(self._observer_ethernet, n)) - if n > 0: - if n > 200: - msg = "Number of badblocks on MMC on %s: %d. Consider replacing the card as there are quite many blocks bad"%(self._observer_ethernet, n) - else: - msg = "Number of badblocks on MMC on %s: %d. Everything is fine, no action required."%(self._observer_ethernet, n) - self._errors.append((msg, errno.EIO, self._observer_ethernet)) - logger.error(msg) - elif (rs == 255): - msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) - self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) - logger.error(msg) - raise Error - else: - msg = "Could not check number of badblocks on MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) - self._errors.append((msg, rs, self._observer_ethernet)) - logger.error(msg) - logger.error("Tried to execute %s"%str(cmd)) - raise Error - - # Restart observer: - logger.debug("Reboot %s..." %(self._observer_ethernet)) - cmd = ['ssh' ,'%s'%(self._observer_ethernet), 'reboot'] - self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - except: - # Main thread requested abort. - # Close a possibly still running subprocess: - if (self._p is not None) and (self._p.poll() is not None): - self._p.kill() - msg = "MmcCheckThread for %s aborted because of error: %s: %s"%(self._observer_ethernet, str(sys.exc_info()[0]), str(sys.exc_info()[1])) - self._errors.append((msg, errno.ECOMM, self._observer_ethernet)) - logger.error(msg) - finally: - if (len(self._errors) > 0): - self._errors_queue.put((self._observer_ethernet, self._errors)) - try: - os.unlink(self._logtar_name) - except OSError: - pass # ignore if file is not there - - def abort(self): - self._abortEvent.set() - + """ Thread which calls MMC check script on an observer. + """ + def __init__(self, observer_ethernet, errors_queue, setupscriptpath): + threading.Thread.__init__(self) + self._observer_ethernet = observer_ethernet + self._setupscriptpath = setupscriptpath + self._errors_queue = errors_queue + self._abortEvent = threading.Event() + self._errors = [] + self._p = None + self._restore_logs = False + + def run(self): + try: + logger.debug("MmcCheckThread for %s starting..."%(self._observer_ethernet)) + # Download log data from observer because we want to restore it after the check: + logger.debug("Downloading log files from %s..." 
%(self._observer_ethernet)) + (self._logtar_fd, self._logtar_name) = tempfile.mkstemp(".tar.gz", "flocklab_") + cmd = ['ssh' ,'%s'%(self._observer_ethernet), "tar czf - /media/card/log/"] + self._p = subprocess.Popen(cmd, stdout=self._logtar_fd, stderr=subprocess.PIPE) + while self._p.returncode == None: + self._abortEvent.wait(1.0) + self._p.poll() + if self._abortEvent.is_set(): + self._p.kill() + else: + out, err = self._p.communicate() + os.close(self._logtar_fd) + rs = self._p.returncode + if (rs == SUCCESS): + logger.debug("Downloaded log files from %s to file %s" %(self._observer_ethernet, self._logtar_name)) + self._restore_logs = True + elif (rs == 255): + msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) + self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) + logger.error(msg) + raise Error + else: + msg = "Could not download log files from %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) + self._errors.append((msg, rs, self._observer_ethernet)) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + # continue without keeping old log files + + # Call the MMC check script on the observer: + logger.debug("Executing MMC check script on %s..." %(self._observer_ethernet)) + cmd = ['ssh' ,'%s'%(self._observer_ethernet), "mmc_check.sh"] + self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + while self._p.returncode == None: + self._abortEvent.wait(1.0) + self._p.poll() + if self._abortEvent.is_set(): + self._p.kill() + else: + out, err = self._p.communicate() + rs = self._p.returncode + if (rs == SUCCESS): + logger.debug("MMC check script on %s succeeded." %(self._observer_ethernet)) + elif (rs == 255): + msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) + self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) + logger.error(msg) + raise Error + else: + msg = "MMC check script on %s failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) + self._errors.append((msg, rs, self._observer_ethernet)) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + raise Error + + # Restore the media card on the observer: + logger.debug("Re-creating contents of MMC card on %s..." %(self._observer_ethernet)) + cmd = ["%s/setup_new_observer.sh"%self._setupscriptpath, self._observer_ethernet, "-sdsetuponly"] + self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self._setupscriptpath) + while self._p.returncode == None: + self._abortEvent.wait(1.0) + self._p.poll() + if self._abortEvent.is_set(): + self._p.kill() + else: + out, err = self._p.communicate() + rs = self._p.returncode + if (rs == SUCCESS): + logger.debug("MMC card re-created on %s." 
%(self._observer_ethernet)) + elif (rs == 255): + msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) + self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) + logger.error(msg) + raise Error + else: + msg = "Could not re-create MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) + self._errors.append((msg, rs, self._observer_ethernet)) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + raise Error + + # Upload log data to observer: + if self._restore_logs: + logger.debug("Restoring log files to MMC card on %s..." %(self._observer_ethernet)) + self._logtar_fd = open(self._logtar_name) + cmd = ['ssh', '%s'%(self._observer_ethernet), 'tar xzf - -C /'] + self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=self._logtar_fd) + while self._p.returncode == None: + self._abortEvent.wait(1.0) + self._p.poll() + if self._abortEvent.is_set(): + self._p.kill() + else: + out, err = self._p.communicate() + rs = self._p.returncode + self._logtar_fd.close() + if (rs == SUCCESS): + logger.debug("Log files restored to MMC card on %s." %(self._observer_ethernet)) + elif (rs == 255): + msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) + self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) + logger.error(msg) + raise Error + else: + msg = "Could not restore log files to MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) + self._errors.append((msg, rs, self._observer_ethernet)) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + raise Error + else: + logger.debug("Skipping restoration of log files to MMC card on %s..." %(self._observer_ethernet)) + + # Determine number of badblocks on MMC card: + logger.debug("Checking number of badblocks on MMC card on %s..." %(self._observer_ethernet)) + cmd = ['ssh' ,'%s'%(self._observer_ethernet), 'dumpe2fs -b /dev/mmcblk0p1 | wc -l'] + self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + while self._p.returncode == None: + self._abortEvent.wait(1.0) + self._p.poll() + if self._abortEvent.is_set(): + self._p.kill() + else: + out, err = self._p.communicate() + rs = self._p.returncode + if (rs == SUCCESS): + n = int(out) + logger.debug("Number of badblocks on MMC card on %s: %d" %(self._observer_ethernet, n)) + if n > 0: + if n > 200: + msg = "Number of badblocks on MMC on %s: %d. Consider replacing the card as there are quite many blocks bad"%(self._observer_ethernet, n) + else: + msg = "Number of badblocks on MMC on %s: %d. Everything is fine, no action required."%(self._observer_ethernet, n) + self._errors.append((msg, errno.EIO, self._observer_ethernet)) + logger.error(msg) + elif (rs == 255): + msg = "%s is not reachable, thus MMC check script failed."%(self._observer_ethernet) + self._errors.append((msg, errno.EHOSTUNREACH, self._observer_ethernet)) + logger.error(msg) + raise Error + else: + msg = "Could not check number of badblocks on MMC card on %s: failed with error code %s and error message %s" %(str(self._observer_ethernet), str(errno.errorcode[rs]), str(out)) + self._errors.append((msg, rs, self._observer_ethernet)) + logger.error(msg) + logger.error("Tried to execute %s"%str(cmd)) + raise Error + + # Restart observer: + logger.debug("Reboot %s..." 
%(self._observer_ethernet)) + cmd = ['ssh' ,'%s'%(self._observer_ethernet), 'reboot'] + self._p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + except: + # Main thread requested abort. + # Close a possibly still running subprocess: + if (self._p is not None) and (self._p.poll() is not None): + self._p.kill() + msg = "MmcCheckThread for %s aborted because of error: %s: %s"%(self._observer_ethernet, str(sys.exc_info()[0]), str(sys.exc_info()[1])) + self._errors.append((msg, errno.ECOMM, self._observer_ethernet)) + logger.error(msg) + finally: + if (len(self._errors) > 0): + self._errors_queue.put((self._observer_ethernet, self._errors)) + try: + os.unlink(self._logtar_name) + except OSError: + pass # ignore if file is not there + + def abort(self): + self._abortEvent.set() + ### END MmcCheckThread @@ -236,18 +236,18 @@ class MmcCheckThread(threading.Thread): # ############################################################################## def zabbixAuthenticate(server): - data = json.dumps({"jsonrpc":"2.0","method":"user.login","params":{"user":"flocklab_api","password":"flockrock"},"id":"1"}) - req = urllib.request.Request(server, data, headers = {'Content-Type': 'application/json'}) - ret = urllib.request.urlopen(req) - out = ret.read() - try: - pairs = out.split(',')[1] - authkey = pairs.split(':')[1] - authkey = authkey[1:-1] - except: - authkey = None - finally: - return authkey + data = json.dumps({"jsonrpc":"2.0","method":"user.login","params":{"user":"flocklab_api","password":"flockrock"},"id":"1"}) + req = urllib.request.Request(server, data, headers = {'Content-Type': 'application/json'}) + ret = urllib.request.urlopen(req) + out = ret.read() + try: + pairs = out.split(',')[1] + authkey = pairs.split(':')[1] + authkey = authkey[1:-1] + except: + authkey = None + finally: + return authkey ### END zabbixAuthenticate() @@ -258,10 +258,10 @@ def zabbixAuthenticate(server): # ############################################################################## def usage(): - print("Usage: %s [--obslist <list>] [--debug] [--help]" %scriptname) - print(" --obslist\t\t\tOptional. Only check observers in list. List is a colon separated string.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s [--obslist <list>] [--debug] [--help]" %scriptname) + print(" --obslist\t\t\tOptional. Only check observers in list. List is a colon separated string.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -272,255 +272,255 @@ def usage(): # ############################################################################## def main(argv): - global logger - global debug - global config - errors = [] - warnings = [] - obslist = [] - - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file: - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." 
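Review note (not part of the patch): under Python 3, zabbixAuthenticate() as written appears fragile, since urllib.request expects a bytes request body and returns bytes, which the string-splitting of the response does not account for. A possible json-based variant, sketched under the assumption of a standard Zabbix JSON-RPC endpoint; URL and credentials below are placeholders:

# Sketch only -- not part of the patch. Placeholder URL/credentials; parses the
# JSON-RPC reply with json instead of splitting the raw response string.
import json
import urllib.request

def zabbix_login(server, user, password):
    payload = json.dumps({
        "jsonrpc": "2.0",
        "method": "user.login",
        "params": {"user": user, "password": password},
        "id": 1,
    }).encode("utf-8")                      # request body must be bytes in Python 3
    req = urllib.request.Request(server, data=payload,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        reply = json.loads(resp.read().decode("utf-8"))
    return reply.get("result")              # auth token, or None on an error reply

# usage (placeholder URL):
# authkey = zabbix_login("http://zabbix.example.org/api_jsonrpc.php", "api_user", "secret")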
- flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "o:dh", ["obslist=", "debug", "help"]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - for opt, arg in opts: - if opt in ("-o", "--obslist"): - obslist = arg.split(':') - try: - obslist = list(map(int, obslist)) - except: - msg = "Could not parse observer list. Exiting..." - flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) - elif opt in ("-d", "--debug"): - debug = True - logger.setLevel(logging.DEBUG) - logger.debug("Detected debug flag.") - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - try: - # Reserve testbed --- - # Connect to the database: - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - # find next available slot - starttime = datetime.datetime.now() - sql = "SELECT DATE_SUB(time_start_wish, INTERVAL %d MINUTE), DATE_ADD(time_end_wish, INTERVAL %d MINUTE) FROM tbl_serv_tests WHERE time_end_wish > '%s' AND test_status IN ('planned','preparing','running','cleaning up','syncing','synced','aborting') ORDER BY time_start_wish ASC" - cur.execute(sql % (int(config.get('tests', 'setuptime')), int(config.get('tests', 'cleanuptime')), starttime.strftime(config.get("database", "timeformat")))) - ret = cur.fetchall() - for r in ret: - if r[0] - starttime > datetime.timedelta(hours=int(config.get("mmccheck", "reservation_duration_h"))): - break - starttime = r[1] - # add reservation slot - sql = "INSERT INTO tbl_serv_reservations (group_id_fk, time_start, time_end) VALUES (%d, '%s', '%s');" - cur.execute(sql % (int(config.get('mmccheck', 'reservation_group_id')), starttime.strftime(config.get("database", "timeformat")), (starttime + datetime.timedelta(hours=int(config.get("mmccheck", "reservation_duration_h")))).strftime(config.get("database", "timeformat")))) - cn.commit() - cur.close() - cn.close() - # ---- - logger.debug("Starttime of testbed reservation is: %s" % str(starttime)) - starttime = time.mktime(starttime.timetuple()) - # Wait for testbed reservation to start --- - if len(errors) == 0: - waittime = starttime - time.time() + 10 - while waittime >= 0.0: - # Interrupt waiting every 5 minutes if needed: - waittime = 300 if waittime > 300 else waittime - logger.info("Going to sleep for %f seconds."%waittime) - time.sleep(waittime) - logger.info("Woke up") - waittime = starttime - time.time() + 10 - # Put Flocklab-Group in Zabbix into maintenance mode --- - logger.debug("Putting FlockLab group in Zabbix into maintenance mode...") - maintenance_id = None - logger.debug("Authenticating on Zabbix server...") - authkey = zabbixAuthenticate(zabbixServer) - if authkey == None: - errors.append("Could not authenticate on Zabbix server.") - else: - logger.debug("Authenticated on Zabbix server") - now = time.time() - 60 - nowint = int(float(now)) - duration = 3600*int(config.get('mmccheck', 'reservation_duration_h')) - data = json.dumps({"jsonrpc":"2.0","method":"maintenance.create","params":[{"groupids":[11],"name":"Maintenance for 
FLockLab","maintenance_type":"0","description":"Maintenance for FlockLab", "active_since":nowint,"active_till":nowint + duration + 100,"timeperiods":[{"timeperiod_type":"0","start_date":nowint,"period":duration}],}],"id":"3","auth":authkey}) - req = urllib.request.Request(zabbixServer, data, headers = {'Content-Type': 'application/json'}) - ret = urllib.request.urlopen(req) - out = ret.read() - try: - pairs = out.split(',')[1] - maintenance_id = pairs.split(':')[2] - maintenance_id = maintenance_id[2:-3] - logger.debug("Put FlockLab group on Zabbix into maintenance mode (maintenance ID %s)"%maintenance_id) - except: - errors.append("Could not put FlockLab group on Zabbix into maintenance mode.") - maintenance_id = None - - # Get all available observers --- - if len(errors) == 0: - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - logger.debug("Connected to database.") - sql = """ SELECT `ethernet_address` - FROM `tbl_serv_observer` - WHERE `status` = 'online' - """ - if len(obslist) > 0: - sql = sql + ' AND `observer_id` IN (%s)' % (','.join(map(str,obslist))) - sql = sql + ';' - rows = cur.execute(sql) - rs = cur.fetchall() - if not rs: - errors.append("Found no observers which are online.") - else: - logger.debug("Found %d online observers."%rows) - observers = [] - for obs in rs: - observers.append(obs[0]) - try: - cur.close() - cn.close() - except: - pass - except: - errors.append("Could not connect to database.") - - # Run MMC check on observers --- - if len(errors) == 0: - # Start a thread for each observer: - thread_list = [] - errors_queue = queue.Queue() - for observer in observers: - thread = MmcCheckThread(observer, errors_queue, setupscriptpath) - thread_list.append(thread) - thread.start() - logger.debug("Started thread for %s" %(observer)) - # Wait for all threads to finish: - for thread in thread_list: - # Wait at most as long as the testbed is reserved for the threads to finish. - thread.join(timeout=3600*int(config.get('mmccheck', 'reservation_duration_h'))) - if thread.isAlive(): - # Timeout occurred. Signal the thread to abort: - logger.error("Telling thread for observer to abort...") - thread.abort() - # Wait again for the aborted threads: - for thread in thread_list: - thread.join(timeout=20) - if thread.isAlive(): - msg = "Thread for observer is still alive but should be aborted now." - errors.append(msg) - logger.error(msg) - - # Get all errors (if any): - if not errors_queue.empty(): - logger.error("Queue with errors from observer threads is not empty. Getting errors...") - while not errors_queue.empty(): - errs = errors_queue.get() - for err in errs[1]: - logger.error("Error from observer thread for %s: %s" %(str(err[2]), str(err[0]))) - errors.append(err[0]) - - # Take Flocklab group on Zabbix out of maintenance mode: - if maintenance_id: - logger.debug("Taking FlockLab group in Zabbix out of maintenance mode...") - logger.debug("Authenticating on Zabbix server...") - authkey = zabbixAuthenticate(zabbixServer) - if authkey == None: - errors.append("Could not authenticate on Zabbix server.") - errors.append("Flocklab group not taken out of maintenance mode on Zabbix server due to errors. 
Please do so manually.") - else: - logger.debug("Authenticated on Zabbix server.") - data = json.dumps({"jsonrpc":"2.0","method":"maintenance.delete","params":[maintenance_id],"auth":authkey,"id":"4"}) - req = urllib.request.Request(zabbixServer, data, headers = {'Content-Type': 'application/json'}) - ret = urllib.request.urlopen(req) - out = ret.read() - if '"maintenanceids":["%s'%maintenance_id in out: - logger.debug("FlockLab group taken out of maintenance mode on Zabbix server.") - else: - errors.append("Flocklab group not taken out of maintenance mode on Zabbix server due to errors. Please do so manually.") - - - # Free testbed from reservation --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - logger.info("Removing reservation to free testbed...") - sql = "DELETE FROM tbl_serv_reservations WHERE group_id_fk = %d;" - cur.execute(sql % int(config.get('mmccheck', 'reservation_group_id'))) - cn.commit() - cur.close() - cn.close() - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Inform admins --- - if ((len(errors) > 0) or (len(warnings) > 0)): - msg = "The following errors/warnings occurred:\n\n" - for error in errors: - msg = msg + "\t * ERROR: %s\n" %(str(error)) - for warn in warnings: - msg = msg + "\t * WARNING: %s\n" %(str(warn)) - logger.debug("Finished with %d errors and %d warnings"%(len(errors), len(warnings))) - flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) - else: - # Send email to admin: - try: - cn, cur = flocklab.connect_to_db(config, logger) - admin_emails = flocklab.get_admin_emails(cur, config) - cur.close() - cn.close() - if ((admin_emails == 1) or (admin_emails == 2)): - logger.error("Error when getting admin emails from database") - raise - except: - # Use backup email address: - admin_emails = "flocklab@tik.ee.ethz.ch" - finally: - msg = "Successfully finished MMC checks on observers." - logger.debug(msg) - flocklab.send_mail(subject="[FlockLab %s]"%(scriptname.capitalize()), message=msg, recipients=admin_emails) - except Exception: - msg = "Unexpected error: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - print(msg) - flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) - sys.exit(SUCCESS) - + global logger + global debug + global config + errors = [] + warnings = [] + obslist = [] + + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file: + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "o:dh", ["obslist=", "debug", "help"]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + for opt, arg in opts: + if opt in ("-o", "--obslist"): + obslist = arg.split(':') + try: + obslist = list(map(int, obslist)) + except: + msg = "Could not parse observer list. Exiting..." 
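# Illustrative sketch, not part of the patch: the --obslist value handled above is a
# colon-separated list of observer IDs (e.g. "1:2:7"). A standalone equivalent of the
# conversion done in that option branch (any non-numeric token raises ValueError,
# which the script above turns into an error exit):
def parse_obslist(arg):
    """Return the observer IDs from a colon-separated string as a list of ints."""
    return list(map(int, arg.split(':')))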
+ flocklab.error_logandexit(msg, errno.EINVAL, name, logger, config) + elif opt in ("-d", "--debug"): + debug = True + logger.setLevel(logging.DEBUG) + logger.debug("Detected debug flag.") + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + try: + # Reserve testbed --- + # Connect to the database: + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + # find next available slot + starttime = datetime.datetime.now() + sql = "SELECT DATE_SUB(time_start_wish, INTERVAL %d MINUTE), DATE_ADD(time_end_wish, INTERVAL %d MINUTE) FROM tbl_serv_tests WHERE time_end_wish > '%s' AND test_status IN ('planned','preparing','running','cleaning up','syncing','synced','aborting') ORDER BY time_start_wish ASC" + cur.execute(sql % (int(config.get('tests', 'setuptime')), int(config.get('tests', 'cleanuptime')), starttime.strftime(config.get("database", "timeformat")))) + ret = cur.fetchall() + for r in ret: + if r[0] - starttime > datetime.timedelta(hours=int(config.get("mmccheck", "reservation_duration_h"))): + break + starttime = r[1] + # add reservation slot + sql = "INSERT INTO tbl_serv_reservations (group_id_fk, time_start, time_end) VALUES (%d, '%s', '%s');" + cur.execute(sql % (int(config.get('mmccheck', 'reservation_group_id')), starttime.strftime(config.get("database", "timeformat")), (starttime + datetime.timedelta(hours=int(config.get("mmccheck", "reservation_duration_h")))).strftime(config.get("database", "timeformat")))) + cn.commit() + cur.close() + cn.close() + # ---- + logger.debug("Starttime of testbed reservation is: %s" % str(starttime)) + starttime = time.mktime(starttime.timetuple()) + # Wait for testbed reservation to start --- + if len(errors) == 0: + waittime = starttime - time.time() + 10 + while waittime >= 0.0: + # Interrupt waiting every 5 minutes if needed: + waittime = 300 if waittime > 300 else waittime + logger.info("Going to sleep for %f seconds."%waittime) + time.sleep(waittime) + logger.info("Woke up") + waittime = starttime - time.time() + 10 + # Put Flocklab-Group in Zabbix into maintenance mode --- + logger.debug("Putting FlockLab group in Zabbix into maintenance mode...") + maintenance_id = None + logger.debug("Authenticating on Zabbix server...") + authkey = zabbixAuthenticate(zabbixServer) + if authkey == None: + errors.append("Could not authenticate on Zabbix server.") + else: + logger.debug("Authenticated on Zabbix server") + now = time.time() - 60 + nowint = int(float(now)) + duration = 3600*int(config.get('mmccheck', 'reservation_duration_h')) + data = json.dumps({"jsonrpc":"2.0","method":"maintenance.create","params":[{"groupids":[11],"name":"Maintenance for FLockLab","maintenance_type":"0","description":"Maintenance for FlockLab", "active_since":nowint,"active_till":nowint + duration + 100,"timeperiods":[{"timeperiod_type":"0","start_date":nowint,"period":duration}],}],"id":"3","auth":authkey}) + req = urllib.request.Request(zabbixServer, data, headers = {'Content-Type': 'application/json'}) + ret = urllib.request.urlopen(req) + out = ret.read() + try: + pairs = out.split(',')[1] + maintenance_id = pairs.split(':')[2] + maintenance_id = maintenance_id[2:-3] + logger.debug("Put FlockLab group on Zabbix into maintenance mode (maintenance ID %s)"%maintenance_id) + except: + errors.append("Could not put FlockLab group on Zabbix into maintenance mode.") + 
maintenance_id = None + + # Get all available observers --- + if len(errors) == 0: + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + logger.debug("Connected to database.") + sql = """ SELECT `ethernet_address` + FROM `tbl_serv_observer` + WHERE `status` = 'online' + """ + if len(obslist) > 0: + sql = sql + ' AND `observer_id` IN (%s)' % (','.join(map(str,obslist))) + sql = sql + ';' + rows = cur.execute(sql) + rs = cur.fetchall() + if not rs: + errors.append("Found no observers which are online.") + else: + logger.debug("Found %d online observers."%rows) + observers = [] + for obs in rs: + observers.append(obs[0]) + try: + cur.close() + cn.close() + except: + pass + except: + errors.append("Could not connect to database.") + + # Run MMC check on observers --- + if len(errors) == 0: + # Start a thread for each observer: + thread_list = [] + errors_queue = queue.Queue() + for observer in observers: + thread = MmcCheckThread(observer, errors_queue, setupscriptpath) + thread_list.append(thread) + thread.start() + logger.debug("Started thread for %s" %(observer)) + # Wait for all threads to finish: + for thread in thread_list: + # Wait at most as long as the testbed is reserved for the threads to finish. + thread.join(timeout=3600*int(config.get('mmccheck', 'reservation_duration_h'))) + if thread.isAlive(): + # Timeout occurred. Signal the thread to abort: + logger.error("Telling thread for observer to abort...") + thread.abort() + # Wait again for the aborted threads: + for thread in thread_list: + thread.join(timeout=20) + if thread.isAlive(): + msg = "Thread for observer is still alive but should be aborted now." + errors.append(msg) + logger.error(msg) + + # Get all errors (if any): + if not errors_queue.empty(): + logger.error("Queue with errors from observer threads is not empty. Getting errors...") + while not errors_queue.empty(): + errs = errors_queue.get() + for err in errs[1]: + logger.error("Error from observer thread for %s: %s" %(str(err[2]), str(err[0]))) + errors.append(err[0]) + + # Take Flocklab group on Zabbix out of maintenance mode: + if maintenance_id: + logger.debug("Taking FlockLab group in Zabbix out of maintenance mode...") + logger.debug("Authenticating on Zabbix server...") + authkey = zabbixAuthenticate(zabbixServer) + if authkey == None: + errors.append("Could not authenticate on Zabbix server.") + errors.append("Flocklab group not taken out of maintenance mode on Zabbix server due to errors. Please do so manually.") + else: + logger.debug("Authenticated on Zabbix server.") + data = json.dumps({"jsonrpc":"2.0","method":"maintenance.delete","params":[maintenance_id],"auth":authkey,"id":"4"}) + req = urllib.request.Request(zabbixServer, data, headers = {'Content-Type': 'application/json'}) + ret = urllib.request.urlopen(req) + out = ret.read() + if '"maintenanceids":["%s'%maintenance_id in out: + logger.debug("FlockLab group taken out of maintenance mode on Zabbix server.") + else: + errors.append("Flocklab group not taken out of maintenance mode on Zabbix server due to errors. 
Please do so manually.") + + + # Free testbed from reservation --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + logger.info("Removing reservation to free testbed...") + sql = "DELETE FROM tbl_serv_reservations WHERE group_id_fk = %d;" + cur.execute(sql % int(config.get('mmccheck', 'reservation_group_id'))) + cn.commit() + cur.close() + cn.close() + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Inform admins --- + if ((len(errors) > 0) or (len(warnings) > 0)): + msg = "The following errors/warnings occurred:\n\n" + for error in errors: + msg = msg + "\t * ERROR: %s\n" %(str(error)) + for warn in warnings: + msg = msg + "\t * WARNING: %s\n" %(str(warn)) + logger.debug("Finished with %d errors and %d warnings"%(len(errors), len(warnings))) + flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) + else: + # Send email to admin: + try: + cn, cur = flocklab.connect_to_db(config, logger) + admin_emails = flocklab.get_admin_emails(cur, config) + cur.close() + cn.close() + if ((admin_emails == 1) or (admin_emails == 2)): + logger.error("Error when getting admin emails from database") + raise + except: + # Use backup email address: + admin_emails = "flocklab@tik.ee.ethz.ch" + finally: + msg = "Successfully finished MMC checks on observers." + logger.debug(msg) + flocklab.send_mail(subject="[FlockLab %s]"%(scriptname.capitalize()), message=msg, recipients=admin_emails) + except Exception: + msg = "Unexpected error: %s: %s"%(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + print(msg) + flocklab.error_logandexit(msg, errno.EFAULT, name, logger, config) + sys.exit(SUCCESS) + ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_retention_cleaner.py b/testmanagementserver/flocklab_retention_cleaner.py index 9ba774bab6ec3bfa8ade4d5c87b11e0ddaa202f6..6d7ba1e39f4a337105b91534bb25639de39a1812 100755 --- a/testmanagementserver/flocklab_retention_cleaner.py +++ b/testmanagementserver/flocklab_retention_cleaner.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" -__copyright__ = "Copyright 2013, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" +__copyright__ = "Copyright 2013, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, getopt, errno, traceback, logging, time, __main__, shutil, glob @@ -27,8 +27,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -39,10 +39,10 @@ class Error(Exception): # ############################################################################## def usage(): - print("Usage: %s [--debug] [--help]" %scriptname) - print("Options:") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. 
Print this help.") + print("Usage: %s [--debug] [--help]" %scriptname) + print("Options:") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -54,103 +54,103 @@ def usage(): ############################################################################## def main(argv): - ### Global Variables ### - global logger - global config - - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get config --- - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - for opt, arg in opts: - if opt in ("-d", "--debug"): - logger.setLevel(logging.DEBUG) - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Allow only x instances --- - rs = flocklab.count_running_instances(scriptname) - if (rs >= 0): - maxinscount = config.getint('retentioncleaner', 'max_instances') - if rs > maxinscount: - msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. Aborting..."%(maxinscount, scriptname, rs) - flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) - #else: - #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) - else: - msg = "Error when trying to count running instances of %s. 
Function returned with %d"%(scriptname, rs) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Connect to the database --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Connected to database") - - # Check for work --- - expiration_leadtime = config.get('retentioncleaner', 'expiration_leadtime') - logger.debug("Expiration lead time is %s days"%expiration_leadtime) - try: - # Get all users that have ran tests: - sql = """ SELECT DISTINCT `owner_fk` - FROM `tbl_serv_tests` - WHERE (`test_status` IN ('not schedulable','finished','failed', 'retention expiring')) - """ - if ( cur.execute(sql) <= 0 ): - logger.info("No users found which ran tests.") - else: - rs = cur.fetchall() - ownerids = [str(i[0]) for i in rs] - for ownerid in ownerids: - sql = """ SELECT `retention_time`, `email`, `username`, `is_active` - FROM `tbl_serv_users` - WHERE (`serv_users_key` = %s) - """ % (ownerid) - cur.execute(sql) - rs = cur.fetchone() - retention_time_user = rs[0] - owneremail = rs[1] - ownerusername = rs[2] - is_active = rs[3] - logger.debug("Checking tests of user %s (users retention time is %d days)."%(ownerusername, retention_time_user)) - # Check for each user (taking into account her individual retention time [-1 means saving data forever]) if there are tests to be cleaned soon and inform the user about these tests. - if retention_time_user != -1: - sql = """ SELECT `serv_tests_key`, `title`, DATE(`time_end_act`), `test_status` - FROM `tbl_serv_tests` - WHERE ((`owner_fk` = %s) AND (`time_end_act` < ADDDATE(NOW(), -(%s + %s))) AND (`test_status` IN ('not schedulable','finished','failed'))) - ORDER BY `time_end_act` DESC - """ % (ownerid, retention_time_user, expiration_leadtime) - if(cur.execute(sql) > 0): - rs = cur.fetchall() - msg_expiring = """Dear FlockLab user,\n\n\ + ### Global Variables ### + global logger + global config + + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get config --- + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + for opt, arg in opts: + if opt in ("-d", "--debug"): + logger.setLevel(logging.DEBUG) + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Allow only x instances --- + rs = flocklab.count_running_instances(scriptname) + if (rs >= 0): + maxinscount = config.getint('retentioncleaner', 'max_instances') + if rs > maxinscount: + msg = "Maximum number of instances (%d) for script %s with currently %d instances running exceeded. 
Aborting..."%(maxinscount, scriptname, rs) + flocklab.error_logandexit(msg, errno.EUSERS, name, logger, config) + #else: + #logger.debug("Maximum number of instances (%d) for script %s with currently %d instances running not exceeded."%(maxinscount, scriptname, rs)) + else: + msg = "Error when trying to count running instances of %s. Function returned with %d"%(scriptname, rs) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Connect to the database --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Connected to database") + + # Check for work --- + expiration_leadtime = config.get('retentioncleaner', 'expiration_leadtime') + logger.debug("Expiration lead time is %s days"%expiration_leadtime) + try: + # Get all users that have ran tests: + sql = """ SELECT DISTINCT `owner_fk` + FROM `tbl_serv_tests` + WHERE (`test_status` IN ('not schedulable','finished','failed', 'retention expiring')) + """ + if ( cur.execute(sql) <= 0 ): + logger.info("No users found which ran tests.") + else: + rs = cur.fetchall() + ownerids = [str(i[0]) for i in rs] + for ownerid in ownerids: + sql = """ SELECT `retention_time`, `email`, `username`, `is_active` + FROM `tbl_serv_users` + WHERE (`serv_users_key` = %s) + """ % (ownerid) + cur.execute(sql) + rs = cur.fetchone() + retention_time_user = rs[0] + owneremail = rs[1] + ownerusername = rs[2] + is_active = rs[3] + logger.debug("Checking tests of user %s (users retention time is %d days)."%(ownerusername, retention_time_user)) + # Check for each user (taking into account her individual retention time [-1 means saving data forever]) if there are tests to be cleaned soon and inform the user about these tests. + if retention_time_user != -1: + sql = """ SELECT `serv_tests_key`, `title`, DATE(`time_end_act`), `test_status` + FROM `tbl_serv_tests` + WHERE ((`owner_fk` = %s) AND (`time_end_act` < ADDDATE(NOW(), -(%s + %s))) AND (`test_status` IN ('not schedulable','finished','failed'))) + ORDER BY `time_end_act` DESC + """ % (ownerid, retention_time_user, expiration_leadtime) + if(cur.execute(sql) > 0): + rs = cur.fetchall() + msg_expiring = """Dear FlockLab user,\n\n\ FlockLab can not save your test data forever and thus your tests have a retention time of %s days before they are deleted.\n\ According to this policy, the following tests will be deleted in %s days. If you want to keep the test data, please download it before \ it is deleted. \n\n\ @@ -158,96 +158,96 @@ Test ID\tEnd of test\tTest state\tTest Title\n\ ===============================================================\n\ %s\n\ Yours faithfully,\nthe FlockLab server""" - testlist = "" - testids = ", ".join([str(i[0]) for i in rs]) - logger.debug("Found tests whose retention time expires soon: %s"%testids) - for testid, title, enddate, teststatus in rs: - testlist = testlist + "%s\t%s\t%s\t%s\n"%(testid, enddate, teststatus, title) - msg = msg_expiring%(retention_time_user, expiration_leadtime, testlist) - if is_active == 1: - ret = flocklab.send_mail(subject="[FlockLab %s] %s"%(name, "Retention time expiring soon") , message=msg, recipients=owneremail) - else: - ret = 0 - if ret != 0: - msg = "Could not send Email to %s. 
Function returned %d"%(owneremail, ret) - logger.error(msg) - emails = flocklab.get_admin_emails(cur, config) - msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) - flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) - continue - else: - # Mark the tests in the database: - sql = """ UPDATE `tbl_serv_tests` - SET `test_status` = 'retention expiring', `retention_expiration_warned` = NOW() - WHERE `serv_tests_key` IN (%s) - """ - cur.execute(sql%(testids)) - cn.commit() - logger.debug("Set test status to 'retention expiring' for tests.") - else: - logger.debug("Found no tests whose retention time expires soon.") - - # Check for each user if there are tests which are to be marked for deletion as their retention time expired: - sql = """ SELECT `serv_tests_key`, `title`, DATE(`time_end_act`) - FROM `tbl_serv_tests` - WHERE ((`owner_fk` = %s) AND (`time_end_act` < ADDDATE(NOW(), -(%s))) AND (`test_status` = 'retention expiring') AND (`retention_expiration_warned` < ADDDATE(NOW(), -(%s+1)))) - ORDER BY `time_end_act` DESC - """ - if(cur.execute(sql % (ownerid, retention_time_user, expiration_leadtime)) > 0): - rs = cur.fetchall() - msg_deleted = """Dear FlockLab user,\n\n\ + testlist = "" + testids = ", ".join([str(i[0]) for i in rs]) + logger.debug("Found tests whose retention time expires soon: %s"%testids) + for testid, title, enddate, teststatus in rs: + testlist = testlist + "%s\t%s\t%s\t%s\n"%(testid, enddate, teststatus, title) + msg = msg_expiring%(retention_time_user, expiration_leadtime, testlist) + if is_active == 1: + ret = flocklab.send_mail(subject="[FlockLab %s] %s"%(name, "Retention time expiring soon") , message=msg, recipients=owneremail) + else: + ret = 0 + if ret != 0: + msg = "Could not send Email to %s. Function returned %d"%(owneremail, ret) + logger.error(msg) + emails = flocklab.get_admin_emails(cur, config) + msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) + flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) + continue + else: + # Mark the tests in the database: + sql = """ UPDATE `tbl_serv_tests` + SET `test_status` = 'retention expiring', `retention_expiration_warned` = NOW() + WHERE `serv_tests_key` IN (%s) + """ + cur.execute(sql%(testids)) + cn.commit() + logger.debug("Set test status to 'retention expiring' for tests.") + else: + logger.debug("Found no tests whose retention time expires soon.") + + # Check for each user if there are tests which are to be marked for deletion as their retention time expired: + sql = """ SELECT `serv_tests_key`, `title`, DATE(`time_end_act`) + FROM `tbl_serv_tests` + WHERE ((`owner_fk` = %s) AND (`time_end_act` < ADDDATE(NOW(), -(%s))) AND (`test_status` = 'retention expiring') AND (`retention_expiration_warned` < ADDDATE(NOW(), -(%s+1)))) + ORDER BY `time_end_act` DESC + """ + if(cur.execute(sql % (ownerid, retention_time_user, expiration_leadtime)) > 0): + rs = cur.fetchall() + msg_deleted = """Dear FlockLab user,\n\n\ FlockLab can not save your test data forever and thus your tests have a retention time of %s days before they are deleted.\n\ According to this policy, the following tests have been deleted. 
\n\n\ Test ID\tEnd of test\tTest title\n\ ===============================================================\n\ %s\n\ Yours faithfully,\nthe FlockLab server""" - testlist = "" - testids = ", ".join([str(i[0]) for i in rs]) - logger.debug("Found tests whose retention time expired: %s"%testids) - for testid, title, enddate in rs: - testlist = testlist + "%s\t%s\t%s\n"%(testid, enddate, title) - msg = msg_deleted%(retention_time_user, testlist) - if is_active == 1: - ret = flocklab.send_mail(subject="[FlockLab %s] %s"%(name, "Retention time expired") , message=msg, recipients=owneremail) - else: - ret = 0 - if ret != 0: - msg = "Could not send Email to %s. Function returned %d"%(owneremail, ret) - logger.error(msg) - emails = flocklab.get_admin_emails(cur, config) - msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) - flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) - continue - else: - # Mark the tests in the database: - sql = """ UPDATE `tbl_serv_tests` - SET `test_status` = 'todelete' - WHERE `serv_tests_key` IN (%s) - """ - cur.execute(sql%(testids)) - cn.commit() - logger.debug("Set test status to 'todelete' for tests.") - else: - logger.debug("Found no tests whose retention time expired.") - except: - msg = "Encountered error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) - logger.error(msg) - emails = flocklab.get_admin_emails(cur, config) - msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) - flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) - finally: - cur.close() - cn.close() - - logger.debug("Finished. Exit program.") - sys.exit(SUCCESS) + testlist = "" + testids = ", ".join([str(i[0]) for i in rs]) + logger.debug("Found tests whose retention time expired: %s"%testids) + for testid, title, enddate in rs: + testlist = testlist + "%s\t%s\t%s\n"%(testid, enddate, title) + msg = msg_deleted%(retention_time_user, testlist) + if is_active == 1: + ret = flocklab.send_mail(subject="[FlockLab %s] %s"%(name, "Retention time expired") , message=msg, recipients=owneremail) + else: + ret = 0 + if ret != 0: + msg = "Could not send Email to %s. Function returned %d"%(owneremail, ret) + logger.error(msg) + emails = flocklab.get_admin_emails(cur, config) + msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) + flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) + continue + else: + # Mark the tests in the database: + sql = """ UPDATE `tbl_serv_tests` + SET `test_status` = 'todelete' + WHERE `serv_tests_key` IN (%s) + """ + cur.execute(sql%(testids)) + cn.commit() + logger.debug("Set test status to 'todelete' for tests.") + else: + logger.debug("Found no tests whose retention time expired.") + except: + msg = "Encountered error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()) + logger.error(msg) + emails = flocklab.get_admin_emails(cur, config) + msg = "%s on server %s encountered error:\n\n%s" %(scriptname, os.uname()[1], msg) + flocklab.send_mail(subject="[FlockLab %s]"%name, message=msg, recipients=emails) + finally: + cur.close() + cn.close() + + logger.debug("Finished. 
Exit program.") + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + diff --git a/testmanagementserver/flocklab_scheduler.py b/testmanagementserver/flocklab_scheduler.py index 2c64675f87ae01e06614e6decb4b96fd4003949c..a2c4d0ce3f18ceb4e652904e3e6f2ce0b29bbec2 100755 --- a/testmanagementserver/flocklab_scheduler.py +++ b/testmanagementserver/flocklab_scheduler.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>" __copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__license__ = "GPL" import sys, os, getopt, errno, time, datetime, subprocess, MySQLdb, logging, __main__, traceback, types, calendar, multiprocessing @@ -29,8 +29,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -40,49 +40,49 @@ class Error(Exception): # ############################################################################## def test_startstopabort(testid=None, mode='stop',delay=0): - if ((type(testid) != int) or (testid <= 0) or (mode not in ('start', 'stop', 'abort'))): - return -1 - - # change status of test that the next scheduler will skip this test - try: - conn = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) - cursor = conn.cursor() - except: - logger.error("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - - flocklab.set_test_dispatched(cursor, conn, testid) + if ((type(testid) != int) or (testid <= 0) or (mode not in ('start', 'stop', 'abort'))): + return -1 + + # change status of test that the next scheduler will skip this test + try: + conn = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) + cursor = conn.cursor() + except: + logger.error("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + + flocklab.set_test_dispatched(cursor, conn, testid) - # wait for the actual start time of the test - time.sleep(delay) - - logger.info("Found test ID %d which should be %sed."%(testid, mode)) - # Add testid to logger name - logger.name += " (Test %d)"%testid - # Call the dispatcher: - cmd = [config.get("dispatcher", "dispatcherscript"), '--testid=%d'%testid, '--%s'%mode] - # Make sure no other instance of the scheduler is running for the same task: - cmd2 = ['pgrep', '-o', '-f', ' '.join(cmd)] - p = subprocess.Popen(cmd2, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - if (p.returncode == 0): - logger.error("There is already an instance of this task 
running with PID %s. Nothing will be done." % (str(out))) - logger.debug("Command executed was: %s"%(str(cmd2))) - rs = errno.EALREADY - else: - if debug: - cmd.append('--debug') - p = subprocess.Popen(cmd) - p.wait() - rs = p.returncode - if (rs != SUCCESS): - logger.error("Dispatcher to %s test returned with error %d" %(mode, rs)) - logger.debug("Command executed was: %s"%(str(cmd))) - conn.close() - return errno.EFAULT - else: - logger.info("Test %d %s done." % (testid, mode)) - conn.close() - return SUCCESS + # wait for the actual start time of the test + time.sleep(delay) + + logger.info("Found test ID %d which should be %sed."%(testid, mode)) + # Add testid to logger name + logger.name += " (Test %d)"%testid + # Call the dispatcher: + cmd = [config.get("dispatcher", "dispatcherscript"), '--testid=%d'%testid, '--%s'%mode] + # Make sure no other instance of the scheduler is running for the same task: + cmd2 = ['pgrep', '-o', '-f', ' '.join(cmd)] + p = subprocess.Popen(cmd2, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + if (p.returncode == 0): + logger.error("There is already an instance of this task running with PID %s. Nothing will be done." % (str(out))) + logger.debug("Command executed was: %s"%(str(cmd2))) + rs = errno.EALREADY + else: + if debug: + cmd.append('--debug') + p = subprocess.Popen(cmd) + p.wait() + rs = p.returncode + if (rs != SUCCESS): + logger.error("Dispatcher to %s test returned with error %d" %(mode, rs)) + logger.debug("Command executed was: %s"%(str(cmd))) + conn.close() + return errno.EFAULT + else: + logger.info("Test %d %s done." % (testid, mode)) + conn.close() + return SUCCESS ### END test_startstopabort() @@ -92,10 +92,10 @@ def test_startstopabort(testid=None, mode='stop',delay=0): # ############################################################################## def usage(): - print("Usage: %s [--debug] [--help]" %scriptname) - print("Options:") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s [--debug] [--help]" %scriptname) + print("Options:") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -107,164 +107,164 @@ def usage(): ############################################################################## def main(argv): - ### Global Variables ### - global logger - global debug - global config + ### Global Variables ### + global logger + global debug + global config - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file: - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." 
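# Illustrative sketch, not part of the patch: test_startstopabort() above avoids launching
# the dispatcher twice for the same test by running "pgrep -o -f <full command line>",
# which exits with status 0 and prints the oldest matching PID when such a process
# already exists. A hypothetical standalone helper expressing the same check:
import subprocess

def dispatcher_already_running(cmd):
    """cmd is the dispatcher command line as a list of strings, as assembled above."""
    p = subprocess.run(['pgrep', '-o', '-f', ' '.join(cmd)],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       universal_newlines=True)
    return p.returncode == 0  # 0 means at least one matching process was found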
- flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file: + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "dh", ["debug", "help"]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - for opt, arg in opts: - if opt in ("-d", "--debug"): - debug = True - logger.setLevel(logging.DEBUG) - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Connect to the database --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Connected to database") - - try: - flocklab.acquire_db_lock(cur, cn, 'scheduler', 5) - except: - msg = "Could not acquire db lock" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - # Get current time --- - now = time.strftime(config.get("database", "timeformat"), time.gmtime()) - - # schedule link measurement if needed - flocklab.scheduleLinkTest(logger, config, cur, cn, debug) - - # Check for work --- - # Check if a new test is to be started --- - # Calculate the time frame in which a test can be started: at least the setuptime ahead, at most 5 minutes more ahead - earlieststart = (datetime.datetime.now() + datetime.timedelta(minutes=float(config.get("tests", "setuptime"))) - datetime.timedelta(seconds=10)).strftime(config.get("database", "timeformat")) - lateststart = (datetime.datetime.now() + datetime.timedelta(minutes=int(config.get("tests", "setuptime"))) + datetime.timedelta(minutes=2)).strftime(config.get("database", "timeformat")) - # Check if a test is going to start soon: - sql = """ SELECT `serv_tests_key`,`time_start_wish` - FROM `tbl_serv_tests` - WHERE - (`time_start_wish` >= '%s') - AND (`time_start_wish` <= '%s') - AND (`test_status` = 'planned') - AND (`dispatched` = 0) - ;"""%(earlieststart, lateststart) - logger.debug("Looking in DB for tests with start time between %s and %s and test status planned..."%(now, lateststart)) - cur.execute(sql) - - # start thread for each test to start - rs = cur.fetchall() - if rs: - for test in rs: - testid = int(test[0]) - delay = int(calendar.timegm(time.strptime(str(test[1]), '%Y-%m-%d %H:%M:%S'))) - (int(config.get("tests", "setuptime"))*60) - int(time.time()) - if delay < 0: - delay = 0 - logger.info("Call process to start test %s with delay %s"%(testid,delay)) - p = multiprocessing.Process(target=test_startstopabort,args=(testid, 'start', delay)) - p.start() - else: 
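# Illustrative sketch, not part of the patch: the delay computed in the start loop above is
# the number of seconds until the dispatcher has to be launched, i.e. "setuptime" minutes
# before the wished start time (a UTC 'YYYY-MM-DD HH:MM:SS' string), clamped to zero:
import calendar, time

def start_delay(time_start_wish, setuptime_min):
    """Seconds to sleep before dispatching a planned test."""
    start_epoch = calendar.timegm(time.strptime(time_start_wish, '%Y-%m-%d %H:%M:%S'))
    return max(0, int(start_epoch - setuptime_min * 60 - time.time()))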
- logger.debug("No test is to be started within the next %s minutes"%(config.get("tests", "setuptime"))) - #logger.debug("Looking in DB for missed tests...") - # Check for test which have been missed --- - sql1 = """ SELECT `serv_tests_key` - FROM `tbl_serv_tests` - WHERE - (`time_start_wish` < '%s') - AND (`test_status` = 'planned') - """%earlieststart - sql2 = """ UPDATE `tbl_serv_tests` - SET `test_status` = 'failed' - WHERE - (`time_start_wish` < '%s') - AND (`test_status` = 'planned') - """%earlieststart - nmissed = cur.execute(sql1) - if nmissed > 0: - tests = cur.fetchall() - cur.execute(sql2) - cn.commit() - # Inform users that test has been missed: - for testid in tests: - testid=int(testid[0]) - rs = flocklab.get_test_owner(cur, testid) - if isinstance(rs, tuple): - disable_infomails = int(rs[5]) - # Only send email to test owner if she didn't disable reception of info mails: - if disable_infomails != 1: - owner_email = rs[4] - msg = "The test with ID %d could not be started as planned because of the following errors:\n\n" %testid - msg += "\t * Scheduler missed start time of test (probably because the previous test took too long to stop). Try re-scheduling your test.\n" - flocklab.send_mail(subject="[FlockLab Scheduler] Missed test %d"%(testid), message=msg, recipients=owner_email) - else: - logger.error("Error %s returned when trying to get test owner information"%str(rs)) - logger.debug("Updated test status of %d missed tests to 'failed' and informed users."%nmissed) - else: - logger.debug("No missed tests found.") - rs = errno.ENODATA - - # Check if a test has to be stopped --- - # Check if there is a running test which is to be stopped: - sql = """ SELECT `serv_tests_key`, `test_status` - FROM `tbl_serv_tests` - WHERE - ((`test_status` = 'aborting') - OR ((`test_status` = 'running') AND (`time_end_wish` <= '%s'))) - AND (`dispatched` = 0) - ;""" - #logger.debug("Looking in DB for running tests with stop time equal or before %s..."%now) - status2mode = {'running':'stop', 'aborting':'abort'} - cur.execute(sql%(now)) - # start process for each test which has to be stopped - rs = cur.fetchall() - if rs: - for test in rs: - testid = int(test[0]) - logger.debug("Call process to stop test %d, status %s"%(testid, test[1])) - p = multiprocessing.Process(target=test_startstopabort,args=(testid, status2mode[test[1]])) - p.start() - - flocklab.release_db_lock(cur, cn, 'scheduler') - cur.close() - cn.close() - sys.exit(SUCCESS) + for opt, arg in opts: + if opt in ("-d", "--debug"): + debug = True + logger.setLevel(logging.DEBUG) + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Connect to the database --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Connected to database") + + try: + flocklab.acquire_db_lock(cur, cn, 'scheduler', 5) + except: + msg = "Could not acquire db lock" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + # Get current time --- + now = time.strftime(config.get("database", "timeformat"), time.gmtime()) + + # schedule link measurement if needed + flocklab.scheduleLinkTest(logger, config, cur, cn, debug) + + # Check for work --- + # Check if a new test is to be started --- + # Calculate the time frame in which a test can be started: at least the setuptime ahead, at most 5 minutes more ahead + earlieststart = 
(datetime.datetime.now() + datetime.timedelta(minutes=float(config.get("tests", "setuptime"))) - datetime.timedelta(seconds=10)).strftime(config.get("database", "timeformat")) + lateststart = (datetime.datetime.now() + datetime.timedelta(minutes=int(config.get("tests", "setuptime"))) + datetime.timedelta(minutes=2)).strftime(config.get("database", "timeformat")) + # Check if a test is going to start soon: + sql = """ SELECT `serv_tests_key`,`time_start_wish` + FROM `tbl_serv_tests` + WHERE + (`time_start_wish` >= '%s') + AND (`time_start_wish` <= '%s') + AND (`test_status` = 'planned') + AND (`dispatched` = 0) + ;"""%(earlieststart, lateststart) + logger.debug("Looking in DB for tests with start time between %s and %s and test status planned..."%(now, lateststart)) + cur.execute(sql) + + # start thread for each test to start + rs = cur.fetchall() + if rs: + for test in rs: + testid = int(test[0]) + delay = int(calendar.timegm(time.strptime(str(test[1]), '%Y-%m-%d %H:%M:%S'))) - (int(config.get("tests", "setuptime"))*60) - int(time.time()) + if delay < 0: + delay = 0 + logger.info("Call process to start test %s with delay %s"%(testid,delay)) + p = multiprocessing.Process(target=test_startstopabort,args=(testid, 'start', delay)) + p.start() + else: + logger.debug("No test is to be started within the next %s minutes"%(config.get("tests", "setuptime"))) + #logger.debug("Looking in DB for missed tests...") + # Check for test which have been missed --- + sql1 = """ SELECT `serv_tests_key` + FROM `tbl_serv_tests` + WHERE + (`time_start_wish` < '%s') + AND (`test_status` = 'planned') + """%earlieststart + sql2 = """ UPDATE `tbl_serv_tests` + SET `test_status` = 'failed' + WHERE + (`time_start_wish` < '%s') + AND (`test_status` = 'planned') + """%earlieststart + nmissed = cur.execute(sql1) + if nmissed > 0: + tests = cur.fetchall() + cur.execute(sql2) + cn.commit() + # Inform users that test has been missed: + for testid in tests: + testid=int(testid[0]) + rs = flocklab.get_test_owner(cur, testid) + if isinstance(rs, tuple): + disable_infomails = int(rs[5]) + # Only send email to test owner if she didn't disable reception of info mails: + if disable_infomails != 1: + owner_email = rs[4] + msg = "The test with ID %d could not be started as planned because of the following errors:\n\n" %testid + msg += "\t * Scheduler missed start time of test (probably because the previous test took too long to stop). 
Try re-scheduling your test.\n" + flocklab.send_mail(subject="[FlockLab Scheduler] Missed test %d"%(testid), message=msg, recipients=owner_email) + else: + logger.error("Error %s returned when trying to get test owner information"%str(rs)) + logger.debug("Updated test status of %d missed tests to 'failed' and informed users."%nmissed) + else: + logger.debug("No missed tests found.") + rs = errno.ENODATA + + # Check if a test has to be stopped --- + # Check if there is a running test which is to be stopped: + sql = """ SELECT `serv_tests_key`, `test_status` + FROM `tbl_serv_tests` + WHERE + ((`test_status` = 'aborting') + OR ((`test_status` = 'running') AND (`time_end_wish` <= '%s'))) + AND (`dispatched` = 0) + ;""" + #logger.debug("Looking in DB for running tests with stop time equal or before %s..."%now) + status2mode = {'running':'stop', 'aborting':'abort'} + cur.execute(sql%(now)) + # start process for each test which has to be stopped + rs = cur.fetchall() + if rs: + for test in rs: + testid = int(test[0]) + logger.debug("Call process to stop test %d, status %s"%(testid, test[1])) + p = multiprocessing.Process(target=test_startstopabort,args=(testid, status2mode[test[1]])) + p.start() + + flocklab.release_db_lock(cur, cn, 'scheduler') + cur.close() + cn.close() + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_sendmail.py b/testmanagementserver/flocklab_sendmail.py index bae1fd36a86427576a641451d9481f863198c397..49e914c6f4846ef0275f4869d6bed9b08f5424e1 100755 --- a/testmanagementserver/flocklab_sendmail.py +++ b/testmanagementserver/flocklab_sendmail.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -__author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" -__copyright__ = "Copyright 2018, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" +__copyright__ = "Copyright 2018, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, __main__, time, re diff --git a/testmanagementserver/flocklab_serialproxy.py b/testmanagementserver/flocklab_serialproxy.py index 0d3799210c5ea21dd3ac7a5e3f893df9ca78e080..9d9c04286efb31f19e8f89efb9a8626773209b59 100755 --- a/testmanagementserver/flocklab_serialproxy.py +++ b/testmanagementserver/flocklab_serialproxy.py @@ -1,8 +1,8 @@ #! 
/usr/bin/env python3 -__author__ = "Roman Lim <lim@tik.ee.ethz.ch>" +__author__ = "Roman Lim <lim@tik.ee.ethz.ch>" __copyright__ = "Copyright 2016, ETH Zurich, Switzerland" -__license__ = "GPL" +__license__ = "GPL" import os, sys, getopt, traceback, MySQLdb, signal, time, errno, subprocess, logging, __main__, multiprocessing, queue, threading, select, socket, io @@ -20,11 +20,11 @@ scriptpath = os.path.dirname(os.path.abspath(sys.argv[0])) name = "SerialProxy" ### -logger = None -debug = False -config = None -stopevent = None -reloadevent = None +logger = None +debug = False +config = None +stopevent = None +reloadevent = None ############################################################################## @@ -33,8 +33,8 @@ reloadevent = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -44,20 +44,20 @@ class Error(Exception): # ############################################################################## def sigterm_handler(signum, frame): - """If the program is terminated by sending it the signal SIGTERM - (e.g. by executing 'kill') or SIGINT (pressing ctrl-c), - this signal handler is invoked for cleanup.""" - # NOTE: logging should not be used in signal handlers: https://docs.python.org/2/library/logging.html#thread-safety - - global stopevent - global reloadevent - - logger.debug("sigterm_handler: signal %u received" % (signum)) - # Signal all threads to stop: - if signum == signal.SIGTERM and stopevent: - stopevent.set() - elif signum == signal.SIGINT and reloadevent: - reloadevent.set() + """If the program is terminated by sending it the signal SIGTERM + (e.g. by executing 'kill') or SIGINT (pressing ctrl-c), + this signal handler is invoked for cleanup.""" + # NOTE: logging should not be used in signal handlers: https://docs.python.org/2/library/logging.html#thread-safety + + global stopevent + global reloadevent + + logger.debug("sigterm_handler: signal %u received" % (signum)) + # Signal all threads to stop: + if signum == signal.SIGTERM and stopevent: + stopevent.set() + elif signum == signal.SIGINT and reloadevent: + reloadevent.set() ### END sigterm_handler ############################################################################## @@ -66,27 +66,27 @@ def sigterm_handler(signum, frame): # ############################################################################## def listen_process(port, newConnectionQueue, _stopevent): - while not _stopevent.is_set(): - try: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.bind(('',port)) - sock.settimeout(1) - logger.info("Started socket %s:%d"%('',port)) - while not _stopevent.is_set(): - sock.listen(1) - try: - connection, address = sock.accept() - except socket.timeout: - continue - connection.settimeout(None) - logger.info("Connection from %s at port %d"%(str(address),port)) - address = (address[0], port) - newConnectionQueue.put((connection, address)) - logger.info("Listen process on port %d ended." 
% port) - except: - logger.error("Listen process on port %d: Socket error %s"%(port,str(sys.exc_info()[1]))) - time.sleep(5) + while not _stopevent.is_set(): + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('',port)) + sock.settimeout(1) + logger.info("Started socket %s:%d"%('',port)) + while not _stopevent.is_set(): + sock.listen(1) + try: + connection, address = sock.accept() + except socket.timeout: + continue + connection.settimeout(None) + logger.info("Connection from %s at port %d"%(str(address),port)) + address = (address[0], port) + newConnectionQueue.put((connection, address)) + logger.info("Listen process on port %d ended." % port) + except: + logger.error("Listen process on port %d: Socket error %s"%(port,str(sys.exc_info()[1]))) + time.sleep(5) ### END listen_process ############################################################################## @@ -95,28 +95,28 @@ def listen_process(port, newConnectionQueue, _stopevent): # ############################################################################## def obs_connect_process(conreqQueue, condoneQueue, _stopevent): - worklist = [] - while not _stopevent.is_set(): - try: - req = conreqQueue.get(True, 1) - worklist.append(req) - except queue.Empty: - pass - for w in worklist: - if w is None: - worklist = [] - break - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - sock.connect(w) - logger.info("Connected to observer %s on port %d" % (w[0],w[1])) - condoneQueue.put((sock, w)) - worklist.remove(w) - except ConnectionRefusedError: - logger.info("Could not connect to observer %s on port %d, will retry later.." % (w[0],w[1])) - except Exception: - logger.info("Could not connect to observer %s on port %d: %s, %s\n%s" % (w[0], w[1], str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) - pass + worklist = [] + while not _stopevent.is_set(): + try: + req = conreqQueue.get(True, 1) + worklist.append(req) + except queue.Empty: + pass + for w in worklist: + if w is None: + worklist = [] + break + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect(w) + logger.info("Connected to observer %s on port %d" % (w[0],w[1])) + condoneQueue.put((sock, w)) + worklist.remove(w) + except ConnectionRefusedError: + logger.info("Could not connect to observer %s on port %d, will retry later.." 
% (w[0],w[1])) + except Exception: + logger.info("Could not connect to observer %s on port %d: %s, %s\n%s" % (w[0], w[1], str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) + pass ### END obs_connect_process ############################################################################## @@ -125,61 +125,61 @@ def obs_connect_process(conreqQueue, condoneQueue, _stopevent): # ############################################################################## def update_configuration_from_db(): - # Get needed metadata from database --- - # for all running / preparing tests - # for each observer used in a serial configuration - # (user remoteIp, server port, observer ip, port) - - proxystartport = config.getint('serialproxy', 'startport') - obsdataport = config.getint('serialproxy', 'obsdataport') - proxyConfig = [] - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - cur.execute('SET time_zone="+0:00"') - except: - msg = "Could not connect to database" - logger.error(msg) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - try: - # Get the XML config from the database: - cur.execute("SELECT `testconfig_xml`, `serv_tests_key` FROM `tbl_serv_tests` WHERE (`test_status` IN ('preparing', 'running') AND `time_end_wish` >= NOW())") - ret = cur.fetchall() - for testconfig in ret: - logger.debug("Create proxy config for test %d" % testconfig[1]) - # get slot mappings - cur.execute("SELECT `observer_id`, `ethernet_address`, `slot` FROM `tbl_serv_map_test_observer_targetimages` `a` left join `tbl_serv_observer` `b` ON (`a`.`observer_fk` = `b`.`serv_observer_key`) WHERE `test_fk` = %d" % testconfig[1]) - mapret = cur.fetchall() - mapping = {} # dict obsid -> (ip_address, port) - for m in mapret: - if not m[2] is None: - mapping[int(m[0])] = (m[1], obsdataport) - parser = etree.XMLParser(remove_comments=True) - tree = etree.fromstring(bytes(bytearray(testconfig[0], encoding = 'utf-8')), parser) - ns = {'d': config.get('xml', 'namespace')} - logger.debug("Got XML from database.") - ## Process serial configuration --- - srconfs = tree.xpath('//d:serialConf', namespaces=ns) - for srconf in srconfs: - obsids = srconf.xpath('d:obsIds', namespaces=ns)[0].text.split() - remoteIp = srconf.xpath('d:remoteIp', namespaces=ns)[0].text - # Create a pair of FIFO pipes for every observer and start ncat: - for obsid in obsids: - if int(obsid) in mapping: - proxyConfig.append(((remoteIp, proxystartport + int(obsid)),mapping[int(obsid)])) - if len(proxyConfig) == 0: - logger.info("No serial forwarders required.") - else: - logger.debug("Current proxy configuration:") - for pc in proxyConfig: - logger.debug("%s:%d <-> %s:%d" % (pc[0][0],pc[0][1],pc[1][0],pc[1][1])) - return proxyConfig - except MySQLdb.Error as err: - msg = str(err) - logger.error(msg) - flocklab.error_logandexit(msg, errno.EIO, name, logger, config) - except: - logger.warn("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - raise + # Get needed metadata from database --- + # for all running / preparing tests + # for each observer used in a serial configuration + # (user remoteIp, server port, observer ip, port) + + proxystartport = config.getint('serialproxy', 'startport') + obsdataport = config.getint('serialproxy', 'obsdataport') + proxyConfig = [] + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + cur.execute('SET time_zone="+0:00"') + except: + msg = "Could not connect to database" + logger.error(msg) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + # 
Get the XML config from the database: + cur.execute("SELECT `testconfig_xml`, `serv_tests_key` FROM `tbl_serv_tests` WHERE (`test_status` IN ('preparing', 'running') AND `time_end_wish` >= NOW())") + ret = cur.fetchall() + for testconfig in ret: + logger.debug("Create proxy config for test %d" % testconfig[1]) + # get slot mappings + cur.execute("SELECT `observer_id`, `ethernet_address`, `slot` FROM `tbl_serv_map_test_observer_targetimages` `a` left join `tbl_serv_observer` `b` ON (`a`.`observer_fk` = `b`.`serv_observer_key`) WHERE `test_fk` = %d" % testconfig[1]) + mapret = cur.fetchall() + mapping = {} # dict obsid -> (ip_address, port) + for m in mapret: + if not m[2] is None: + mapping[int(m[0])] = (m[1], obsdataport) + parser = etree.XMLParser(remove_comments=True) + tree = etree.fromstring(bytes(bytearray(testconfig[0], encoding = 'utf-8')), parser) + ns = {'d': config.get('xml', 'namespace')} + logger.debug("Got XML from database.") + ## Process serial configuration --- + srconfs = tree.xpath('//d:serialConf', namespaces=ns) + for srconf in srconfs: + obsids = srconf.xpath('d:obsIds', namespaces=ns)[0].text.split() + remoteIp = srconf.xpath('d:remoteIp', namespaces=ns)[0].text + # Create a pair of FIFO pipes for every observer and start ncat: + for obsid in obsids: + if int(obsid) in mapping: + proxyConfig.append(((remoteIp, proxystartport + int(obsid)),mapping[int(obsid)])) + if len(proxyConfig) == 0: + logger.info("No serial forwarders required.") + else: + logger.debug("Current proxy configuration:") + for pc in proxyConfig: + logger.debug("%s:%d <-> %s:%d" % (pc[0][0],pc[0][1],pc[1][0],pc[1][1])) + return proxyConfig + except MySQLdb.Error as err: + msg = str(err) + logger.error(msg) + flocklab.error_logandexit(msg, errno.EIO, name, logger, config) + except: + logger.warn("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + raise ### END update_configuration_from_db @@ -189,216 +189,216 @@ def update_configuration_from_db(): # ############################################################################## class ProxyConnections(): - server_socket_process_list = {} # dict port > process - obs_socket_list = {} # dict (obs,slot) -> socket - server_socket_list = {} # dict (clientaddr, obs, slot) -> socket - client_to_obs = {} # dict obs_socket -> server_socket - obs_to_client = {} # dict server_socket -> obs_socket - proxyConfig = [] - addlist = [] - removelist = [] - op = None - - def __init__(self): - # multiprocessing events and queues - # for server socket processes - self.stopevent = multiprocessing.Event() - self.reloadevent = multiprocessing.Event() - self.newConnectionQueue = multiprocessing.Queue() - # for observer reconnect process - self.conreqQueue = multiprocessing.Queue() - self.condoneQueue = multiprocessing.Queue() - # start observer reconnect process - self.op = threading.Thread(target = obs_connect_process, args=(self.conreqQueue,self.condoneQueue,self.stopevent,)) - self.op.daemon = True - - def reloadConfiguration(self, newconfig): - oldconfig = self.proxyConfig - self.proxyConfig = newconfig - # empty observer request queue - self.conreqQueue.put(None) - # drop old connections - for dc in [c for c in oldconfig if c not in newconfig]: - logger.debug("Drop old connection %s" % str(dc)) - self.server_socket_process_list[dc[0][1]][1].set() # set stop event - if dc[0] in self.server_socket_list and self.server_socket_list[dc[0]]: - self.removeHandler(self.server_socket_list[dc[0]]) - elif dc[1] in self.obs_socket_list and self.obs_socket_list[dc[1]]: - 
self.removeHandler(self.obs_socket_list[dc[1]]) - for dc in [c for c in oldconfig if c not in newconfig]: - self.server_socket_process_list[dc[0][1]][0].join() - del self.server_socket_process_list[dc[0][1]] # remove the entry from the dictionary - # add new connections - for nc in [c for c in newconfig if c not in oldconfig]: - logger.debug("Add new connection %s" % str(nc)) - self.requestListenSocket(nc[0]) - self.requestObserverSocket(nc[1]) - - def requestListenSocket(self, addr): - if not addr[1] in self.server_socket_process_list: - _stopevent = multiprocessing.Event() - lp = threading.Thread(target = listen_process, args=(addr[1],self.newConnectionQueue,_stopevent,)) - lp.daemon = True - lp.start() - self.server_socket_process_list[addr[1]] = (lp, _stopevent) - - def requestObserverSocket(self, addr): - self.conreqQueue.put(addr) - - def getLists(self, is_observer): - if is_observer: - return self.obs_socket_list, self.server_socket_list, self.obs_to_client, self.client_to_obs - else: - return self.server_socket_list, self.obs_socket_list, self.client_to_obs, self.obs_to_client - - def removeHandler(self, conn): - reconnect = None - conn.shutdown(socket.SHUT_RDWR) - conn.close() - # remove from socket list - for l in (self.obs_socket_list, self.server_socket_list): - for k,s in list(l.items()): - if s == conn: - del(l[k]) - reconnectaddr = k - break - # if bidirectional, remove also other socket - if conn in self.client_to_obs: # client connetion. remove - reconnect = False - src_list, dst_list, src_to_dst, dst_to_src = self.getLists(False) - elif conn in self.obs_to_client: # observer connection. try to reconnect with timeout - reconnect = True - src_list, dst_list, src_to_dst, dst_to_src = self.getLists(True) - else: - return - self.removelist.append(conn) - self.removelist.append(src_to_dst[conn]) - del(dst_to_src[src_to_dst[conn]]) - del(src_to_dst[conn]) - if reconnect and reconnectaddr: - connectionConfig = [p for p in self.proxyConfig if p[1] == reconnectaddr] - if len(connectionConfig) > 0: - self.requestObserverSocket(connectionConfig[0][1]) - - def addHandler(self, conn, addr, is_observer): - if is_observer: - connectionConfig = [p[0] for p in self.proxyConfig if p[1] == addr] - else: - connectionConfig = [p[1] for p in self.proxyConfig if p[0] == addr] - if len(connectionConfig) > 0: - src_list, dst_list, src_to_dst, dst_to_src = self.getLists(is_observer) - connectionConfig = connectionConfig[0] - if addr in src_list: - conn.shutdown(socket.SHUT_RDWR) - conn.close() - logger.info("Connection rejected, already exists") - return - src_list[addr] = conn - #logger.debug("src_list is %s" % str(src_list)) - if connectionConfig in dst_list: - src_to_dst[conn] = dst_list[connectionConfig] - dst_to_src[dst_list[connectionConfig]] = conn - # forward on this connection - self.addlist.append(conn) - self.addlist.append(src_to_dst[conn]) - logger.info("Established connection %s" % (str((connectionConfig, addr)))) - else: - conn.close() - logger.info("Connection not for us, addr was %s" % str(addr)) + server_socket_process_list = {} # dict port > process + obs_socket_list = {} # dict (obs,slot) -> socket + server_socket_list = {} # dict (clientaddr, obs, slot) -> socket + client_to_obs = {} # dict obs_socket -> server_socket + obs_to_client = {} # dict server_socket -> obs_socket + proxyConfig = [] + addlist = [] + removelist = [] + op = None + + def __init__(self): + # multiprocessing events and queues + # for server socket processes + self.stopevent = multiprocessing.Event() + 
self.reloadevent = multiprocessing.Event() + self.newConnectionQueue = multiprocessing.Queue() + # for observer reconnect process + self.conreqQueue = multiprocessing.Queue() + self.condoneQueue = multiprocessing.Queue() + # start observer reconnect process + self.op = threading.Thread(target = obs_connect_process, args=(self.conreqQueue,self.condoneQueue,self.stopevent,)) + self.op.daemon = True + + def reloadConfiguration(self, newconfig): + oldconfig = self.proxyConfig + self.proxyConfig = newconfig + # empty observer request queue + self.conreqQueue.put(None) + # drop old connections + for dc in [c for c in oldconfig if c not in newconfig]: + logger.debug("Drop old connection %s" % str(dc)) + self.server_socket_process_list[dc[0][1]][1].set() # set stop event + if dc[0] in self.server_socket_list and self.server_socket_list[dc[0]]: + self.removeHandler(self.server_socket_list[dc[0]]) + elif dc[1] in self.obs_socket_list and self.obs_socket_list[dc[1]]: + self.removeHandler(self.obs_socket_list[dc[1]]) + for dc in [c for c in oldconfig if c not in newconfig]: + self.server_socket_process_list[dc[0][1]][0].join() + del self.server_socket_process_list[dc[0][1]] # remove the entry from the dictionary + # add new connections + for nc in [c for c in newconfig if c not in oldconfig]: + logger.debug("Add new connection %s" % str(nc)) + self.requestListenSocket(nc[0]) + self.requestObserverSocket(nc[1]) + + def requestListenSocket(self, addr): + if not addr[1] in self.server_socket_process_list: + _stopevent = multiprocessing.Event() + lp = threading.Thread(target = listen_process, args=(addr[1],self.newConnectionQueue,_stopevent,)) + lp.daemon = True + lp.start() + self.server_socket_process_list[addr[1]] = (lp, _stopevent) + + def requestObserverSocket(self, addr): + self.conreqQueue.put(addr) + + def getLists(self, is_observer): + if is_observer: + return self.obs_socket_list, self.server_socket_list, self.obs_to_client, self.client_to_obs + else: + return self.server_socket_list, self.obs_socket_list, self.client_to_obs, self.obs_to_client + + def removeHandler(self, conn): + reconnect = None + conn.shutdown(socket.SHUT_RDWR) + conn.close() + # remove from socket list + for l in (self.obs_socket_list, self.server_socket_list): + for k,s in list(l.items()): + if s == conn: + del(l[k]) + reconnectaddr = k + break + # if bidirectional, remove also other socket + if conn in self.client_to_obs: # client connetion. remove + reconnect = False + src_list, dst_list, src_to_dst, dst_to_src = self.getLists(False) + elif conn in self.obs_to_client: # observer connection. 
try to reconnect with timeout + reconnect = True + src_list, dst_list, src_to_dst, dst_to_src = self.getLists(True) + else: + return + self.removelist.append(conn) + self.removelist.append(src_to_dst[conn]) + del(dst_to_src[src_to_dst[conn]]) + del(src_to_dst[conn]) + if reconnect and reconnectaddr: + connectionConfig = [p for p in self.proxyConfig if p[1] == reconnectaddr] + if len(connectionConfig) > 0: + self.requestObserverSocket(connectionConfig[0][1]) + + def addHandler(self, conn, addr, is_observer): + if is_observer: + connectionConfig = [p[0] for p in self.proxyConfig if p[1] == addr] + else: + connectionConfig = [p[1] for p in self.proxyConfig if p[0] == addr] + if len(connectionConfig) > 0: + src_list, dst_list, src_to_dst, dst_to_src = self.getLists(is_observer) + connectionConfig = connectionConfig[0] + if addr in src_list: + conn.shutdown(socket.SHUT_RDWR) + conn.close() + logger.info("Connection rejected, already exists") + return + src_list[addr] = conn + #logger.debug("src_list is %s" % str(src_list)) + if connectionConfig in dst_list: + src_to_dst[conn] = dst_list[connectionConfig] + dst_to_src[dst_list[connectionConfig]] = conn + # forward on this connection + self.addlist.append(conn) + self.addlist.append(src_to_dst[conn]) + logger.info("Established connection %s" % (str((connectionConfig, addr)))) + else: + conn.close() + logger.info("Connection not for us, addr was %s" % str(addr)) - def getChanges(self): - a = self.addlist - r = self.removelist - self.addlist = [] - self.removelist = [] - return a, r - - def forward(self, data, src_conn): - if src_conn in self.client_to_obs and self.client_to_obs[src_conn]: - self.client_to_obs[src_conn].send(data) - elif src_conn in self.obs_to_client and self.obs_to_client[src_conn]: - self.obs_to_client[src_conn].send(data) - - def run(self): - global stopevent - global reloadevent - stopevent = self.stopevent - reloadevent = self.reloadevent - self.op.start() - logger.info("FlockLab serial proxy started.") - - # infinite while loop - inputs = [self.newConnectionQueue._reader, self.condoneQueue._reader] - while not stopevent.is_set(): - try: - (readable, writable, ex) = select.select(inputs,[],[],10) # 10s timeout - except select.error as e: - if e[0] != errno.EINTR: - raise - except Exception as e: - logger.error("Error %s, %s" % (str(e), type(e))) - raise - - # config reload necessary? 
- if reloadevent.is_set(): - reloadevent.clear() - logger.info("Reloading configuration...") - newProxyConfig = update_configuration_from_db() - self.reloadConfiguration(newProxyConfig) - if len(newProxyConfig) == 0: - logger.info("No running tests, shutting down serial proxy...") - stopevent.set() - readable = [] - - # check new connections - try: - for i in readable: - # new connection from user - if i == self.newConnectionQueue._reader: - try: - conn, addr = self.newConnectionQueue.get(False) - self.addHandler(conn, addr, is_observer = False) - except queue.Empty: - pass - # new connection to observer - elif i == self.condoneQueue._reader: - try: - conn, addr = self.condoneQueue.get(False) - self.addHandler(conn, addr, is_observer = True) - except queue.Empty: - pass - # assume it is a socket, do forwarding - else: - m = '' - try: - m = i.recv(1024) - logger.debug("received %d bytes from socket %s" % (len(m), str(i))) - except socket_error as serr: - # user probably disconnected, don't generate an error message - logger.debug("socket_error") - break - except Exception as e: - logger.error("error %s, %s" % (str(e), type(e))) - # a socket without data available is from a client that has disconnected - if len(m) == 0: - self.removeHandler(i) - else: - self.forward(m, i) - # do book keeping - iadd, iremove = self.getChanges() - for r in iremove: - logger.debug("remove socket %s" %str(r)) - inputs.remove(r) - for a in iadd: - logger.debug("add socket %s" %str(a)) - inputs.append(a) - except: - logger.error("Error %s, %s" % (str(e), type(e))) - - self.reloadConfiguration([]) - self.op.join() - - logger.info("Serial proxy stopped.") + def getChanges(self): + a = self.addlist + r = self.removelist + self.addlist = [] + self.removelist = [] + return a, r + + def forward(self, data, src_conn): + if src_conn in self.client_to_obs and self.client_to_obs[src_conn]: + self.client_to_obs[src_conn].send(data) + elif src_conn in self.obs_to_client and self.obs_to_client[src_conn]: + self.obs_to_client[src_conn].send(data) + + def run(self): + global stopevent + global reloadevent + stopevent = self.stopevent + reloadevent = self.reloadevent + self.op.start() + logger.info("FlockLab serial proxy started.") + + # infinite while loop + inputs = [self.newConnectionQueue._reader, self.condoneQueue._reader] + while not stopevent.is_set(): + try: + (readable, writable, ex) = select.select(inputs,[],[],10) # 10s timeout + except select.error as e: + if e[0] != errno.EINTR: + raise + except Exception as e: + logger.error("Error %s, %s" % (str(e), type(e))) + raise + + # config reload necessary? 
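+            # reloadevent is presumably set by the signal handler registered in
+            # start_proxy() when the proxy receives SIGINT, i.e. when this script
+            # is invoked with --notify. On reload, the serial configuration of all
+            # preparing/running tests is re-read from the database; each entry has
+            # the form
+            #   ((user_remote_ip, proxystartport + obsid), (observer_ip, obsdataport))
+            # and reloadConfiguration() drops connections that are no longer
+            # configured and requests listen/observer sockets for new ones.
+            # If no test is left, the proxy shuts itself down.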
+ if reloadevent.is_set(): + reloadevent.clear() + logger.info("Reloading configuration...") + newProxyConfig = update_configuration_from_db() + self.reloadConfiguration(newProxyConfig) + if len(newProxyConfig) == 0: + logger.info("No running tests, shutting down serial proxy...") + stopevent.set() + readable = [] + + # check new connections + try: + for i in readable: + # new connection from user + if i == self.newConnectionQueue._reader: + try: + conn, addr = self.newConnectionQueue.get(False) + self.addHandler(conn, addr, is_observer = False) + except queue.Empty: + pass + # new connection to observer + elif i == self.condoneQueue._reader: + try: + conn, addr = self.condoneQueue.get(False) + self.addHandler(conn, addr, is_observer = True) + except queue.Empty: + pass + # assume it is a socket, do forwarding + else: + m = '' + try: + m = i.recv(1024) + logger.debug("received %d bytes from socket %s" % (len(m), str(i))) + except socket_error as serr: + # user probably disconnected, don't generate an error message + logger.debug("socket_error") + break + except Exception as e: + logger.error("error %s, %s" % (str(e), type(e))) + # a socket without data available is from a client that has disconnected + if len(m) == 0: + self.removeHandler(i) + else: + self.forward(m, i) + # do book keeping + iadd, iremove = self.getChanges() + for r in iremove: + logger.debug("remove socket %s" %str(r)) + inputs.remove(r) + for a in iadd: + logger.debug("add socket %s" %str(a)) + inputs.append(a) + except: + logger.error("Error %s, %s" % (str(e), type(e))) + + self.reloadConfiguration([]) + self.op.join() + + logger.info("Serial proxy stopped.") ### END class ProxyConnections ############################################################################## @@ -407,19 +407,19 @@ class ProxyConnections(): # ############################################################################## def start_proxy(): - proxyConfig = update_configuration_from_db() - if len(proxyConfig) == 0: - logger.info("No connections, exiting...") - return - # Daemonize the process --- - daemon.daemonize(None, closedesc=False) - # Catch kill signals --- - signal.signal(signal.SIGTERM, sigterm_handler) - signal.signal(signal.SIGINT, sigterm_handler) - logger.info("Daemon started") - proxy = ProxyConnections() - proxy.reloadConfiguration(proxyConfig) - proxy.run() + proxyConfig = update_configuration_from_db() + if len(proxyConfig) == 0: + logger.info("No connections, exiting...") + return + # Daemonize the process --- + daemon.daemonize(None, closedesc=False) + # Catch kill signals --- + signal.signal(signal.SIGTERM, sigterm_handler) + signal.signal(signal.SIGINT, sigterm_handler) + logger.info("Daemon started") + proxy = ProxyConnections() + proxy.reloadConfiguration(proxyConfig) + proxy.run() ### END start_proxy ############################################################################## @@ -428,43 +428,43 @@ def start_proxy(): # ############################################################################## def sig_proxy(signum): - # Get oldest running instance of the proxy for the selected test ID which is the main process and send it the terminate signal: - try: - searchterm = "%s"%scriptname - cmd = ['pgrep', '-o', '-f', searchterm] - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - if (p.returncode == 0): - pid = int(out) - # Do not stop this instance if it is the only one running: - if (pid == os.getpid()): - raise Error - else: 
- logger.warn("Command failed: %s" % (str(cmd))) - raise Error - # Signal the process to stop: - if (pid > 0): - logger.debug("Sending signal %d to process %d" %(signum, pid)) - try: - os.kill(pid, signum) - if signum == signal.SIGTERM: - logger.debug("Waiting for process to finish...") - # wait for process to finish (timeout..) - shutdown_timeout = config.getint("serialproxy", "shutdown_timeout") - pidpath = "/proc/%d"%pid - while os.path.exists(pidpath) & (shutdown_timeout>0): - time.sleep(1) - shutdown_timeout = shutdown_timeout - 1 - if os.path.exists(pidpath): - logger.warn("Serial proxy is still running, sending it the SIGKILL signal...") - os.kill(pid, signal.SIGKILL) - except: - logger.warn("Failed to send SIGKILL: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - except (ValueError, Error): - logger.debug("Serial proxy daemon was not running, thus it cannot be stopped.") - return errno.ENOPKG - - return SUCCESS + # Get oldest running instance of the proxy for the selected test ID which is the main process and send it the terminate signal: + try: + searchterm = "%s"%scriptname + cmd = ['pgrep', '-o', '-f', searchterm] + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + if (p.returncode == 0): + pid = int(out) + # Do not stop this instance if it is the only one running: + if (pid == os.getpid()): + raise Error + else: + logger.warn("Command failed: %s" % (str(cmd))) + raise Error + # Signal the process to stop: + if (pid > 0): + logger.debug("Sending signal %d to process %d" %(signum, pid)) + try: + os.kill(pid, signum) + if signum == signal.SIGTERM: + logger.debug("Waiting for process to finish...") + # wait for process to finish (timeout..) + shutdown_timeout = config.getint("serialproxy", "shutdown_timeout") + pidpath = "/proc/%d"%pid + while os.path.exists(pidpath) & (shutdown_timeout>0): + time.sleep(1) + shutdown_timeout = shutdown_timeout - 1 + if os.path.exists(pidpath): + logger.warn("Serial proxy is still running, sending it the SIGKILL signal...") + os.kill(pid, signal.SIGKILL) + except: + logger.warn("Failed to send SIGKILL: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + except (ValueError, Error): + logger.debug("Serial proxy daemon was not running, thus it cannot be stopped.") + return errno.ENOPKG + + return SUCCESS ### END sig_proxy @@ -475,13 +475,13 @@ def sig_proxy(signum): # ############################################################################## def usage(): - print("Usage: %s --notify/start/stop [--debug] [--help]" %scriptname) - print("Options:") - print(" --notify\t\t\tNotifies the proxy of a change in the database.") - print(" --start\t\t\tStarts the background process of the proxy.") - print(" --stop\t\t\tCauses the program to stop a possibly running instance of the serial proxy.") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s --notify/start/stop [--debug] [--help]" %scriptname) + print("Options:") + print(" --notify\t\t\tNotifies the proxy of a change in the database.") + print(" --start\t\t\tStarts the background process of the proxy.") + print(" --stop\t\t\tCauses the program to stop a possibly running instance of the serial proxy.") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. 
Print this help.") ### END usage() @@ -492,80 +492,80 @@ def usage(): # ############################################################################## def main(argv): - - ### Get global variables ### - global logger - global debug - global config - - stop = False - start = False - notify = False - - # Set timezone to UTC --- - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file --- - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + ### Get global variables ### + global logger + global debug + global config + + stop = False + start = False + notify = False + + # Set timezone to UTC --- + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file --- + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - # Get command line parameters --- - try: - opts, args = getopt.getopt(argv, "hnsed", ["help", "notify", "start", "stop", "debug"]) - except getopt.GetoptError as err: - print(str(err)) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - for opt, arg in opts: - if opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-d", "--debug"): - debug = True - logger.debug("Detected debug flag.") - logger.setLevel(logging.DEBUG) - elif opt in ("-e", "--stop"): - stop = True - elif opt in ("-s", "--start"): - start = True - elif opt in ("-n", "--notify"): - notify = True - else: - print("Wrong API usage") - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Start / stop the proxy --- - ret = SUCCESS - if stop: - ret = sig_proxy(signal.SIGTERM) - elif notify: - ret = sig_proxy(signal.SIGINT) - if start or notify and ret == errno.ENOPKG: - # Start the proxy processes: - ret = SUCCESS - try: - start_proxy() - except Exception: - logger.info(traceback.format_exc()) - raise - - sys.exit(ret) + # Get command line parameters --- + try: + opts, args = getopt.getopt(argv, "hnsed", ["help", "notify", "start", "stop", "debug"]) + except getopt.GetoptError as err: + print(str(err)) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-d", "--debug"): + debug = True + logger.debug("Detected debug flag.") + logger.setLevel(logging.DEBUG) + elif opt in ("-e", "--stop"): + stop = True + elif opt in ("-s", "--start"): + start = True + elif opt in ("-n", "--notify"): + notify = True + else: + print("Wrong API usage") + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Start / stop the proxy --- + ret = SUCCESS + if stop: + ret = sig_proxy(signal.SIGTERM) + elif notify: + ret = sig_proxy(signal.SIGINT) + if start or notify and ret == errno.ENOPKG: + # Start the proxy processes: + ret = SUCCESS + try: + start_proxy() + except 
Exception: + logger.info(traceback.format_exc()) + raise + + sys.exit(ret) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) diff --git a/testmanagementserver/flocklab_upd_slot_assign.py b/testmanagementserver/flocklab_upd_slot_assign.py index 400f12cc4b6348f323d270eea51f32313449e2a9..23cea3c7cc7207f08e5302ca5c26e00d23b74fec 100755 --- a/testmanagementserver/flocklab_upd_slot_assign.py +++ b/testmanagementserver/flocklab_upd_slot_assign.py @@ -1,8 +1,8 @@ #! /usr/bin/env python3 -__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" -__license__ = "GPL" +__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" +__license__ = "GPL" import os, sys, getopt, MySQLdb, errno, threading, subprocess, time, traceback, queue, logging @@ -25,127 +25,127 @@ debug = False # ############################################################################## class UpdateSlotAssignThread(threading.Thread): - def __init__(self, observerdata, config, logger, searchtime, maxretries, queue): - threading.Thread.__init__(self) - self.ObsKey = observerdata[0] - self.ObsHostname = observerdata[1] - self.ObsSerialList = observerdata[2:] - self.Config = config - self.Logger = logger - self.Searchtime = searchtime - self.Maxretries = maxretries - self.Queue = queue + def __init__(self, observerdata, config, logger, searchtime, maxretries, queue): + threading.Thread.__init__(self) + self.ObsKey = observerdata[0] + self.ObsHostname = observerdata[1] + self.ObsSerialList = observerdata[2:] + self.Config = config + self.Logger = logger + self.Searchtime = searchtime + self.Maxretries = maxretries + self.Queue = queue - def run(self): - # Get list of ID's for every slot from observer over SSH: - cmd = self.Config.get("observer", "serialidscript") - if self.Searchtime: - cmd = "%s -s%.1f" %(cmd, self.Searchtime) - if self.Maxretries: - cmd = "%s -m%d" %(cmd, self.Maxretries) - self.Logger.debug("Observer %s: calling %s" %(self.ObsHostname, cmd)) - p = subprocess.Popen(['ssh', '%s' % (self.ObsHostname), cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - rs = p.communicate() - self.Logger.debug("Observer %s: got response: %s" % (self.ObsHostname, str(rs))) + def run(self): + # Get list of ID's for every slot from observer over SSH: + cmd = self.Config.get("observer", "serialidscript") + if self.Searchtime: + cmd = "%s -s%.1f" %(cmd, self.Searchtime) + if self.Maxretries: + cmd = "%s -m%d" %(cmd, self.Maxretries) + self.Logger.debug("Observer %s: calling %s" %(self.ObsHostname, cmd)) + p = subprocess.Popen(['ssh', '%s' % (self.ObsHostname), cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + rs = p.communicate() + self.Logger.debug("Observer %s: got response: %s" % (self.ObsHostname, str(rs))) - # Compare list against values stored on database if ssh command was successful - if (rs[1] != ''): - 
self.Logger.debug("Observer %s: returned error: %s" % (self.ObsHostname, str(rs[1]))) - slots = rs[0].split('\n') - if ((rs[1] == '') and (len(slots) > 1)): - cmds = [] - changes = [] - for i, slot in enumerate(slots[0:4]): - s = slot.split(' ') - slotnr = s[0][0] - serialid = s[1] + # Compare list against values stored on database if ssh command was successful + if (rs[1] != ''): + self.Logger.debug("Observer %s: returned error: %s" % (self.ObsHostname, str(rs[1]))) + slots = rs[0].split('\n') + if ((rs[1] == '') and (len(slots) > 1)): + cmds = [] + changes = [] + for i, slot in enumerate(slots[0:4]): + s = slot.split(' ') + slotnr = s[0][0] + serialid = s[1] - # If a serial ID was found and if it differs in the database and on the observer, update the database: - if serialid == 'not': - serialid = None - if (serialid != self.ObsSerialList[i]): - msg = "Observer %s: serial IDs for slot %s differ. Value database: %s, value observer slot: %s" % (self.ObsHostname, slotnr, self.ObsSerialList[i], serialid) - self.Logger.debug(msg) - changes.append((self.ObsHostname, slotnr, self.ObsSerialList[i], serialid)) - cmds.append(""" UPDATE `tbl_serv_observer` - SET slot_%s_tg_adapt_list_fk = ( - SELECT `serv_tg_adapt_list_key` - FROM `tbl_serv_tg_adapt_list` - WHERE `serialid` = '%s') - WHERE `serv_observer_key` = %s; - """ % (i+1, serialid, self.ObsKey)) + # If a serial ID was found and if it differs in the database and on the observer, update the database: + if serialid == 'not': + serialid = None + if (serialid != self.ObsSerialList[i]): + msg = "Observer %s: serial IDs for slot %s differ. Value database: %s, value observer slot: %s" % (self.ObsHostname, slotnr, self.ObsSerialList[i], serialid) + self.Logger.debug(msg) + changes.append((self.ObsHostname, slotnr, self.ObsSerialList[i], serialid)) + cmds.append(""" UPDATE `tbl_serv_observer` + SET slot_%s_tg_adapt_list_fk = ( + SELECT `serv_tg_adapt_list_key` + FROM `tbl_serv_tg_adapt_list` + WHERE `serialid` = '%s') + WHERE `serv_observer_key` = %s; + """ % (i+1, serialid, self.ObsKey)) - # If any changes need to be done to the database, do so: - if len(cmds) > 0: - try: - (cn, cur) = flocklab.connect_to_db(self.Config, self.Logger) - except: - self.Logger.error("Could not connect to database") - raise - try: - for cmd in cmds: - #self.Logger.debug("Observer %s: executing SQL: %s" % (self.ObsHostname, cmd)) - cur.execute(cmd) - cn.commit() + # If any changes need to be done to the database, do so: + if len(cmds) > 0: + try: + (cn, cur) = flocklab.connect_to_db(self.Config, self.Logger) + except: + self.Logger.error("Could not connect to database") + raise + try: + for cmd in cmds: + #self.Logger.debug("Observer %s: executing SQL: %s" % (self.ObsHostname, cmd)) + cur.execute(cmd) + cn.commit() - # Finally prepare message to send to admins about the change(s): - msg = "" - sql = """ SELECT `name` - FROM `tbl_serv_tg_adapt_types` - WHERE `serv_tg_adapt_types_key` = ( - SELECT `tg_adapt_types_fk` - FROM `tbl_serv_tg_adapt_list` - WHERE `serialid` = '%s'); - """ - for change in changes: - old = None - new = None - # Get the type of the old adapter from the DB: - if (change[2] == None): - old = 'None' - else: - cmd = sql % (change[2]) - cur.execute(cmd) - #self.Logger.debug("Observer %s: executing SQL: %s" % (self.ObsHostname, cmd)) - rs = cur.fetchone() - if rs: - old = rs[0] - # Get the type of the new adapter from the DB: - if (change[3] == None): - new = 'None' - else: - cmd = sql % (change[3]) - cur.execute(cmd) - #self.Logger.debug("Observer %s: 
executing SQL: %s" % (self.ObsHostname, cmd)) - rs = cur.fetchone() - if rs: - new = rs[0] - # If the serial id was not found in the database, inform the admin about it: - if not old: - msg = msg + "Observer %s: serial ID %s in slot %s not found in database. Has it not been registered yet?\n" % (str(change[0]), str(change[2]), str(change[1])) - elif not new: - msg = msg + "Observer %s: serial ID %s in slot %s not found in database. Has it not been registered yet?\n" % (str(change[0]), str(change[3]), str(change[1])) - else: - msg = msg + "Observer %s: serial IDs for slot %s differ. Old adapter according to database was %s (%s) but detected %s (%s) in slot. Database has been updated accordingly.\n" % (str(change[0]), str(change[1]), str(old), str(change[2]), str(new), str(change[3])) - self.Queue.put(msg) + # Finally prepare message to send to admins about the change(s): + msg = "" + sql = """ SELECT `name` + FROM `tbl_serv_tg_adapt_types` + WHERE `serv_tg_adapt_types_key` = ( + SELECT `tg_adapt_types_fk` + FROM `tbl_serv_tg_adapt_list` + WHERE `serialid` = '%s'); + """ + for change in changes: + old = None + new = None + # Get the type of the old adapter from the DB: + if (change[2] == None): + old = 'None' + else: + cmd = sql % (change[2]) + cur.execute(cmd) + #self.Logger.debug("Observer %s: executing SQL: %s" % (self.ObsHostname, cmd)) + rs = cur.fetchone() + if rs: + old = rs[0] + # Get the type of the new adapter from the DB: + if (change[3] == None): + new = 'None' + else: + cmd = sql % (change[3]) + cur.execute(cmd) + #self.Logger.debug("Observer %s: executing SQL: %s" % (self.ObsHostname, cmd)) + rs = cur.fetchone() + if rs: + new = rs[0] + # If the serial id was not found in the database, inform the admin about it: + if not old: + msg = msg + "Observer %s: serial ID %s in slot %s not found in database. Has it not been registered yet?\n" % (str(change[0]), str(change[2]), str(change[1])) + elif not new: + msg = msg + "Observer %s: serial ID %s in slot %s not found in database. Has it not been registered yet?\n" % (str(change[0]), str(change[3]), str(change[1])) + else: + msg = msg + "Observer %s: serial IDs for slot %s differ. Old adapter according to database was %s (%s) but detected %s (%s) in slot. 
Database has been updated accordingly.\n" % (str(change[0]), str(change[1]), str(old), str(change[2]), str(new), str(change[3])) + self.Queue.put(msg) - except MySQLdb.Error as err: - self.Logger.warn(str(err)) - sys.exit(errno.EIO) - except: - self.Logger.warn("Error updating serial ID: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - finally: - cur.close() - cn.close() - else: - msg = "Observer %s: No change detected!\n" % (self.ObsHostname) - self.Queue.put(msg) + except MySQLdb.Error as err: + self.Logger.warn(str(err)) + sys.exit(errno.EIO) + except: + self.Logger.warn("Error updating serial ID: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + finally: + cur.close() + cn.close() + else: + msg = "Observer %s: No change detected!\n" % (self.ObsHostname) + self.Queue.put(msg) - else: - msg = "Observer %s: ssh invalid return!\n" % (self.ObsHostname) - self.Queue.put(msg) + else: + msg = "Observer %s: ssh invalid return!\n" % (self.ObsHostname) + self.Queue.put(msg) - return(SUCCESS) + return(SUCCESS) ### END UpdateSlotAssignThread @@ -156,14 +156,14 @@ class UpdateSlotAssignThread(threading.Thread): # ############################################################################## def usage(): - print(("Usage: %s [--searchtime <float>] [--maxretries <int>] [--debug] [--help] [--obs <id>] [--develop]" % sys.argv[0])) - print("Options:") - print(" --searchtime\t\t\tOptional. If set, standard time for waiting for the ID search is overwritten.") - print(" --maxretries\t\t\tOptional. If set, standard number of retries for reading an ID is overwritten.") - print(" --debug\t\t\tPrint debug messages to log.") - print(" --observer\t\t\tUpdate only observer with ID <id>.") - print(" --develop\t\t\tUpdate only observers with status 'develop'.") - print(" --help\t\t\tOptional. Print this help.") + print(("Usage: %s [--searchtime <float>] [--maxretries <int>] [--debug] [--help] [--obs <id>] [--develop]" % sys.argv[0])) + print("Options:") + print(" --searchtime\t\t\tOptional. If set, standard time for waiting for the ID search is overwritten.") + print(" --maxretries\t\t\tOptional. If set, standard number of retries for reading an ID is overwritten.") + print(" --debug\t\t\tPrint debug messages to log.") + print(" --observer\t\t\tUpdate only observer with ID <id>.") + print(" --develop\t\t\tUpdate only observers with status 'develop'.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -175,169 +175,169 @@ def usage(): ############################################################################## def main(argv): - ### Get global variables ### - global debug - global scriptname - global scriptpath - threadlist = [] - searchtime = None - maxretries = None - force = False - observer = "" - status = "'online', 'internal', 'develop'" + ### Get global variables ### + global debug + global scriptname + global scriptpath + threadlist = [] + searchtime = None + maxretries = None + force = False + observer = "" + status = "'online', 'internal', 'develop'" - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - # Get command line parameters. 
- try: - opts, args = getopt.getopt(argv, "hds:m:fo:d", ["help", "debug", "searchtime", "maxretries", "force", "observer", "develop"]) - except getopt.GetoptError as err: - print((str(err))) - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - logger.warn("Error %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - for opt, arg in opts: - if opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-d", "--debug"): - debug = True - logger.setLevel(logging.DEBUG) - elif opt in ("-s", "--searchtime"): - try: - searchtime = float(arg) - if (searchtime <= 0.0): - raise ValueError - except: - logger.warn("Wrong API usage: %s" %str(arg)) - usage() - sys.exit(errno.EINVAL) - elif opt in ("-f", "--force"): - force = True - elif opt in ("-o", "--observer"): - try: - observer = " AND observer_id=%u" % int(arg) - print(("will only update observer %u" % int(arg))) - except: - print(("invalid argument '%s'" % arg)) - sys.exit(errno.EINVAL) - elif opt in ("-d", "--develop"): - print("will only update observers with status 'develop'") - status = "'develop'" - elif opt in ("-m", "--maxretries"): - try: - maxretries = int(arg) - if (maxretries < 0): - raise ValueError - except: - logger.warn("Wrong API usage: %s" %str(arg)) - usage() - sys.exit(errno.EINVAL) - else: - print("Wrong API usage") - logger.warn("Wrong API usage") - usage() - sys.exit(errno.EINVAL) + # Get command line parameters. + try: + opts, args = getopt.getopt(argv, "hds:m:fo:d", ["help", "debug", "searchtime", "maxretries", "force", "observer", "develop"]) + except getopt.GetoptError as err: + print((str(err))) + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + logger.warn("Error %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-d", "--debug"): + debug = True + logger.setLevel(logging.DEBUG) + elif opt in ("-s", "--searchtime"): + try: + searchtime = float(arg) + if (searchtime <= 0.0): + raise ValueError + except: + logger.warn("Wrong API usage: %s" %str(arg)) + usage() + sys.exit(errno.EINVAL) + elif opt in ("-f", "--force"): + force = True + elif opt in ("-o", "--observer"): + try: + observer = " AND observer_id=%u" % int(arg) + print(("will only update observer %u" % int(arg))) + except: + print(("invalid argument '%s'" % arg)) + sys.exit(errno.EINVAL) + elif opt in ("-d", "--develop"): + print("will only update observers with status 'develop'") + status = "'develop'" + elif opt in ("-m", "--maxretries"): + try: + maxretries = int(arg) + if (maxretries < 0): + raise ValueError + except: + logger.warn("Wrong API usage: %s" %str(arg)) + usage() + sys.exit(errno.EINVAL) + else: + print("Wrong API usage") + logger.warn("Wrong API usage") + usage() + sys.exit(errno.EINVAL) - # Get the config file: - config = flocklab.get_config(configpath=scriptpath) - if not config: - logger.warn("Could not read configuration file. Exiting...") - sys.exit(errno.EAGAIN) + # Get the config file: + config = flocklab.get_config(configpath=scriptpath) + if not config: + logger.warn("Could not read configuration file. Exiting...") + sys.exit(errno.EAGAIN) - # Check if a test is preparing, running or cleaning up. If yes, exit program. - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - logger.error("Could not connect to database") - raise - if flocklab.is_test_running(cur) and not force: - print("Test is running! 
You can force the target slot update on a specific observer with the flags '-f -o=<id>'."); - logger.debug("A test is running, thus exit...") - cur.close() - cn.close() - else: - logger.info("Started slot assignment updater.") - # Get all active observers from the database: - logger.debug("Going to fetch current database status for active observers...") - try: - sql = """ SELECT a.serv_observer_key, a.ethernet_address, b.serialid AS serialid_1, c.serialid AS serialid_2, d.serialid AS serialid_3, e.serialid AS serialid_4 - FROM `tbl_serv_observer` AS a - LEFT JOIN `tbl_serv_tg_adapt_list` AS b - ON a.slot_1_tg_adapt_list_fk = b.serv_tg_adapt_list_key - LEFT JOIN `tbl_serv_tg_adapt_list` AS c - ON a.slot_2_tg_adapt_list_fk = c.serv_tg_adapt_list_key - LEFT JOIN `tbl_serv_tg_adapt_list` AS d - ON a.slot_3_tg_adapt_list_fk = d.serv_tg_adapt_list_key - LEFT JOIN `tbl_serv_tg_adapt_list` AS e - ON a.slot_4_tg_adapt_list_fk = e.serv_tg_adapt_list_key - WHERE a.status IN (%s) %s - """ % (status, observer) - cur.execute(sql) - except MySQLdb.Error as err: - logger.warn(str(err)) - sys.exit(errno.EIO) - except: - logger.warn("Error %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - rs = cur.fetchall() - cur.close() - cn.close() - # Prepare queue which is going to hold the messages returned from the threads: - q = queue.Queue() - # Start one update thread per observer: - for observerdata in rs: - logger.debug("Starting thread for %s" % (observerdata[1])) - try: - t = UpdateSlotAssignThread(observerdata, config, logger, searchtime, maxretries, q) - threadlist.append(t) - t.start() - except: - logger.warn("Error when starting thread for observer %s: %s: %s" % (observerdata[1], str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - continue - # Wait for threads to finish: - logger.debug("Joining threads") - for t in threadlist: - try: - if (maxretries and searchtime): - thread_timeoutadd = int(4*maxretries*searchtime) - else: - thread_timeoutadd = 0 - t.join(timeout=(10 + thread_timeoutadd)) - if t.isAlive(): - logger.warn("Timeout when joining thread - is still alive...") - except: - logger.warn("Error when joining threads...") - continue - # Get all messages from the threads which are now in the queue and send them to the admin: - try: - msg = "" - while not q.empty(): - msg = msg + q.get_nowait() - if not msg == "": - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - logger.error("Could not connect to database") - raise - emails = flocklab.get_admin_emails(cur, config) - flocklab.send_mail(subject="[FlockLab Slot Updater]", message=msg, recipients=emails) - cur.close() - cn.close() - except: - logger.warn("Error when sending change notifications to admin. %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + # Check if a test is preparing, running or cleaning up. If yes, exit program. + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + logger.error("Could not connect to database") + raise + if flocklab.is_test_running(cur) and not force: + print("Test is running! 
You can force the target slot update on a specific observer with the flags '-f -o=<id>'."); + logger.debug("A test is running, thus exit...") + cur.close() + cn.close() + else: + logger.info("Started slot assignment updater.") + # Get all active observers from the database: + logger.debug("Going to fetch current database status for active observers...") + try: + sql = """ SELECT a.serv_observer_key, a.ethernet_address, b.serialid AS serialid_1, c.serialid AS serialid_2, d.serialid AS serialid_3, e.serialid AS serialid_4 + FROM `tbl_serv_observer` AS a + LEFT JOIN `tbl_serv_tg_adapt_list` AS b + ON a.slot_1_tg_adapt_list_fk = b.serv_tg_adapt_list_key + LEFT JOIN `tbl_serv_tg_adapt_list` AS c + ON a.slot_2_tg_adapt_list_fk = c.serv_tg_adapt_list_key + LEFT JOIN `tbl_serv_tg_adapt_list` AS d + ON a.slot_3_tg_adapt_list_fk = d.serv_tg_adapt_list_key + LEFT JOIN `tbl_serv_tg_adapt_list` AS e + ON a.slot_4_tg_adapt_list_fk = e.serv_tg_adapt_list_key + WHERE a.status IN (%s) %s + """ % (status, observer) + cur.execute(sql) + except MySQLdb.Error as err: + logger.warn(str(err)) + sys.exit(errno.EIO) + except: + logger.warn("Error %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + rs = cur.fetchall() + cur.close() + cn.close() + # Prepare queue which is going to hold the messages returned from the threads: + q = queue.Queue() + # Start one update thread per observer: + for observerdata in rs: + logger.debug("Starting thread for %s" % (observerdata[1])) + try: + t = UpdateSlotAssignThread(observerdata, config, logger, searchtime, maxretries, q) + threadlist.append(t) + t.start() + except: + logger.warn("Error when starting thread for observer %s: %s: %s" % (observerdata[1], str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + continue + # Wait for threads to finish: + logger.debug("Joining threads") + for t in threadlist: + try: + if (maxretries and searchtime): + thread_timeoutadd = int(4*maxretries*searchtime) + else: + thread_timeoutadd = 0 + t.join(timeout=(10 + thread_timeoutadd)) + if t.isAlive(): + logger.warn("Timeout when joining thread - is still alive...") + except: + logger.warn("Error when joining threads...") + continue + # Get all messages from the threads which are now in the queue and send them to the admin: + try: + msg = "" + while not q.empty(): + msg = msg + q.get_nowait() + if not msg == "": + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + logger.error("Could not connect to database") + raise + emails = flocklab.get_admin_emails(cur, config) + flocklab.send_mail(subject="[FlockLab Slot Updater]", message=msg, recipients=emails) + cur.close() + cn.close() + except: + logger.warn("Error when sending change notifications to admin. 
%s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - logger.debug("Slot assignment updater finished.") + logger.debug("Slot assignment updater finished.") - sys.exit(SUCCESS) + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - main(sys.argv[1:]) + main(sys.argv[1:]) diff --git a/testmanagementserver/lib/flocklab.py b/testmanagementserver/lib/flocklab.py index 561e433c5f7d4b5ebdedfddc856d6501e854cd75..16836ae011f439b1f388778b49894a139d4c934d 100755 --- a/testmanagementserver/lib/flocklab.py +++ b/testmanagementserver/lib/flocklab.py @@ -1,8 +1,8 @@ #!/usr/bin/env python -__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" +__author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" -__license__ = "GPL" +__license__ = "GPL" import sys, os, smtplib, MySQLdb, configparser, time, re, errno, random, subprocess, string, logging, traceback, numpy, calendar @@ -42,24 +42,24 @@ TOGGLE = 2 # ############################################################################## def get_config(configpath=None): - global scriptpath - global scriptname - """Arguments: - configpath - Return value: - The configuration object on success - none otherwise - """ - if not configpath: - configpath = scriptpath - try: - config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) - config.read(configpath + '/config.ini') - except: - logger = get_logger() - logger.error("Could not read %s/config.ini because: %s: %s" %(str(configpath), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - config = None - return config + global scriptpath + global scriptname + """Arguments: + configpath + Return value: + The configuration object on success + none otherwise + """ + if not configpath: + configpath = scriptpath + try: + config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) + config.read(configpath + '/config.ini') + except: + logger = get_logger() + logger.error("Could not read %s/config.ini because: %s: %s" %(str(configpath), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + config = None + return config ### END get_config() @@ -69,28 +69,28 @@ def get_config(configpath=None): # ############################################################################## def get_logger(loggername=None, loggerpath=None): - global scriptpath - global scriptname - """Arguments: - loggername - loggerpath - Return value: - The logger object on success - none otherwise - """ - if not loggerpath: - loggerpath = scriptpath - if not loggername: - loggername = scriptname - try: - logging.config.fileConfig(loggerpath + '/logging.conf') - logger = logging.getLogger(loggername) - if not logger: - print("no valid logger received") - except: - syslog.syslog(syslog.LOG_ERR, "flocklab.py: error in get_logger(): %s: Could not open logger because: %s: %s" %(str(loggername), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - logger = None - return logger + global scriptpath + global scriptname + """Arguments: + loggername + loggerpath + Return value: + The logger object on success + none otherwise + """ + if not loggerpath: + loggerpath = scriptpath + if not loggername: + loggername = scriptname + try: + logging.config.fileConfig(loggerpath + '/logging.conf') + logger = logging.getLogger(loggername) + if not logger: + print("no valid logger received") + except: + syslog.syslog(syslog.LOG_ERR, "flocklab.py: error in get_logger(): %s: Could not open logger because: %s: %s" 
%(str(loggername), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + logger = None + return logger ### END get_logger() @@ -100,17 +100,17 @@ def get_logger(loggername=None, loggerpath=None): # ############################################################################## def connect_to_db(config=None, logger=None): - # Check the arguments: - if ((not isinstance(config, configparser.SafeConfigParser)) or (not isinstance(logger, logging.Logger))): - return (None, None) - try: - cn = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database'), charset='utf8', use_unicode=True) - cur = cn.cursor() - #cur.execute("SET sql_mode=''") - except: - logger.error("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - raise - return (cn, cur) + # Check the arguments: + if ((not isinstance(config, configparser.SafeConfigParser)) or (not isinstance(logger, logging.Logger))): + return (None, None) + try: + cn = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database'), charset='utf8', use_unicode=True) + cur = cn.cursor() + #cur.execute("SET sql_mode=''") + except: + logger.error("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + raise + return (cn, cur) ### END connect_to_db() @@ -120,70 +120,70 @@ def connect_to_db(config=None, logger=None): # ############################################################################## def send_mail(subject="[FlockLab]", message="", recipients="", attachments=[]): - """Arguments: - subject: the subject of the message - message: the message to be sent - recipients: tuple with recipient(s) of the message - attachments: list of files to attach. Each file has to be an absolute path - Return value: - 0 on success - 1 if there is an error in the arguments passed to the function - 2 if there was an error processing the function - """ - - # Local variables: - from_address = "flocklab@tik.ee.ethz.ch" - - # Check the arguments: - if ((type(message) != str) or ((type(recipients) != str) and (type(recipients) != list) and (type(recipients) != tuple)) or (type(attachments) != list)): - return(1) - # Check if attachments exist in file system: - if (len(attachments) > 0): - for path in attachments: - if not os.path.isfile(path): - return(1) - - # Create the email: - mail = MIMEMultipart() - - # Attach the message text: - mail.attach(MIMEText(str(message))) - - # Set header fields: - mail['Subject'] = str(subject) - mail['From'] = "FlockLab <%s>" % from_address - mail['Date'] = formatdate(localtime=True) - mail['Message-ID'] = make_msgid() - if ((type(recipients) == tuple) or (type(recipients) == list)): - mail['To'] = ', '.join(recipients) - elif (type(recipients) == str): - mail['To'] = recipients - else: - return(1) - - # If there are attachments, attach them to the email: - for path in attachments: - fp = open(path, 'rb') - fil = MIMEBase('application', 'octet-stream') - fil.set_payload(fp.read()) - fp.close() - encoders.encode_base64(fil) - fil.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path)) - mail.attach(fil) - - # Establish an SMTP object and connect to your mail server - try: - s = smtplib.SMTP() - s.connect("smtp.ee.ethz.ch") - # Send the email - real from, real to, extra headers and content ... 
- s.sendmail(from_address, recipients, mail.as_string()) - s.close() - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - - return (0) + """Arguments: + subject: the subject of the message + message: the message to be sent + recipients: tuple with recipient(s) of the message + attachments: list of files to attach. Each file has to be an absolute path + Return value: + 0 on success + 1 if there is an error in the arguments passed to the function + 2 if there was an error processing the function + """ + + # Local variables: + from_address = "flocklab@tik.ee.ethz.ch" + + # Check the arguments: + if ((type(message) != str) or ((type(recipients) != str) and (type(recipients) != list) and (type(recipients) != tuple)) or (type(attachments) != list)): + return(1) + # Check if attachments exist in file system: + if (len(attachments) > 0): + for path in attachments: + if not os.path.isfile(path): + return(1) + + # Create the email: + mail = MIMEMultipart() + + # Attach the message text: + mail.attach(MIMEText(str(message))) + + # Set header fields: + mail['Subject'] = str(subject) + mail['From'] = "FlockLab <%s>" % from_address + mail['Date'] = formatdate(localtime=True) + mail['Message-ID'] = make_msgid() + if ((type(recipients) == tuple) or (type(recipients) == list)): + mail['To'] = ', '.join(recipients) + elif (type(recipients) == str): + mail['To'] = recipients + else: + return(1) + + # If there are attachments, attach them to the email: + for path in attachments: + fp = open(path, 'rb') + fil = MIMEBase('application', 'octet-stream') + fil.set_payload(fp.read()) + fp.close() + encoders.encode_base64(fil) + fil.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path)) + mail.attach(fil) + + # Establish an SMTP object and connect to your mail server + try: + s = smtplib.SMTP() + s.connect("smtp.ee.ethz.ch") + # Send the email - real from, real to, extra headers and content ... 
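+        # Note: smtplib's sendmail() accepts either a single address string or a
+        # list/tuple of addresses as envelope recipients, so 'recipients' can be
+        # passed through unchanged for both input forms accepted above. connect()
+        # without an explicit port falls back to the default SMTP port 25.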
+ s.sendmail(from_address, recipients, mail.as_string()) + s.close() + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + + return (0) ### END send_mail() @@ -194,34 +194,34 @@ def send_mail(subject="[FlockLab]", message="", recipients="", attachments=[]): # ############################################################################## def check_test_id(cursor=None, testid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - testid: test ID which should be checked - Return value: - 0 if test ID exists in database - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - 3 if test ID does not exist in the database - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid <= 0)): - return(1) - - # Check if the test ID is in the database: - try: - # Check if the test ID exists in tbl_serv_tests.serv_tests_key - cursor.execute("SELECT COUNT(serv_tests_key) FROM `tbl_serv_tests` WHERE serv_tests_key = %d" %testid) - rs = cursor.fetchone()[0] - - if (rs == 0): - return(3) - else: - return(0) - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) + """Arguments: + cursor: cursor of the database connection to be used for the query + testid: test ID which should be checked + Return value: + 0 if test ID exists in database + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + 3 if test ID does not exist in the database + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid <= 0)): + return(1) + + # Check if the test ID is in the database: + try: + # Check if the test ID exists in tbl_serv_tests.serv_tests_key + cursor.execute("SELECT COUNT(serv_tests_key) FROM `tbl_serv_tests` WHERE serv_tests_key = %d" %testid) + rs = cursor.fetchone()[0] + + if (rs == 0): + return(3) + else: + return(0) + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) ### END check_test_id() @@ -231,37 +231,37 @@ def check_test_id(cursor=None, testid=0): # ############################################################################## def get_test_obs(cursor=None, testid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - testid: test ID - Return value: - Dictionary with observer IDs, keys and node IDs - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid <= 0) or (check_test_id(cursor, testid) != 0)): - return 1 - - try: - cursor.execute("SELECT `a`.serv_observer_key, `a`.observer_id, `b`.node_id \ + """Arguments: + cursor: cursor of the database connection to be used for the query + testid: test ID + Return value: + Dictionary with observer IDs, keys and node IDs + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid <= 0) or (check_test_id(cursor, testid) != 0)): + return 1 + + try: + cursor.execute("SELECT 
`a`.serv_observer_key, `a`.observer_id, `b`.node_id \ FROM tbl_serv_observer AS `a` \ LEFT JOIN tbl_serv_map_test_observer_targetimages AS `b` \ ON `a`.serv_observer_key = `b`.observer_fk \ WHERE `b`.test_fk = %d \ ORDER BY `a`.observer_id" %testid) - rs = cursor.fetchall() - - obsdict_bykey = {} - obsdict_byid = {} - for row in rs: - obsdict_bykey[row[0]] = (row[1], row[2]) - obsdict_byid[row[1]] = (row[0], row[2]) - return (obsdict_bykey, obsdict_byid) - - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return 2 + rs = cursor.fetchall() + + obsdict_bykey = {} + obsdict_byid = {} + for row in rs: + obsdict_bykey[row[0]] = (row[1], row[2]) + obsdict_byid[row[1]] = (row[0], row[2]) + return (obsdict_bykey, obsdict_byid) + + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return 2 ### END get_test_obs() @@ -271,19 +271,19 @@ ORDER BY `a`.observer_id" %testid) # ############################################################################## def get_fetcher_pid(testid): - try: - searchterm = "flocklab_fetcher.py (.)*-(-)?t(estid=)?%d"%(testid) - cmd = ['pgrep', '-o', '-f', searchterm] - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - if (p.returncode == 0): - return int(out) - else: - return -1 - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return -2 + try: + searchterm = "flocklab_fetcher.py (.)*-(-)?t(estid=)?%d"%(testid) + cmd = ['pgrep', '-o', '-f', searchterm] + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + if (p.returncode == 0): + return int(out) + else: + return -1 + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return -2 ############################################################################## @@ -292,32 +292,32 @@ def get_fetcher_pid(testid): # ############################################################################## def get_test_owner(cursor=None, testid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - testid: test ID - Return value: - On success, tuple with information - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid <= 0)): - return(1) - - try: - sql = " SELECT `a`.serv_users_key, `a`.lastname, `a`.firstname, `a`.username, `a`.email, `a`.disable_infomails \ - FROM tbl_serv_users AS `a` \ - LEFT JOIN tbl_serv_tests AS `b` \ - ON `a`.serv_users_key = `b`.owner_fk WHERE `b`.serv_tests_key=%d;" - cursor.execute(sql %testid) - rs = cursor.fetchone() - - return (rs[0], rs[1], rs[2], rs[3], rs[4], rs[5]) - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return (2) + """Arguments: + cursor: cursor of the database connection to be used for the query + testid: test ID + Return value: + On success, tuple with information + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(testid) != int) or (testid 
<= 0)): + return(1) + + try: + sql = " SELECT `a`.serv_users_key, `a`.lastname, `a`.firstname, `a`.username, `a`.email, `a`.disable_infomails \ + FROM tbl_serv_users AS `a` \ + LEFT JOIN tbl_serv_tests AS `b` \ + ON `a`.serv_users_key = `b`.owner_fk WHERE `b`.serv_tests_key=%d;" + cursor.execute(sql %testid) + rs = cursor.fetchone() + + return (rs[0], rs[1], rs[2], rs[3], rs[4], rs[5]) + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return (2) ### END get_test_owner() @@ -328,36 +328,36 @@ def get_test_owner(cursor=None, testid=0): # ############################################################################## def get_pinmappings(cursor=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - Return value: - Dictionary with pin number, pin_name - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - - """ - if ((type(cursor) != MySQLdb.cursors.Cursor)): - return 1 - - try: - cursor.execute("SELECT `a`.`pin_number`, `a`.`pin_name` , `b`.`service` \ - FROM `tbl_serv_pinmappings` AS `a` \ - LEFT JOIN `tbl_serv_services` AS `b` \ - ON `a`.`services_fk` = `b`.`serv_services_key` \ - ") - rs = cursor.fetchall() - - pindict = {} - for row in rs: - pindict[row[0]] = (row[1], row[2]) - if len(pindict) == 0: - raise - return pindict - - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return 2 + """Arguments: + cursor: cursor of the database connection to be used for the query + Return value: + Dictionary with pin number, pin_name + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + + """ + if ((type(cursor) != MySQLdb.cursors.Cursor)): + return 1 + + try: + cursor.execute("SELECT `a`.`pin_number`, `a`.`pin_name` , `b`.`service` \ + FROM `tbl_serv_pinmappings` AS `a` \ + LEFT JOIN `tbl_serv_services` AS `b` \ + ON `a`.`services_fk` = `b`.`serv_services_key` \ + ") + rs = cursor.fetchall() + + pindict = {} + for row in rs: + pindict[row[0]] = (row[1], row[2]) + if len(pindict) == 0: + raise + return pindict + + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return 2 ### END get_pinmappings() @@ -368,30 +368,30 @@ def get_pinmappings(cursor=None): # ############################################################################## def get_servicemappings(cursor=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - Return value: - Dictionary with mappings - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - - """ - if ((type(cursor) != MySQLdb.cursors.Cursor)): - return 1 - - try: - cursor.execute("SELECT `serv_services_key`, `service`, `abbreviation` FROM `tbl_serv_services`") - rs = cursor.fetchall() - - servicedict = {} - for row in rs: - servicedict[row[0]] = (row[1], row[2]) - return servicedict - - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return 2 + """Arguments: + cursor: cursor of the database connection to be used for the query + Return value: + Dictionary with mappings + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + + """ + if ((type(cursor) != MySQLdb.cursors.Cursor)): + return 1 + + try: + 
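A note on the helpers above (get_test_obs, get_test_owner, get_pinmappings, get_servicemappings): they report failure by returning small integers (1 for bad arguments, 2 for a failed query) instead of raising, so callers have to type-check the result before indexing into it. A minimal caller-side sketch, assuming the helpers are importable from the flocklab module; the wrapper name is illustrative:

    from flocklab import get_pinmappings   # assumed import path for the helper above

    def pin_name(cursor, pin_number):
        # get_pinmappings() returns {pin_number: (pin_name, service)} on success
        # and the integers 1 (bad arguments) or 2 (query error) on failure.
        pins = get_pinmappings(cursor)
        if isinstance(pins, int):
            raise RuntimeError("get_pinmappings failed with code %d" % pins)
        return pins[pin_number][0]
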
cursor.execute("SELECT `serv_services_key`, `service`, `abbreviation` FROM `tbl_serv_services`") + rs = cursor.fetchall() + + servicedict = {} + for row in rs: + servicedict[row[0]] = (row[1], row[2]) + return servicedict + + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return 2 ### END get_servicemappings() @@ -401,57 +401,57 @@ def get_servicemappings(cursor=None): # ############################################################################## def get_slot(cursor=None, obs_fk=None, platname=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - obs_fk: key of the observer which has to be queried - platname: name of the platform which the slot has to host - Return value: - slot number on success - 0 if no suitable slot was found - -1 if there is an error in the arguments passed to the function - -2 if there was an error in processing the request - - """ - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obs_fk) != int) or (type(platname) != str)): - return -1 - - try: - # First, get a list of all possible adapt_list keys: - sql = """ SELECT `l`.`serv_tg_adapt_list_key` FROM `tbl_serv_tg_adapt_types` AS `t` - LEFT JOIN `tbl_serv_platforms` AS `p` - ON `t`.`platforms_fk` = `p`.`serv_platforms_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `l` - ON `l`.`tg_adapt_types_fk` = `t`.`serv_tg_adapt_types_key` - WHERE LOWER(p.name) = '%s' - """ - cursor.execute(sql%(platname)) - ret = cursor.fetchall() - al_keys = [] - for r in ret: - al_keys.append(r[0]) - # Now get all adapt_list FK's used on the particular observer and see if there is a match: - sql = """ SELECT `slot_1_tg_adapt_list_fk`, `slot_2_tg_adapt_list_fk`, `slot_3_tg_adapt_list_fk`, `slot_4_tg_adapt_list_fk` - FROM `tbl_serv_observer` - WHERE `serv_observer_key` = %d - """ - cursor.execute(sql%(obs_fk)) - slotlist = cursor.fetchone() - slot = None - if (slotlist[0] in al_keys): - slot = 1 - elif (slotlist[1] in al_keys): - slot = 2 - elif (slotlist[2] in al_keys): - slot = 3 - elif (slotlist[3] in al_keys): - slot = 4 - if not slot: - slot = 0 - return slot - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return -2 + """Arguments: + cursor: cursor of the database connection to be used for the query + obs_fk: key of the observer which has to be queried + platname: name of the platform which the slot has to host + Return value: + slot number on success + 0 if no suitable slot was found + -1 if there is an error in the arguments passed to the function + -2 if there was an error in processing the request + + """ + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obs_fk) != int) or (type(platname) != str)): + return -1 + + try: + # First, get a list of all possible adapt_list keys: + sql = """ SELECT `l`.`serv_tg_adapt_list_key` FROM `tbl_serv_tg_adapt_types` AS `t` + LEFT JOIN `tbl_serv_platforms` AS `p` + ON `t`.`platforms_fk` = `p`.`serv_platforms_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `l` + ON `l`.`tg_adapt_types_fk` = `t`.`serv_tg_adapt_types_key` + WHERE LOWER(p.name) = '%s' + """ + cursor.execute(sql%(platname)) + ret = cursor.fetchall() + al_keys = [] + for r in ret: + al_keys.append(r[0]) + # Now get all adapt_list FK's used on the particular observer and see if there is a match: + sql = """ SELECT `slot_1_tg_adapt_list_fk`, `slot_2_tg_adapt_list_fk`, `slot_3_tg_adapt_list_fk`, `slot_4_tg_adapt_list_fk` + FROM `tbl_serv_observer` + WHERE 
`serv_observer_key` = %d + """ + cursor.execute(sql%(obs_fk)) + slotlist = cursor.fetchone() + slot = None + if (slotlist[0] in al_keys): + slot = 1 + elif (slotlist[1] in al_keys): + slot = 2 + elif (slotlist[2] in al_keys): + slot = 3 + elif (slotlist[3] in al_keys): + slot = 4 + if not slot: + slot = 0 + return slot + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return -2 ### END get_slot() @@ -462,37 +462,37 @@ def get_slot(cursor=None, obs_fk=None, platname=None): # ############################################################################## def get_slot_calib(cursor=None, obsfk=None, testid=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - obsfk: observer key - testid: Test key - Return value: - tuple with calibration values on success (0,1) if none were found - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsfk) != int) or (type(testid) != int)): - return 1 - - try: - # First, get a list of all possible adapt_list keys: - sql = """ SELECT 1000*`c`.`offset`, `c`.`factor` - FROM tbl_serv_map_test_observer_targetimages AS `b` - LEFT JOIN tbl_serv_observer_slot_calibration AS `c` - ON (`b`.slot = `c`.slot) AND (`b`.observer_fk = `c`.observer_fk) - WHERE (`b`.test_fk = %d) AND (`b`.observer_fk = %d); - """ - cursor.execute(sql%(testid, obsfk)) - offset, factor = cursor.fetchone() - if (offset == None): - offset = 0.0 - if (factor== None): - factor = 1.0 - return (float(offset), float(factor)) - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return -2 + """Arguments: + cursor: cursor of the database connection to be used for the query + obsfk: observer key + testid: Test key + Return value: + tuple with calibration values on success (0,1) if none were found + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsfk) != int) or (type(testid) != int)): + return 1 + + try: + # First, get a list of all possible adapt_list keys: + sql = """ SELECT 1000*`c`.`offset`, `c`.`factor` + FROM tbl_serv_map_test_observer_targetimages AS `b` + LEFT JOIN tbl_serv_observer_slot_calibration AS `c` + ON (`b`.slot = `c`.slot) AND (`b`.observer_fk = `c`.observer_fk) + WHERE (`b`.test_fk = %d) AND (`b`.observer_fk = %d); + """ + cursor.execute(sql%(testid, obsfk)) + offset, factor = cursor.fetchone() + if (offset == None): + offset = 0.0 + if (factor== None): + factor = 1.0 + return (float(offset), float(factor)) + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return -2 ### END get_slot_calib() @@ -502,31 +502,31 @@ def get_slot_calib(cursor=None, obsfk=None, testid=None): # ############################################################################## def get_obs_from_id(cursor=None, obsid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - testid: observer ID - Return value: - On success, tuple with information - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsid) != int) or (obsid <= 0)): - return(1) - - try: - sql = " SELECT 
`ethernet_address`, `status` \ - FROM `tbl_serv_observer` \ - WHERE `observer_id`=%d;" - cursor.execute(sql %obsid) - rs = cursor.fetchone() - - return (rs[0], rs[1]) - except: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return (2) + """Arguments: + cursor: cursor of the database connection to be used for the query + testid: observer ID + Return value: + On success, tuple with information + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsid) != int) or (obsid <= 0)): + return(1) + + try: + sql = " SELECT `ethernet_address`, `status` \ + FROM `tbl_serv_observer` \ + WHERE `observer_id`=%d;" + cursor.execute(sql %obsid) + rs = cursor.fetchone() + + return (rs[0], rs[1]) + except: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return (2) ### END get_obs_from_id() @@ -534,38 +534,38 @@ def get_obs_from_id(cursor=None, obsid=0): ############################################################################## # # check_observer_id - Check if an observer id is present in the flocklab -# database and return its key if present. +# database and return its key if present. # ############################################################################## def check_observer_id(cursor=None, obsid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - obsid: observer ID which should be checked - Return value: - key if observer ID exists in database - -1 if there is an error in the arguments passed to the function - -2 if there was an error in processing the request - -3 if observer ID does not exist in the database - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsid) != int) or (obsid <= 0)): - return(1) - - # Check if the test ID is in the database: - try: - # Check if the test ID exists in tbl_serv_tests.serv_tests_key - cursor.execute("SELECT serv_observer_key FROM `tbl_serv_observer` WHERE observer_id = %d" %obsid) - rs = cursor.fetchone() - - if (rs == None): - return(-3) - else: - return(rs[0]) - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(-2) + """Arguments: + cursor: cursor of the database connection to be used for the query + obsid: observer ID which should be checked + Return value: + key if observer ID exists in database + -1 if there is an error in the arguments passed to the function + -2 if there was an error in processing the request + -3 if observer ID does not exist in the database + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(obsid) != int) or (obsid <= 0)): + return(1) + + # Check if the test ID is in the database: + try: + # Check if the test ID exists in tbl_serv_tests.serv_tests_key + cursor.execute("SELECT serv_observer_key FROM `tbl_serv_observer` WHERE observer_id = %d" %obsid) + rs = cursor.fetchone() + + if (rs == None): + return(-3) + else: + return(rs[0]) + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(-2) ### END check_observer_id() @@ -575,40 +575,40 @@ def check_observer_id(cursor=None, obsid=0): # 
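check_observer_id() and get_obs_from_id() above are typically used together: the first maps a user-facing observer ID to its database key (or a negative code such as -3 for an unknown ID), the second returns the observer's Ethernet address and status. A hedged sketch of that chain; the wrapper and the 'online' filter are illustrative (the same status value is used by the link-test scheduler further below):

    from flocklab import get_obs_from_id   # assumed import path

    def observer_address(cursor, obsid):
        # get_obs_from_id() returns (ethernet_address, status) on success and a
        # small integer on failure, so the type of the result is checked first.
        ret = get_obs_from_id(cursor, int(obsid))
        if not isinstance(ret, tuple):
            return None
        ethernet_address, status = ret
        return ethernet_address if status == 'online' else None
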
############################################################################## def set_test_status(cursor=None, conn=None, testid=0, status=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - conn: database connection - testid: test ID for which the status is to be set - Return value: - 0 on success - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): - return(1) - # Get all possible test stati and check the status argument: - try: - cursor.execute("SHOW COLUMNS FROM `tbl_serv_tests` WHERE Field = 'test_status'") - possible_stati = cursor.fetchone()[1][5:-1].split(",") - if ("'%s'"%status not in possible_stati): - return(1) - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - - # Set the status in the database - try: - cursor.execute("UPDATE `tbl_serv_tests` SET `test_status` = '%s', `dispatched` = 0 WHERE `serv_tests_key` = %d;" %(status, testid)) - conn.commit() - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - return(0) + """Arguments: + cursor: cursor of the database connection to be used for the query + conn: database connection + testid: test ID for which the status is to be set + Return value: + 0 on success + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): + return(1) + # Get all possible test stati and check the status argument: + try: + cursor.execute("SHOW COLUMNS FROM `tbl_serv_tests` WHERE Field = 'test_status'") + possible_stati = cursor.fetchone()[1][5:-1].split(",") + if ("'%s'"%status not in possible_stati): + return(1) + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + + # Set the status in the database + try: + cursor.execute("UPDATE `tbl_serv_tests` SET `test_status` = '%s', `dispatched` = 0 WHERE `serv_tests_key` = %d;" %(status, testid)) + conn.commit() + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + return(0) ### END set_test_status() @@ -618,31 +618,31 @@ def set_test_status(cursor=None, conn=None, testid=0, status=None): # ############################################################################## def get_test_status(cursor=None, conn=None, testid=0): - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): - return -1 - - # Get the status in the database - try: - # To read changed values directly, one needs to change the isolation level to "READ UNCOMMITTED" - cursor.execute("SELECT @@session.tx_isolation") - isolation_old = cursor.fetchone()[0] - if isolation_old != 'READ-UNCOMMITTED': - cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") 
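set_test_status() above validates the requested status against the ENUM definition of the test_status column by slicing the type string that SHOW COLUMNS returns. A self-contained illustration of that slice; the status values here are placeholders, the real ones come from the FlockLab schema:

    # The column type arrives as a string like the one below; [5:-1] drops the
    # leading "enum(" and the trailing ")", and split(",") keeps the quotes,
    # which is why the membership test in set_test_status() uses "'%s'" % status.
    coltype = "enum('planned','preparing','running','cleaning up','finished','failed')"
    possible_stati = coltype[5:-1].split(",")
    print(possible_stati[:3])             # ["'planned'", "'preparing'", "'running'"]
    print("'running'" in possible_stati)  # True
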
- conn.commit() - # Now get the value: - cursor.execute("SELECT `test_status` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d;" %testid) - status = cursor.fetchone()[0] - # Reset the isolation level: - if isolation_old != 'READ-UNCOMMITTED': - cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s"%(str.replace(isolation_old, '-', ' '))) - conn.commit() - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return -2 - return status + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): + return -1 + + # Get the status in the database + try: + # To read changed values directly, one needs to change the isolation level to "READ UNCOMMITTED" + cursor.execute("SELECT @@session.tx_isolation") + isolation_old = cursor.fetchone()[0] + if isolation_old != 'READ-UNCOMMITTED': + cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") + conn.commit() + # Now get the value: + cursor.execute("SELECT `test_status` FROM `tbl_serv_tests` WHERE `serv_tests_key` = %d;" %testid) + status = cursor.fetchone()[0] + # Reset the isolation level: + if isolation_old != 'READ-UNCOMMITTED': + cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s"%(str.replace(isolation_old, '-', ' '))) + conn.commit() + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return -2 + return status ### END get_test_status() ############################################################################## @@ -651,29 +651,29 @@ def get_test_status(cursor=None, conn=None, testid=0): # ############################################################################## def set_test_dispatched(cursor=None, conn=None, testid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - conn: database connection - testid: test ID for which the status is to be set - Return value: - 0 on success - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): - return(1) - - # Set the flag in the database - try: - cursor.execute("UPDATE `tbl_serv_tests` SET `dispatched` = 1 WHERE `serv_tests_key` = %d;" %(testid)) - conn.commit() - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - return(0) + """Arguments: + cursor: cursor of the database connection to be used for the query + conn: database connection + testid: test ID for which the status is to be set + Return value: + 0 on success + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (testid <= 0)): + return(1) + + # Set the flag in the database + try: + cursor.execute("UPDATE `tbl_serv_tests` SET `dispatched` = 1 WHERE `serv_tests_key` = %d;" %(testid)) + conn.commit() + except: + # There was an error in the database connection: + logger = get_logger() + 
logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + return(0) ### END set_test_dispatched() ############################################################################## @@ -683,35 +683,35 @@ def set_test_dispatched(cursor=None, conn=None, testid=0): # ############################################################################## def acquire_db_lock(cursor, conn, key, expiry_time=10): - """Arguments: - cursor: cursor of the database connection to be used for the query - conn: database connection - key: key to lock - """ - try: - spin = True - while spin: - spin = False - try: - cursor.execute("DELETE FROM `tbl_serv_locks` WHERE (`name`='%s' AND `expiry_time` < now());" %(key)) - conn.commit() # this is needed to release a potential shared lock on the table - cursor.execute("INSERT INTO `tbl_serv_locks` (`name`, `expiry_time`) values ('%s', now() + %d);" %(key, expiry_time)) - conn.commit() - except MySQLdb.IntegrityError: - time.sleep(1) - spin = True - except MySQLdb.OperationalError as e: # retry if deadlock - if e.args[0] == MySQLErrors.LOCK_DEADLOCK: - time.sleep(1) - spin = True - else: - raise - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - raise - return(0) + """Arguments: + cursor: cursor of the database connection to be used for the query + conn: database connection + key: key to lock + """ + try: + spin = True + while spin: + spin = False + try: + cursor.execute("DELETE FROM `tbl_serv_locks` WHERE (`name`='%s' AND `expiry_time` < now());" %(key)) + conn.commit() # this is needed to release a potential shared lock on the table + cursor.execute("INSERT INTO `tbl_serv_locks` (`name`, `expiry_time`) values ('%s', now() + %d);" %(key, expiry_time)) + conn.commit() + except MySQLdb.IntegrityError: + time.sleep(1) + spin = True + except MySQLdb.OperationalError as e: # retry if deadlock + if e.args[0] == MySQLErrors.LOCK_DEADLOCK: + time.sleep(1) + spin = True + else: + raise + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + raise + return(0) ### END acquire_db_lock() ############################################################################## @@ -720,30 +720,30 @@ def acquire_db_lock(cursor, conn, key, expiry_time=10): # ############################################################################## def release_db_lock(cursor, conn, key, expiry_time=10): - """Arguments: - cursor: cursor of the database connection to be used for the query - conn: database connection - key: key to lock - """ - spin = True - try: - while spin: - spin = False - try: - cursor.execute("DELETE FROM `tbl_serv_locks` WHERE (`name`='%s');" %(key)) - conn.commit() - except MySQLdb.OperationalError as e: # retry if deadlock - if e.args[0] == MySQLErrors.LOCK_DEADLOCK: - time.sleep(1) - spin = True - else: - raise - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - return(0) + """Arguments: + cursor: cursor of the database connection to be used for the query + conn: database connection + key: key to lock + """ + spin = True + try: + while spin: + spin = False + try: + cursor.execute("DELETE FROM `tbl_serv_locks` WHERE (`name`='%s');" %(key)) + conn.commit() + except MySQLdb.OperationalError as e: # retry if deadlock + if e.args[0] == 
MySQLErrors.LOCK_DEADLOCK: + time.sleep(1) + spin = True + else: + raise + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + return(0) ### END release_db_lock() ############################################################################## @@ -752,51 +752,51 @@ def release_db_lock(cursor, conn, key, expiry_time=10): # ############################################################################## def write_errorlog(cursor=None, conn=None, testid=0, obsid=0, message="", timestamp=0.0): - """Arguments: - cursor: cursor of the database connection to be used for the query - conn: database connection - testid: test ID - obsid: observer ID - message: message to write to the database - Return value: - 0 on success - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (type(obsid) != int) or (type(message) != str) or (len(message) <= 0) or (type(timestamp) != float) or (timestamp < 0.0)): - return(1) - if ((testid != 0) and (check_test_id(cursor, testid) != 0)): - return(1) - if ((obsid != 0) and (check_observer_id(cursor, obsid) <= 0)): - return(1) - else: - obskey = check_observer_id(cursor, obsid) - - # Prepare timestamp: - if (timestamp <= 0.0): - timestamp = time.time() - - # Set the status in the database - sql = "INSERT INTO `tbl_serv_errorlog` (`errormessage`, `timestamp`, `test_fk`, `observer_fk`) VALUES ('%s', %f" %(re.escape(message), timestamp) - if testid != 0: - sql += ", %d"%testid - else: - sql += ", NULL" - if obsid != 0: - sql += ", %d"%obskey - else: - sql += ", NULL" - sql += ");" - try: - cursor.execute(sql) - conn.commit() - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("Error when executing %s: %s: %s" %(sql, str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - return(0) + """Arguments: + cursor: cursor of the database connection to be used for the query + conn: database connection + testid: test ID + obsid: observer ID + message: message to write to the database + Return value: + 0 on success + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(conn) != MySQLdb.connections.Connection) or (type(testid) != int) or (type(obsid) != int) or (type(message) != str) or (len(message) <= 0) or (type(timestamp) != float) or (timestamp < 0.0)): + return(1) + if ((testid != 0) and (check_test_id(cursor, testid) != 0)): + return(1) + if ((obsid != 0) and (check_observer_id(cursor, obsid) <= 0)): + return(1) + else: + obskey = check_observer_id(cursor, obsid) + + # Prepare timestamp: + if (timestamp <= 0.0): + timestamp = time.time() + + # Set the status in the database + sql = "INSERT INTO `tbl_serv_errorlog` (`errormessage`, `timestamp`, `test_fk`, `observer_fk`) VALUES ('%s', %f" %(re.escape(message), timestamp) + if testid != 0: + sql += ", %d"%testid + else: + sql += ", NULL" + if obsid != 0: + sql += ", %d"%obskey + else: + sql += ", NULL" + sql += ");" + try: + cursor.execute(sql) + conn.commit() + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("Error when executing %s: %s: %s" %(sql, 
str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + return(0) ### END write_errorlog() @@ -807,55 +807,55 @@ def write_errorlog(cursor=None, conn=None, testid=0, obsid=0, message="", timest # ############################################################################## def error_logandexit(message=None, exitcode=SUCCESS, scriptname="", logger=None, config=None): - """Arguments: - message: error message to log - exitcode: code to exit with - scriptname: name of script which calls the function - logger: logger instance to log to - config: config instance - Return value: - none on success - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(message) != str) or (type(exitcode) != int) or (type(scriptname) != str) or ((logger != None) and (not isinstance(logger, logging.Logger))) or ((config != None) and (not isinstance(config, configparser.SafeConfigParser)))): - return(1) - # Required arguments: - if (message == ""): - return(1) - - # Log error - if available, use logger, otherwise get it first: - if logger: - logger.error(message) - else: - logger = get_logger(loggername=scriptname) - logger.error(message) - - - # Send email to admin: - try: - cn, cur = connect_to_db(config, logger) - admin_emails = get_admin_emails(cur, config) - cur.close() - cn.close() - if ((admin_emails == 1) or (admin_emails == 2)): - msg = "Error when getting admin emails from database" - if logger: - logger.error(msg) - else: - logger = get_logger() - logger.error(msg) - raise - except: - # Use backup email address: - admin_emails = "flocklab@tik.ee.ethz.ch" - finally: - send_mail(subject="[FlockLab %s]"%(scriptname.capitalize()), message=message, recipients=admin_emails) - - # Exit program - logger.debug("Exiting with error code %u." % exitcode) - sys.exit(exitcode) + """Arguments: + message: error message to log + exitcode: code to exit with + scriptname: name of script which calls the function + logger: logger instance to log to + config: config instance + Return value: + none on success + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(message) != str) or (type(exitcode) != int) or (type(scriptname) != str) or ((logger != None) and (not isinstance(logger, logging.Logger))) or ((config != None) and (not isinstance(config, configparser.SafeConfigParser)))): + return(1) + # Required arguments: + if (message == ""): + return(1) + + # Log error - if available, use logger, otherwise get it first: + if logger: + logger.error(message) + else: + logger = get_logger(loggername=scriptname) + logger.error(message) + + + # Send email to admin: + try: + cn, cur = connect_to_db(config, logger) + admin_emails = get_admin_emails(cur, config) + cur.close() + cn.close() + if ((admin_emails == 1) or (admin_emails == 2)): + msg = "Error when getting admin emails from database" + if logger: + logger.error(msg) + else: + logger = get_logger() + logger.error(msg) + raise + except: + # Use backup email address: + admin_emails = "flocklab@tik.ee.ethz.ch" + finally: + send_mail(subject="[FlockLab %s]"%(scriptname.capitalize()), message=message, recipients=admin_emails) + + # Exit program + logger.debug("Exiting with error code %u." 
% exitcode) + sys.exit(exitcode) ### END error_logandexit() @@ -866,30 +866,30 @@ def error_logandexit(message=None, exitcode=SUCCESS, scriptname="", logger=None, # ############################################################################## def count_running_instances(scriptname=None): - """Arguments: - scriptname: name of script to check - Return value: - Count on success - -1 if there is an error in the arguments passed to the function - -2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(scriptname) != str) or (len(scriptname) <= 0)): - return(-1) - - cmd = ['pgrep', '-l', '-f', scriptname] - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) - out, err = p.communicate() - if (p.returncode == 0): - # If a script is called from a cronjob, this will add an additional line in pgrep which needs to be filtered. - count = 0 - for line in out.split('\n'): - if ((len(line) > 0) and (line.find('python') != -1)): - count += 1 - # Return the total instance count (including the instance which called this function): - return count - else: - return(-2) + """Arguments: + scriptname: name of script to check + Return value: + Count on success + -1 if there is an error in the arguments passed to the function + -2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(scriptname) != str) or (len(scriptname) <= 0)): + return(-1) + + cmd = ['pgrep', '-l', '-f', scriptname] + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + if (p.returncode == 0): + # If a script is called from a cronjob, this will add an additional line in pgrep which needs to be filtered. 
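count_running_instances() here is the building block for a single-instance guard in the daemon scripts. A minimal sketch under the assumption that only one instance should run at a time; the wrapper name and the exit code are illustrative:

    import sys, errno
    import flocklab   # assumed to provide count_running_instances() as defined here

    def ensure_single_instance(scriptname, logger):
        # The returned count includes the calling process itself, so any value
        # above 1 means another copy is running; negative values indicate that
        # the check itself failed.
        count = flocklab.count_running_instances(scriptname)
        if count < 0:
            logger.error("Could not count running instances of %s" % scriptname)
        elif count > 1:
            logger.warning("%s is already running, exiting." % scriptname)
            sys.exit(errno.EALREADY)
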
+ count = 0 + for line in out.split('\n'): + if ((len(line) > 0) and (line.find('python') != -1)): + count += 1 + # Return the total instance count (including the instance which called this function): + return count + else: + return(-2) ### END count_running_instances() @@ -897,42 +897,42 @@ def count_running_instances(scriptname=None): ############################################################################## # # get_admin_emails - Get the email addresses of all admins from the FlockLab -# database +# database # ############################################################################## def get_admin_emails(cursor=None, config=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - Return value: - On success, a list with all admin email addresses if successful, an empty list if no addresses were found - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Local variables: - email_list = [] - - if (not isinstance(config, configparser.SafeConfigParser)) or (not config.has_option('general', 'admin_email')): - - # Check the arguments: - if (type(cursor) != MySQLdb.cursors.Cursor): - return(1) - - # Get the addresses from the database: - try: - cursor.execute("SELECT `email` FROM `tbl_serv_users` WHERE `role` = 'admin'") - rs = cursor.fetchall() - for mail in rs: - email_list.append(mail[0]) - except: - # There was an error in the database connection: - logger = get_logger() - logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - - else: - email_list.append(config.get('general','admin_email')) - - return(email_list) + """Arguments: + cursor: cursor of the database connection to be used for the query + Return value: + On success, a list with all admin email addresses if successful, an empty list if no addresses were found + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Local variables: + email_list = [] + + if (not isinstance(config, configparser.SafeConfigParser)) or (not config.has_option('general', 'admin_email')): + + # Check the arguments: + if (type(cursor) != MySQLdb.cursors.Cursor): + return(1) + + # Get the addresses from the database: + try: + cursor.execute("SELECT `email` FROM `tbl_serv_users` WHERE `role` = 'admin'") + rs = cursor.fetchall() + for mail in rs: + email_list.append(mail[0]) + except: + # There was an error in the database connection: + logger = get_logger() + logger.error("%s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + + else: + email_list.append(config.get('general','admin_email')) + + return(email_list) ### END get_admin_emails() @@ -940,112 +940,112 @@ def get_admin_emails(cursor=None, config=None): ############################################################################## # # is_test_running - Check in the FlockLab database if a test is running or -# not. This also includes other test states such as -# preparing, cleaning up, aborting... +# not. This also includes other test states such as +# preparing, cleaning up, aborting... 
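The is_test_running() helper introduced just below is deliberately tri-state: it returns True or False on success and None when the query itself fails, so None must not be read as "no test running". A short caller sketch; the wrapper is illustrative:

    from flocklab import is_test_running   # assumed import path

    def can_start_maintenance(cursor, logger):
        # Only an explicit False means the testbed is idle; None means the
        # check failed and maintenance should be skipped to be safe.
        running = is_test_running(cursor)
        if running is None:
            logger.error("Could not determine test status, skipping maintenance.")
            return False
        return not running
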
# ############################################################################## def is_test_running(cursor=None): - """Arguments: - cursor: cursor of the database connection to be used for the query - Return value: - True if a test is running - False if no test is running - None otherwise - """ - if not cursor: - return None - - try: - cursor.execute("SELECT COUNT(serv_tests_key) FROM tbl_serv_tests WHERE test_status IN('preparing', 'running', 'aborting', 'cleaning up');") - rs = cursor.fetchone() - if rs[0] != 0: - return True - else: - return False - except: - logger = get_logger() - logger.error("%s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return None + """Arguments: + cursor: cursor of the database connection to be used for the query + Return value: + True if a test is running + False if no test is running + None otherwise + """ + if not cursor: + return None + + try: + cursor.execute("SELECT COUNT(serv_tests_key) FROM tbl_serv_tests WHERE test_status IN('preparing', 'running', 'aborting', 'cleaning up');") + rs = cursor.fetchone() + if rs[0] != 0: + return True + else: + return False + except: + logger = get_logger() + logger.error("%s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return None ### is_test_running()) - + def viz_plot(t, d, testdir, obsid, imgdir): - fig = Figure(figsize=(2*(t[len(t)-1] - t[0]), 1)) - ax = fig.add_axes([0., 0., 1., 1.]) - ax.patch.set_facecolor(None) - fig.patch.set_alpha(0.) - ax.set_frame_on(False) - ax.axes.get_yaxis().set_visible(False) - ax.axes.get_xaxis().set_visible(False) - canvas = FigureCanvasAgg(fig) - ax.plot(t, d, '-', color = '#001050', linewidth=2) - ax.axis((t[0], t[len(t)-1], -1, 40)) - canvas.get_renderer().clear() - canvas.draw() - try: - os.makedirs('%s/%s' % (imgdir, testdir)) - except OSError as exception: - if exception.errno != errno.EEXIST: - raise - canvas.print_figure('%s/%s/power_%d_%d.png' % (imgdir, testdir, obsid, t[0]*1e3), pad_inches=0, dpi=50, transparent=True) + fig = Figure(figsize=(2*(t[len(t)-1] - t[0]), 1)) + ax = fig.add_axes([0., 0., 1., 1.]) + ax.patch.set_facecolor(None) + fig.patch.set_alpha(0.) + ax.set_frame_on(False) + ax.axes.get_yaxis().set_visible(False) + ax.axes.get_xaxis().set_visible(False) + canvas = FigureCanvasAgg(fig) + ax.plot(t, d, '-', color = '#001050', linewidth=2) + ax.axis((t[0], t[len(t)-1], -1, 40)) + canvas.get_renderer().clear() + canvas.draw() + try: + os.makedirs('%s/%s' % (imgdir, testdir)) + except OSError as exception: + if exception.errno != errno.EEXIST: + raise + canvas.print_figure('%s/%s/power_%d_%d.png' % (imgdir, testdir, obsid, t[0]*1e3), pad_inches=0, dpi=50, transparent=True) def viz_powerprofiling(testid, owner_fk, values, obsid, imgdir, logger): - #logger.debug("Viz %i values" % len(values[0])) - # samples, count, start, end - t=[] - d=[] - try: - if len(values[0]) != len(values[1]): - raise Exception("Could not process data, timestamp count and value count must be equal.") - for i in range(len(values[0])): # packets - start = time.time() - t.extend(values[0][i]) - d.extend(values[1][i]) - if t[0] is None: - logger.warn("first timestamp in list is none.") - if t[-1] is None: - logger.warn("last timestamp in list is none.") - if t[-1] < t[0]: - logger.warn("timestamps are not propperly ordered. t[0]: %f, t[-1]: %f." % (t[0], t[-1])) - if (t[-1]-t[0] >= 2) | (i==len(values[0])-1): - try: - viz_plot(t, d, "%d_%d"%(testid, owner_fk), obsid, imgdir) - except: - msg = "Viz error: %s: %s, data t: %f .. 
%f size(t)=%d" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]),t[0],t[-1],len(t)) - msg = msg.join(traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))) - logger.error(msg) - t=[] - d=[] - #logger.debug("Viz time spent %f" % (time.time() - start)) - except: - logger.error("Error in viz_powerprofiling: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + #logger.debug("Viz %i values" % len(values[0])) + # samples, count, start, end + t=[] + d=[] + try: + if len(values[0]) != len(values[1]): + raise Exception("Could not process data, timestamp count and value count must be equal.") + for i in range(len(values[0])): # packets + start = time.time() + t.extend(values[0][i]) + d.extend(values[1][i]) + if t[0] is None: + logger.warn("first timestamp in list is none.") + if t[-1] is None: + logger.warn("last timestamp in list is none.") + if t[-1] < t[0]: + logger.warn("timestamps are not propperly ordered. t[0]: %f, t[-1]: %f." % (t[0], t[-1])) + if (t[-1]-t[0] >= 2) | (i==len(values[0])-1): + try: + viz_plot(t, d, "%d_%d"%(testid, owner_fk), obsid, imgdir) + except: + msg = "Viz error: %s: %s, data t: %f .. %f size(t)=%d" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]),t[0],t[-1],len(t)) + msg = msg.join(traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))) + logger.error(msg) + t=[] + d=[] + #logger.debug("Viz time spent %f" % (time.time() - start)) + except: + logger.error("Error in viz_powerprofiling: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) def viz_gpio_monitor(testid, owner_fk, values, obsid, imgdir, logger): - # gpio; edge; timestamp; - # print max time int values per file to gpiom_<obsid>_<starttime>.json - try: - os.makedirs('%s/%d_%d' % (imgdir, testid, owner_fk)) - except OSError as exception: - if exception.errno != errno.EEXIST: - raise - starttime = 0 - try: - for i in range(len(values)): - e = values[i] - if starttime == 0: - starttime = float(e[2]) - f = open('%s/%d_%d/gpiom_%d_%d.json' % (imgdir, testid, owner_fk, obsid, 1e3 * starttime), 'w') - f.write('{"e":[') - if (float(e[2]) - starttime > 5) or (i==len(values)-1): - f.write('{"t":%d,"p":%s,"l":%s}\n' % (int(round((float(e[2]) - starttime) * 1e3)), e[0], e[1])) - f.write(']}\n') - f.close() - starttime = 0 - else: - f.write('{"t":%d,"p":%s,"l":%s},\n' % (int(round((float(e[2]) - starttime) * 1e3)), e[0], e[1])) - except: - logger.error("Error in viz_gpio_monitor: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - + # gpio; edge; timestamp; + # print max time int values per file to gpiom_<obsid>_<starttime>.json + try: + os.makedirs('%s/%d_%d' % (imgdir, testid, owner_fk)) + except OSError as exception: + if exception.errno != errno.EEXIST: + raise + starttime = 0 + try: + for i in range(len(values)): + e = values[i] + if starttime == 0: + starttime = float(e[2]) + f = open('%s/%d_%d/gpiom_%d_%d.json' % (imgdir, testid, owner_fk, obsid, 1e3 * starttime), 'w') + f.write('{"e":[') + if (float(e[2]) - starttime > 5) or (i==len(values)-1): + f.write('{"t":%d,"p":%s,"l":%s}\n' % (int(round((float(e[2]) - starttime) * 1e3)), e[0], e[1])) + f.write(']}\n') + f.close() + starttime = 0 + else: + f.write('{"t":%d,"p":%s,"l":%s},\n' % (int(round((float(e[2]) - starttime) * 1e3)), e[0], e[1])) + except: + logger.error("Error in viz_gpio_monitor: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + ############################################################################## @@ -1053,103 +1053,103 @@ def viz_gpio_monitor(testid, owner_fk, values, obsid, imgdir, logger): # 
scheduleLinkTest - try to schedule a link test for every platform, according to config file # ############################################################################## - + def scheduleLinkTest(logger, config, cur, cn, debug=False): - # Check the arguments: - if ((type(cur) != MySQLdb.cursors.Cursor) or (type(cn) != MySQLdb.connections.Connection)): - return(1) - - sql = "SELECT TIMESTAMPDIFF(MINUTE, `begin`, NOW()) AS `last` FROM `tbl_serv_web_link_measurements` ORDER BY `last` ASC LIMIT 1" - cur.execute(sql) - rs = cur.fetchone() - if rs: - lasttest = int(rs[0]) - logger.debug("Last link measurement was %s minutes ago."%(lasttest)) - nexttest = 60 * config.getint("linktests", "interval_hours") + random.randint(-config.getint("linktests", "interval_random_minutes"), config.getint("linktests", "interval_random_minutes")) - - if lasttest >= nexttest: - # Schedule new tests - # Check if the lockfile is present: - lockfile = config.get("linktests", "lockfile") - if os.path.exists(lockfile): - logger.debug("Lockfile %s exists already. Skip adding new linktests.") - # If the last scheduled link tests are a long time ago, generate a warning since it may be that the lockfile was not deleted for whatever reason: - if lasttest > 2*nexttest: - logger.error("Lockfile %s exists and the last linktest was %d min ago (interval is %d min)"%(lockfile, lasttest, config.getint("linktests", "interval_hours"))) - else: - # Create the lockfile: - basedir = os.path.dirname(lockfile) - if not os.path.exists(basedir): - os.makedirs(basedir) - open(lockfile, 'a').close() - logger.debug("Touched lockfile %s"%lockfile) - - # Schedule new tests - logger.debug("Schedule new link measurements") - listing = os.listdir(config.get("linktests", "testfolder")) - for linktestfile in listing: - if re.search("\.xml$", os.path.basename(linktestfile)) is not None: - # read platform - parser = etree.XMLParser(remove_comments=True) - tree = etree.parse("%s/%s" % (config.get("linktests", "testfolder"),linktestfile), parser) - ns = {'d': config.get('xml', 'namespace')} - pl = tree.xpath('//d:platform', namespaces=ns) - platform = pl[0].text.strip() - # get available observers with that platform from DB - sql = """SELECT LPAD(obs.observer_id, 3, '0') as obsid - FROM `flocklab`.`tbl_serv_observer` AS obs - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS a ON obs.slot_1_tg_adapt_list_fk = a.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot1 ON a.tg_adapt_types_fk = slot1.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS b ON obs.slot_2_tg_adapt_list_fk = b.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot2 ON b.tg_adapt_types_fk = slot2.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS c ON obs.slot_3_tg_adapt_list_fk = c.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot3 ON c.tg_adapt_types_fk = slot3.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS d ON obs.slot_4_tg_adapt_list_fk = d.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot4 ON d.tg_adapt_types_fk = slot4.serv_tg_adapt_types_key - WHERE - obs.status = 'online' AND ( - LOWER(slot1.name) = LOWER('%s') OR - LOWER(slot2.name) = LOWER('%s') OR - LOWER(slot3.name) = LOWER('%s') OR - LOWER(slot4.name) = LOWER('%s')) - ORDER BY obs.observer_id""" % (platform,platform,platform,platform) - cur.execute(sql) - ret = cur.fetchall() - if not ret: - logger.info("Target platform %s not 
available, skipping link test." % platform) - continue - logger.debug("Observers with platform %s: %s" %(platform,' '.join([x[0] for x in ret]))) - obsIdTags = tree.xpath('//d:obsIds', namespaces=ns) - for o in obsIdTags: - o.text = ' '.join([x[0] for x in ret]) - targetIdTags = tree.xpath('//d:targetIds', namespaces=ns) - for o in targetIdTags: - o.text = ' '.join(map(str,list(range(len(ret))))) - # generate temporary test config - (fd, xmlpath) = tempfile.mkstemp(suffix='.xml') - tree.write(xmlpath, xml_declaration=True, encoding="UTF-8") - logger.info("add link test: %s" % linktestfile) - cmd = [config.get("linktests", "starttest_script"), '-c', "%s" % xmlpath] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, cwd=os.path.dirname(config.get("linktests", "starttest_script"))) - out, err = p.communicate() - rs = p.wait() - testid = re.search("Test ID: ([0-9]*)",out) - if (testid is None) | (rs != SUCCESS): - logger.error("Could not register link test %s (%s)" % (linktestfile,err)) - else: - # flag in db - sql = "INSERT INTO `tbl_serv_web_link_measurements` (test_fk, begin, end, platform_fk, links) \ - SELECT %s, NOW(), NOW(), serv_platforms_key, NULL from tbl_serv_platforms WHERE serv_platforms_key = (SELECT `b`.platforms_fk FROM \ - flocklab.tbl_serv_map_test_observer_targetimages as `a` left join \ - flocklab.tbl_serv_targetimages as `b` ON (a.targetimage_fk = b.serv_targetimages_key) WHERE `a`.test_fk=%s ORDER BY serv_platforms_key LIMIT 1)"% (testid.group(1), testid.group(1)) - cur.execute(sql) - cn.commit() - os.remove(xmlpath) - # Delete the lockfile: - os.remove(lockfile) - logger.debug("Removed lockfile %s"%lockfile) - + # Check the arguments: + if ((type(cur) != MySQLdb.cursors.Cursor) or (type(cn) != MySQLdb.connections.Connection)): + return(1) + + sql = "SELECT TIMESTAMPDIFF(MINUTE, `begin`, NOW()) AS `last` FROM `tbl_serv_web_link_measurements` ORDER BY `last` ASC LIMIT 1" + cur.execute(sql) + rs = cur.fetchone() + if rs: + lasttest = int(rs[0]) + logger.debug("Last link measurement was %s minutes ago."%(lasttest)) + nexttest = 60 * config.getint("linktests", "interval_hours") + random.randint(-config.getint("linktests", "interval_random_minutes"), config.getint("linktests", "interval_random_minutes")) + + if lasttest >= nexttest: + # Schedule new tests + # Check if the lockfile is present: + lockfile = config.get("linktests", "lockfile") + if os.path.exists(lockfile): + logger.debug("Lockfile %s exists already. 
Skip adding new linktests.") + # If the last scheduled link tests are a long time ago, generate a warning since it may be that the lockfile was not deleted for whatever reason: + if lasttest > 2*nexttest: + logger.error("Lockfile %s exists and the last linktest was %d min ago (interval is %d min)"%(lockfile, lasttest, config.getint("linktests", "interval_hours"))) + else: + # Create the lockfile: + basedir = os.path.dirname(lockfile) + if not os.path.exists(basedir): + os.makedirs(basedir) + open(lockfile, 'a').close() + logger.debug("Touched lockfile %s"%lockfile) + + # Schedule new tests + logger.debug("Schedule new link measurements") + listing = os.listdir(config.get("linktests", "testfolder")) + for linktestfile in listing: + if re.search("\.xml$", os.path.basename(linktestfile)) is not None: + # read platform + parser = etree.XMLParser(remove_comments=True) + tree = etree.parse("%s/%s" % (config.get("linktests", "testfolder"),linktestfile), parser) + ns = {'d': config.get('xml', 'namespace')} + pl = tree.xpath('//d:platform', namespaces=ns) + platform = pl[0].text.strip() + # get available observers with that platform from DB + sql = """SELECT LPAD(obs.observer_id, 3, '0') as obsid + FROM `flocklab`.`tbl_serv_observer` AS obs + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS a ON obs.slot_1_tg_adapt_list_fk = a.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot1 ON a.tg_adapt_types_fk = slot1.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS b ON obs.slot_2_tg_adapt_list_fk = b.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot2 ON b.tg_adapt_types_fk = slot2.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS c ON obs.slot_3_tg_adapt_list_fk = c.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot3 ON c.tg_adapt_types_fk = slot3.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS d ON obs.slot_4_tg_adapt_list_fk = d.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot4 ON d.tg_adapt_types_fk = slot4.serv_tg_adapt_types_key + WHERE + obs.status = 'online' AND ( + LOWER(slot1.name) = LOWER('%s') OR + LOWER(slot2.name) = LOWER('%s') OR + LOWER(slot3.name) = LOWER('%s') OR + LOWER(slot4.name) = LOWER('%s')) + ORDER BY obs.observer_id""" % (platform,platform,platform,platform) + cur.execute(sql) + ret = cur.fetchall() + if not ret: + logger.info("Target platform %s not available, skipping link test." 
% platform) + continue + logger.debug("Observers with platform %s: %s" %(platform,' '.join([x[0] for x in ret]))) + obsIdTags = tree.xpath('//d:obsIds', namespaces=ns) + for o in obsIdTags: + o.text = ' '.join([x[0] for x in ret]) + targetIdTags = tree.xpath('//d:targetIds', namespaces=ns) + for o in targetIdTags: + o.text = ' '.join(map(str,list(range(len(ret))))) + # generate temporary test config + (fd, xmlpath) = tempfile.mkstemp(suffix='.xml') + tree.write(xmlpath, xml_declaration=True, encoding="UTF-8") + logger.info("add link test: %s" % linktestfile) + cmd = [config.get("linktests", "starttest_script"), '-c', "%s" % xmlpath] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, cwd=os.path.dirname(config.get("linktests", "starttest_script"))) + out, err = p.communicate() + rs = p.wait() + testid = re.search("Test ID: ([0-9]*)",out) + if (testid is None) | (rs != SUCCESS): + logger.error("Could not register link test %s (%s)" % (linktestfile,err)) + else: + # flag in db + sql = "INSERT INTO `tbl_serv_web_link_measurements` (test_fk, begin, end, platform_fk, links) \ + SELECT %s, NOW(), NOW(), serv_platforms_key, NULL from tbl_serv_platforms WHERE serv_platforms_key = (SELECT `b`.platforms_fk FROM \ + flocklab.tbl_serv_map_test_observer_targetimages as `a` left join \ + flocklab.tbl_serv_targetimages as `b` ON (a.targetimage_fk = b.serv_targetimages_key) WHERE `a`.test_fk=%s ORDER BY serv_platforms_key LIMIT 1)"% (testid.group(1), testid.group(1)) + cur.execute(sql) + cn.commit() + os.remove(xmlpath) + # Delete the lockfile: + os.remove(lockfile) + logger.debug("Removed lockfile %s"%lockfile) + ############################################################################## # # getXmlTimestamp @@ -1177,24 +1177,24 @@ def getXmlTimestamp(datetimestring): # ############################################################################## def daqpin_abbr2num(abbr=""): - if not abbr: - return errno.EINVAL - abbrdict = { - 'RST' : 0, - 'SIG1': 2, - 'SIG2': 1, - 'INT1': 4, - 'INT2': 3, - 'LED1': 7, - 'LED2': 6, - 'LED3': 5 - } - try: - pinnum = abbrdict[abbr.upper()] - except KeyError: - return errno.EFAULT - - return pinnum + if not abbr: + return errno.EINVAL + abbrdict = { + 'RST' : 0, + 'SIG1': 2, + 'SIG2': 1, + 'INT1': 4, + 'INT2': 3, + 'LED1': 7, + 'LED2': 6, + 'LED3': 5 + } + try: + pinnum = abbrdict[abbr.upper()] + except KeyError: + return errno.EFAULT + + return pinnum ### END daqpin_abbr2num() @@ -1204,17 +1204,17 @@ def daqpin_abbr2num(abbr=""): # ############################################################################## def daqlevel_str2abbr(levelstr=""): - if not levelstr: - return errno.EINVAL - strdict = { - 'LOW' : LOW, - 'HIGH' : HIGH, - 'TOGGLE': TOGGLE - } - try: - abbr = strdict[levelstr.upper()] - except KeyError: - return errno.EFAULT - - return abbr + if not levelstr: + return errno.EINVAL + strdict = { + 'LOW' : LOW, + 'HIGH' : HIGH, + 'TOGGLE': TOGGLE + } + try: + abbr = strdict[levelstr.upper()] + except KeyError: + return errno.EFAULT + + return abbr ### END daqlevel_str2abbr() diff --git a/testmanagementserver/test_to_linkmap.py b/testmanagementserver/test_to_linkmap.py index d9fe90b5a2582a3827345c3ddebc936ce5e79243..6f2b403314051dbf6efcf5381b097adbd0ffa32a 100755 --- a/testmanagementserver/test_to_linkmap.py +++ b/testmanagementserver/test_to_linkmap.py @@ -1,8 +1,8 @@ #! 
/usr/bin/env python3 -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, getopt, tempfile, shutil, re, time, errno, io, logging, traceback, __main__, csv, tarfile @@ -30,8 +30,8 @@ config = None # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -41,35 +41,35 @@ class Error(Exception): # ############################################################################## class Node(): - def __init__(self, obsid, id): - self.nodeid = id - self.obsid = obsid - self.stat = {} - self.rssi = {} - - def addStats(self, sender_id, num_messages, num_received): - if not sender_id in self.stat: - self.stat[sender_id] = [] - self.stat[sender_id].append((sender_id, num_messages, num_received)) - - def getPRR(self): - prr = [] - for sender,statlist in self.stat.items(): - prr_rec = 0 - prr_tot = 0 - for (sender_id, num_messages, num_received) in statlist: - prr_rec = prr_rec + num_received - prr_tot = prr_tot + num_messages - prr.append((self.obsid, sender, float(prr_rec) / prr_tot, prr_tot)) - return prr - - def addRssi(self, channel, level, ouccurences): - if not channel in self.rssi: - self.rssi[channel] = {} - self.rssi[channel][level] = ouccurences - - def getRssi(self): - return self.rssi + def __init__(self, obsid, id): + self.nodeid = id + self.obsid = obsid + self.stat = {} + self.rssi = {} + + def addStats(self, sender_id, num_messages, num_received): + if not sender_id in self.stat: + self.stat[sender_id] = [] + self.stat[sender_id].append((sender_id, num_messages, num_received)) + + def getPRR(self): + prr = [] + for sender,statlist in self.stat.items(): + prr_rec = 0 + prr_tot = 0 + for (sender_id, num_messages, num_received) in statlist: + prr_rec = prr_rec + num_received + prr_tot = prr_tot + num_messages + prr.append((self.obsid, sender, float(prr_rec) / prr_tot, prr_tot)) + return prr + + def addRssi(self, channel, level, ouccurences): + if not channel in self.rssi: + self.rssi[channel] = {} + self.rssi[channel][level] = ouccurences + + def getRssi(self): + return self.rssi ### END Node class @@ -79,154 +79,154 @@ class Node(): # ############################################################################## def TestToLinkmap(testid=None, cn=None, cur=None): - - errors = [] - _serial_service_file = None - nodes = {} - starttime = None - stoptime = None - channels = [] - - logger.debug("Starting to create linkmap for test ID %s..."%testid) - - # Get test results from archive --- - archive_path = "%s/%s%s"%(config.get('archiver','archive_dir'), testid, config.get('archiver','archive_ext')) - if not os.path.exists(archive_path): - msg = "Archive path %s does not exist, removing link measurement." 
% archive_path - cur.execute("DELETE FROM `tbl_serv_web_link_measurements` WHERE `test_fk` = %s" % testid) - logger.error(msg) - errors.append(msg) - return errors - - # Extract serial service results file --- - logger.debug("Extracting serial service file from archive...") - tempdir = tempfile.mkdtemp() - archive = tarfile.open(archive_path, 'r:gz') - for f in archive.getmembers(): - if re.search("serial[_]?", f.name) is not None: - archive.extract(f, tempdir) - _serial_service_file = "%s/%s" % (tempdir, f.name) - logger.debug("Found serial service file in test archive.") - break - archive.close() - if _serial_service_file is None: - msg = "Serial service file could not be found in archive %s."%(archive_path) - logger.error(msg) - errors.append(msg) - return errors - - # Process CSV file --- - logger.debug("Processing CSV file...") - packetreader = csv.reader(open(_serial_service_file, 'rb'), delimiter=',') - for packetinfo in packetreader: - if re.search("^observer_id", packetinfo[1]): - continue - # nx_uint16_t num_messages; - # nx_uint16_t sender_id; - # nx_uint16_t num_received; - packet = packetinfo[4].decode('hex') - data = unpack(">7xB%dx" % (len(packet) - 8), packet) - if data[0] == 7: - # link measurement - data = unpack(">8xHHH",packet) - #print "%s: src:%d dst:%s %d/%d" % (packetinfo[1], data[1], packetinfo[2], data[2], data[0]) - if not int(packetinfo[2]) in nodes: - nodes[int(packetinfo[2])] = Node(int(packetinfo[1]), int(packetinfo[2])) - nodes[int(packetinfo[2])].addStats(data[1], data[0], data[2]) - if starttime is None or starttime > float(packetinfo[0]): - starttime = float(packetinfo[0]) - if stoptime is None or stoptime < float(packetinfo[0]): - stoptime = float(packetinfo[0]) - elif data[0] == 8: - # RSSI scan - data = unpack(">8xBHH",packet) - # print "RSSI scan: %d %d %d" % (data[0], data[1] - 127 - 45, data[2]) - if not int(packetinfo[2]) in nodes: - nodes[int(packetinfo[2])] = Node(int(packetinfo[1]), int(packetinfo[2])) - nodes[int(packetinfo[2])].addRssi(data[0], data[1], data[2]) - if not data[0] in channels: - channels.append(data[0]) - logger.debug("Processed CSV file.") - - # Determine start/stop time --- - if (starttime is None) or (stoptime is None): - msg = "Could not determine start or stop time of link test ID %s." % testid - filesize = os.path.getsize(_serial_service_file) - if filesize < 100: - # file size is less than 100 bytes (empty file) -> test failed - ret = flocklab.set_test_status(cur, cn, testid, 'failed') - if ret != 0: - msg += " Could not set test status to failed.\n" - else: - msg += " File size is %u bytes, test status set to failed.\n" % filesize - logger.error(msg) - errors.append(msg) - return errors - # structure: list, [[sum(received packets from node i at j)],[],[]...] - # structure: list, [[sum(stat packets node j about i)],[],[]...] 
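# Illustration (not part of the patch): the hunk above reindents TestToLinkmap(),
# which feeds received-packet counts into the Node class defined earlier in this
# file. The following standalone sketch mirrors what addStats()/getPRR() compute,
# the packet reception ratio (PRR) per sender; the observer ID, node IDs and
# counts below are made-up example values.
class _Node:
    def __init__(self, obsid, nodeid):
        self.obsid = obsid
        self.nodeid = nodeid
        self.stat = {}   # sender_id -> list of (sender_id, num_sent, num_received)

    def add_stats(self, sender_id, num_messages, num_received):
        self.stat.setdefault(sender_id, []).append((sender_id, num_messages, num_received))

    def get_prr(self):
        # PRR = sum(received) / sum(sent), aggregated per sender, as in Node.getPRR()
        prr = []
        for sender, statlist in self.stat.items():
            tot = sum(sent for (_, sent, _) in statlist)
            rec = sum(recv for (_, _, recv) in statlist)
            prr.append((self.obsid, sender, float(rec) / tot, tot))
        return prr

if __name__ == "__main__":
    n = _Node(obsid=7, nodeid=3)
    n.add_stats(sender_id=1, num_messages=100, num_received=93)
    n.add_stats(sender_id=1, num_messages=100, num_received=88)
    print(n.get_prr())   # [(7, 1, 0.905, 200)]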
- - # Get platform info --- - logger.debug("Getting platform info...") - sql = """ SELECT `c`.`platforms_fk`, `d`.`name`, `a`.`description` - FROM - `tbl_serv_tests` as `a` - LEFT JOIN `tbl_serv_map_test_observer_targetimages` as `b` ON (`a`.serv_tests_key = `b`.test_fk) - LEFT JOIN `tbl_serv_targetimages` AS `c` ON (`b`.`targetimage_fk` = `c`.`serv_targetimages_key`) - LEFT JOIN `tbl_serv_platforms` AS `d` ON (`c`.`platforms_fk` = `d`.`serv_platforms_key`) - WHERE `a`.serv_tests_key = %s - LIMIT 1 - """ - cur.execute(sql, str(testid)) - ret = cur.fetchall() - platform_fk = ret[0][0] - platform_name = ret[0][1] - # search for structure (Radio:*) in description - platform_radio = re.search('\(Radio:([^)]*)\)', ret[0][2]) - if platform_radio is not None: - platform_radio = platform_radio.group(1) - - # Write XML file --- - logger.debug("Writing XML file...") - linkmap = io.StringIO() - linkmap.write('<?xml version="1.0" encoding="UTF-8" ?>\n<network platform="%s"' % platform_name) - if platform_radio is not None: - linkmap.write(' radio="%s"' % platform_radio) - linkmap.write('>') - for receiver, node in nodes.items(): - nodeprr = node.getPRR() - for (obsid, sender, prr, numpkt) in nodeprr: - if prr > 0: - if sender in nodes: - sender_obs_id = str(nodes[sender].obsid) - else: - sender_obs_id = "?" - linkmap.write('<link src="%s" dest="%d" prr="%0.4f" numpackets="%d" />' % (sender_obs_id, obsid, prr, numpkt)) - for ch in channels: - linkmap.write('<rssiscan channel="%d">' % ch) - for receiver, node in nodes.items(): - rssi = node.getRssi() - if ch in rssi: - linkmap.write('<rssi nodeid="%s" frq="' % (node.obsid)) - linkmap.write(','.join(map(str, iter(rssi[ch].values())))) - linkmap.write('" />') - linkmap.write('</rssiscan>') - linkmap.write('</network>') - - # Store XML file in DB --- - logger.debug("Storing XML file in DB...") - cur.execute("DELETE FROM `tbl_serv_web_link_measurements` WHERE `test_fk`=%s", str(testid)) - if platform_radio is None: - cur.execute("INSERT INTO `tbl_serv_web_link_measurements` (`test_fk`, `platform_fk`, `links`, `begin`, `end`) VALUES (%s,%s,%s,%s,%s)", (str(testid), platform_fk, linkmap.getvalue(), datetime.fromtimestamp(starttime), datetime.fromtimestamp(stoptime))) - else: - cur.execute("INSERT INTO `tbl_serv_web_link_measurements` (`test_fk`, `platform_fk`, `links`, `begin`, `end`, `radio`) VALUES (%s,%s,%s,%s,%s,%s)", (str(testid), platform_fk, linkmap.getvalue(), datetime.fromtimestamp(starttime), datetime.fromtimestamp(stoptime), platform_radio)) - cn.commit() + + errors = [] + _serial_service_file = None + nodes = {} + starttime = None + stoptime = None + channels = [] + + logger.debug("Starting to create linkmap for test ID %s..."%testid) + + # Get test results from archive --- + archive_path = "%s/%s%s"%(config.get('archiver','archive_dir'), testid, config.get('archiver','archive_ext')) + if not os.path.exists(archive_path): + msg = "Archive path %s does not exist, removing link measurement." 
% archive_path + cur.execute("DELETE FROM `tbl_serv_web_link_measurements` WHERE `test_fk` = %s" % testid) + logger.error(msg) + errors.append(msg) + return errors + + # Extract serial service results file --- + logger.debug("Extracting serial service file from archive...") + tempdir = tempfile.mkdtemp() + archive = tarfile.open(archive_path, 'r:gz') + for f in archive.getmembers(): + if re.search("serial[_]?", f.name) is not None: + archive.extract(f, tempdir) + _serial_service_file = "%s/%s" % (tempdir, f.name) + logger.debug("Found serial service file in test archive.") + break + archive.close() + if _serial_service_file is None: + msg = "Serial service file could not be found in archive %s."%(archive_path) + logger.error(msg) + errors.append(msg) + return errors + + # Process CSV file --- + logger.debug("Processing CSV file...") + packetreader = csv.reader(open(_serial_service_file, 'rb'), delimiter=',') + for packetinfo in packetreader: + if re.search("^observer_id", packetinfo[1]): + continue + # nx_uint16_t num_messages; + # nx_uint16_t sender_id; + # nx_uint16_t num_received; + packet = packetinfo[4].decode('hex') + data = unpack(">7xB%dx" % (len(packet) - 8), packet) + if data[0] == 7: + # link measurement + data = unpack(">8xHHH",packet) + #print "%s: src:%d dst:%s %d/%d" % (packetinfo[1], data[1], packetinfo[2], data[2], data[0]) + if not int(packetinfo[2]) in nodes: + nodes[int(packetinfo[2])] = Node(int(packetinfo[1]), int(packetinfo[2])) + nodes[int(packetinfo[2])].addStats(data[1], data[0], data[2]) + if starttime is None or starttime > float(packetinfo[0]): + starttime = float(packetinfo[0]) + if stoptime is None or stoptime < float(packetinfo[0]): + stoptime = float(packetinfo[0]) + elif data[0] == 8: + # RSSI scan + data = unpack(">8xBHH",packet) + # print "RSSI scan: %d %d %d" % (data[0], data[1] - 127 - 45, data[2]) + if not int(packetinfo[2]) in nodes: + nodes[int(packetinfo[2])] = Node(int(packetinfo[1]), int(packetinfo[2])) + nodes[int(packetinfo[2])].addRssi(data[0], data[1], data[2]) + if not data[0] in channels: + channels.append(data[0]) + logger.debug("Processed CSV file.") + + # Determine start/stop time --- + if (starttime is None) or (stoptime is None): + msg = "Could not determine start or stop time of link test ID %s." % testid + filesize = os.path.getsize(_serial_service_file) + if filesize < 100: + # file size is less than 100 bytes (empty file) -> test failed + ret = flocklab.set_test_status(cur, cn, testid, 'failed') + if ret != 0: + msg += " Could not set test status to failed.\n" + else: + msg += " File size is %u bytes, test status set to failed.\n" % filesize + logger.error(msg) + errors.append(msg) + return errors + # structure: list, [[sum(received packets from node i at j)],[],[]...] + # structure: list, [[sum(stat packets node j about i)],[],[]...] 
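# Illustration (not part of the patch): a minimal, hypothetical decoder for the
# serial-service payloads parsed above. The field layout is inferred from the
# unpack() format strings and the struct comment in this hunk (the 8th byte is a
# type tag: 7 = link measurement, 8 = RSSI scan). bytes.fromhex() stands in for
# the Python-2 str.decode('hex') used in the original code, and the example
# payload below is invented.
from struct import unpack

def decode_payload(hexstr):
    packet = bytes.fromhex(hexstr)
    (msg_type,) = unpack(">7xB%dx" % (len(packet) - 8), packet)
    if msg_type == 7:                          # link measurement
        num_messages, sender_id, num_received = unpack(">8xHHH", packet)
        return ("link", sender_id, num_messages, num_received)
    elif msg_type == 8:                        # RSSI scan
        channel, level, occurrences = unpack(">8xBHH", packet)
        return ("rssi", channel, level, occurrences)
    return ("unknown", msg_type)

if __name__ == "__main__":
    # 7 padding bytes, type 0x07, then num_messages=100, sender_id=2, num_received=97
    print(decode_payload("00000000000000" + "07" + "0064" + "0002" + "0061"))
    # -> ('link', 2, 100, 97)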
+ + # Get platform info --- + logger.debug("Getting platform info...") + sql = """ SELECT `c`.`platforms_fk`, `d`.`name`, `a`.`description` + FROM + `tbl_serv_tests` as `a` + LEFT JOIN `tbl_serv_map_test_observer_targetimages` as `b` ON (`a`.serv_tests_key = `b`.test_fk) + LEFT JOIN `tbl_serv_targetimages` AS `c` ON (`b`.`targetimage_fk` = `c`.`serv_targetimages_key`) + LEFT JOIN `tbl_serv_platforms` AS `d` ON (`c`.`platforms_fk` = `d`.`serv_platforms_key`) + WHERE `a`.serv_tests_key = %s + LIMIT 1 + """ + cur.execute(sql, str(testid)) + ret = cur.fetchall() + platform_fk = ret[0][0] + platform_name = ret[0][1] + # search for structure (Radio:*) in description + platform_radio = re.search('\(Radio:([^)]*)\)', ret[0][2]) + if platform_radio is not None: + platform_radio = platform_radio.group(1) + + # Write XML file --- + logger.debug("Writing XML file...") + linkmap = io.StringIO() + linkmap.write('<?xml version="1.0" encoding="UTF-8" ?>\n<network platform="%s"' % platform_name) + if platform_radio is not None: + linkmap.write(' radio="%s"' % platform_radio) + linkmap.write('>') + for receiver, node in nodes.items(): + nodeprr = node.getPRR() + for (obsid, sender, prr, numpkt) in nodeprr: + if prr > 0: + if sender in nodes: + sender_obs_id = str(nodes[sender].obsid) + else: + sender_obs_id = "?" + linkmap.write('<link src="%s" dest="%d" prr="%0.4f" numpackets="%d" />' % (sender_obs_id, obsid, prr, numpkt)) + for ch in channels: + linkmap.write('<rssiscan channel="%d">' % ch) + for receiver, node in nodes.items(): + rssi = node.getRssi() + if ch in rssi: + linkmap.write('<rssi nodeid="%s" frq="' % (node.obsid)) + linkmap.write(','.join(map(str, iter(rssi[ch].values())))) + linkmap.write('" />') + linkmap.write('</rssiscan>') + linkmap.write('</network>') + + # Store XML file in DB --- + logger.debug("Storing XML file in DB...") + cur.execute("DELETE FROM `tbl_serv_web_link_measurements` WHERE `test_fk`=%s", str(testid)) + if platform_radio is None: + cur.execute("INSERT INTO `tbl_serv_web_link_measurements` (`test_fk`, `platform_fk`, `links`, `begin`, `end`) VALUES (%s,%s,%s,%s,%s)", (str(testid), platform_fk, linkmap.getvalue(), datetime.fromtimestamp(starttime), datetime.fromtimestamp(stoptime))) + else: + cur.execute("INSERT INTO `tbl_serv_web_link_measurements` (`test_fk`, `platform_fk`, `links`, `begin`, `end`, `radio`) VALUES (%s,%s,%s,%s,%s,%s)", (str(testid), platform_fk, linkmap.getvalue(), datetime.fromtimestamp(starttime), datetime.fromtimestamp(stoptime), platform_radio)) + cn.commit() - # Remove temp dir --- - logger.debug("Removing %s..."%tempdir) - shutil.rmtree(tempdir) - - logger.debug("Created linkmap for test ID %s"%testid) - return errors + # Remove temp dir --- + logger.debug("Removing %s..."%tempdir) + shutil.rmtree(tempdir) + + logger.debug("Created linkmap for test ID %s"%testid) + return errors ### END TestToLinkmap() @@ -236,10 +236,10 @@ def TestToLinkmap(testid=None, cn=None, cur=None): # ############################################################################## def usage(): - print("Usage: %s [--debug] [--help]" %scriptname) - print("Options:") - print(" --debug\t\t\tOptional. Print debug messages to log.") - print(" --help\t\t\tOptional. Print this help.") + print("Usage: %s [--debug] [--help]" %scriptname) + print("Options:") + print(" --debug\t\t\tOptional. Print debug messages to log.") + print(" --help\t\t\tOptional. 
Print this help.") ### END usage() @@ -249,98 +249,98 @@ def usage(): # ############################################################################## def main(argv): - global logger - global config - global name - - errors = [] - _serial_service_file = None - - # Set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Get logger: - logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) - - # Get the config file: - config = flocklab.get_config(configpath=scriptpath) - if not config: - msg = "Could not read configuration file. Exiting..." - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Read configuration file.") - - # Get the arguments: - try: - opts, args = getopt.getopt(argv, "hd", ["help", "debug"]) - except getopt.GetoptError as err: - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - except: - msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + global logger + global config + global name + + errors = [] + _serial_service_file = None + + # Set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Get logger: + logger = flocklab.get_logger(loggername=scriptname, loggerpath=scriptpath) + + # Get the config file: + config = flocklab.get_config(configpath=scriptpath) + if not config: + msg = "Could not read configuration file. Exiting..." + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Read configuration file.") + + # Get the arguments: + try: + opts, args = getopt.getopt(argv, "hd", ["help", "debug"]) + except getopt.GetoptError as err: + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + except: + msg = "Error when getting arguments: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - for opt, arg in opts: - if opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-d", "--debug"): - logger.setLevel(logging.DEBUG) - else: - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Connect to the DB --- - try: - (cn, cur) = flocklab.connect_to_db(config, logger) - except: - msg = "Could not connect to database" - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - #logger.debug("Connected to database") - - # Query database for pending link measurements --- - testids = [] - cur.execute("SELECT `test_fk` FROM `tbl_serv_web_link_measurements` LEFT JOIN `tbl_serv_tests` ON (`test_fk` = `serv_tests_key`) WHERE `links` is NULL AND `test_status` IN ('synced', 'finished')") - ret = cur.fetchall() - for row in ret: - testids.append(int(row[0])) - if (len(testids) == 0): - logger.debug("No pending test for evaluation") - else: - logger.debug("Test IDs to process: %s\n"%str(testids)) - for testid in testids: - try: - ret = TestToLinkmap(testid=testid, cn=cn, cur=cur) - if len(ret) == 0: - # No errors occurred, thus mark the test for deletion: - logger.debug("Mark test %s for deletion."%str(testid)) - flocklab.set_test_status(cur, cn, testid, 'todelete') - else: - logger.debug("Errors detected while processing test %s."%str(testid)) - for err in ret: - errors.append(err) - except: - msg = "Encountered error for test ID %d: %s: %s" % (testid, str(sys.exc_info()[0]), str(sys.exc_info()[1])) - errors.append(msg) - logger.error(msg) - continue - if (len(errors)): - msg = "" - for err in errors: - msg += err - flocklab.error_logandexit(msg, 
errno.EAGAIN, name, logger, config) - - logger.debug("Finished. Exit program.") - cn.close() - sys.exit(SUCCESS) + for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-d", "--debug"): + logger.setLevel(logging.DEBUG) + else: + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Connect to the DB --- + try: + (cn, cur) = flocklab.connect_to_db(config, logger) + except: + msg = "Could not connect to database" + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + #logger.debug("Connected to database") + + # Query database for pending link measurements --- + testids = [] + cur.execute("SELECT `test_fk` FROM `tbl_serv_web_link_measurements` LEFT JOIN `tbl_serv_tests` ON (`test_fk` = `serv_tests_key`) WHERE `links` is NULL AND `test_status` IN ('synced', 'finished')") + ret = cur.fetchall() + for row in ret: + testids.append(int(row[0])) + if (len(testids) == 0): + logger.debug("No pending test for evaluation") + else: + logger.debug("Test IDs to process: %s\n"%str(testids)) + for testid in testids: + try: + ret = TestToLinkmap(testid=testid, cn=cn, cur=cur) + if len(ret) == 0: + # No errors occurred, thus mark the test for deletion: + logger.debug("Mark test %s for deletion."%str(testid)) + flocklab.set_test_status(cur, cn, testid, 'todelete') + else: + logger.debug("Errors detected while processing test %s."%str(testid)) + for err in ret: + errors.append(err) + except: + msg = "Encountered error for test ID %d: %s: %s" % (testid, str(sys.exc_info()[0]), str(sys.exc_info()[1])) + errors.append(msg) + logger.error(msg) + continue + if (len(errors)): + msg = "" + for err in errors: + msg += err + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + + logger.debug("Finished. 
Exit program.") + cn.close() + sys.exit(SUCCESS) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) - flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) - + try: + main(sys.argv[1:]) + except Exception: + msg = "Encountered error: %s: %s\n%s\nCommand line was: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)) + flocklab.error_logandexit(msg, errno.EAGAIN, name, logger, config) + diff --git a/webserver/testmanagement/check_schedulability.py b/webserver/testmanagement/check_schedulability.py index 5b2dc08decdd58179aa811d6898c8097c245942f..28f405fdf93e4e218ba11e7e96069ff5c577cecc 100644 --- a/webserver/testmanagement/check_schedulability.py +++ b/webserver/testmanagement/check_schedulability.py @@ -16,14 +16,14 @@ SUCCESS = 0 class TestbedResource(): - def __init__(self, time_start, time_end, obsid, restype): - self.time_start = time_start - self.time_end = time_end - self.obsid = obsid - self.restype = restype - - def __repr__(self): - return '%d to %d, obs %d, res %s' % (self.time_start,self.time_end,self.obsid,self.restype) + def __init__(self, time_start, time_end, obsid, restype): + self.time_start = time_start + self.time_end = time_end + self.obsid = obsid + self.restype = restype + + def __repr__(self): + return '%d to %d, obs %d, res %s' % (self.time_start,self.time_end,self.obsid,self.restype) ############################################################################## # @@ -31,18 +31,18 @@ class TestbedResource(): # ############################################################################## def get_config(): - """Arguments: - none - Return value: - The configuration object on success - none otherwise - """ - try: - config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) - config.read(os.path.dirname(os.path.abspath(sys.argv[0])) + '/user.ini') - except: - syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(str(os.path.dirname(os.path.abspath(sys.argv[0]))), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return config + """Arguments: + none + Return value: + The configuration object on success + none otherwise + """ + try: + config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) + config.read(os.path.dirname(os.path.abspath(sys.argv[0])) + '/user.ini') + except: + syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(str(os.path.dirname(os.path.abspath(sys.argv[0]))), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return config ### END get_config() ############################################################################## @@ -73,71 +73,71 @@ def getXmlTimestamp(datetimestring): # ############################################################################## def getobsids(tree,ns,cursor): - obsidlist = [] - obsiddict = {} - ### OBSERVERS - # Loop through all targetconfs - targetconfs = tree.xpath('//d:targetConf', namespaces=ns) - for targetconf in targetconfs: - dbimageid = None - embimageid = None - # Get elements: - obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() - ret = targetconf.xpath('d:dbImageId', namespaces=ns) - if ret: - dbimageid = [o.text for o in ret] - ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) - if ret: - embimageid = [o.text for o in ret] - 
embimageid_line = [o.sourceline for o in ret] - - # Put obsids into obsidlist: - for obsid in obsids: - obsidlist.append(obsid) + obsidlist = [] + obsiddict = {} + ### OBSERVERS + # Loop through all targetconfs + targetconfs = tree.xpath('//d:targetConf', namespaces=ns) + for targetconf in targetconfs: + dbimageid = None + embimageid = None + # Get elements: + obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() + ret = targetconf.xpath('d:dbImageId', namespaces=ns) + if ret: + dbimageid = [o.text for o in ret] + ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) + if ret: + embimageid = [o.text for o in ret] + embimageid_line = [o.sourceline for o in ret] + + # Put obsids into obsidlist: + for obsid in obsids: + obsidlist.append(obsid) # If DB image IDs are present, check if they are in the database and get values for later use: - if dbimageid: - for dbimg in dbimageid: - if not dbimg: - logger.warn("Empty dbimage configuration.") - sys.exit(errno.EAGAIN) - # Get frequency and architecture for each observer - sql = """SELECT c.name - FROM `tbl_serv_targetimages` AS a - LEFT JOIN `tbl_serv_operatingsystems` AS b - ON a.operatingsystems_fk = b.serv_operatingsystems_key - LEFT JOIN `tbl_serv_platforms` AS c - ON a.platforms_fk = c.serv_platforms_key - WHERE (a.`serv_targetimages_key` = %s )"""%dbimg #AND a.`binary` IS NOT NULL) - - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("The image %s does not exist in the database"%dbimg) - sys.exit(errno.EAGAIN) - else: - # Put data into dictionary for later use: - for obsid in obsids: - if obsid not in obsiddict: - obsiddict[obsid] = {} - obsiddict[obsid]['architecture']=ret[0] - - # If embedded image IDs are present, check if they have a corresponding <imageConf> which is valid: - if embimageid: - for embimg, line in zip(embimageid, embimageid_line): - imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' %(embimg), namespaces=ns) - if not imageconf: - logger.warn("The embedded image %s does not exist"%dbimg) - sys.exit(errno.EAGAIN) - else: - # Get platform and frequency: - platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text - for obsid in obsids: - if obsid not in obsiddict: - obsiddict[obsid] = {} - obsiddict[obsid]['architecture']=platform - - return obsidlist,obsiddict + if dbimageid: + for dbimg in dbimageid: + if not dbimg: + logger.warn("Empty dbimage configuration.") + sys.exit(errno.EAGAIN) + # Get frequency and architecture for each observer + sql = """SELECT c.name + FROM `tbl_serv_targetimages` AS a + LEFT JOIN `tbl_serv_operatingsystems` AS b + ON a.operatingsystems_fk = b.serv_operatingsystems_key + LEFT JOIN `tbl_serv_platforms` AS c + ON a.platforms_fk = c.serv_platforms_key + WHERE (a.`serv_targetimages_key` = %s )"""%dbimg #AND a.`binary` IS NOT NULL) + + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("The image %s does not exist in the database"%dbimg) + sys.exit(errno.EAGAIN) + else: + # Put data into dictionary for later use: + for obsid in obsids: + if obsid not in obsiddict: + obsiddict[obsid] = {} + obsiddict[obsid]['architecture']=ret[0] + + # If embedded image IDs are present, check if they have a corresponding <imageConf> which is valid: + if embimageid: + for embimg, line in zip(embimageid, embimageid_line): + imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' 
%(embimg), namespaces=ns) + if not imageconf: + logger.warn("The embedded image %s does not exist"%dbimg) + sys.exit(errno.EAGAIN) + else: + # Get platform and frequency: + platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text + for obsid in obsids: + if obsid not in obsiddict: + obsiddict[obsid] = {} + obsiddict[obsid]['architecture']=platform + + return obsidlist,obsiddict # END getobids @@ -148,39 +148,39 @@ def getobsids(tree,ns,cursor): # ############################################################################## def gettimeslot(tree,ns,config): - schedASAP = False - # Start and End time from xml - sched_abs = tree.xpath('//d:generalConf/d:scheduleAbsolute', namespaces=ns) - now = int(time.time()) - testSetup = int(config.get('tests', 'setuptime')) * 60 - - if not sched_abs: - schedASAP = True - # Use now + setup time as start time and start time + duration as end time - testDuration = int(tree.xpath('//d:generalConf/d:scheduleAsap/d:durationSecs', namespaces=ns)[0].text) - testStart = now + testSetup - testEnd = testStart + testDuration - - else: - ### TIMES + schedASAP = False + # Start and End time from xml + sched_abs = tree.xpath('//d:generalConf/d:scheduleAbsolute', namespaces=ns) + now = int(time.time()) + testSetup = int(config.get('tests', 'setuptime')) * 60 + + if not sched_abs: + schedASAP = True + # Use now + setup time as start time and start time + duration as end time + testDuration = int(tree.xpath('//d:generalConf/d:scheduleAsap/d:durationSecs', namespaces=ns)[0].text) + testStart = now + testSetup + testEnd = testStart + testDuration + + else: + ### TIMES # The start date and time have to be more than setup time in the future: - rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:start', namespaces=ns) - testStart = getXmlTimestamp(rs[0].text) - if (testStart <= now + testSetup): - logging.warn("Test starts to soon, exit") - sys.exit(errno.EAGAIN) - - # The end date and time have to be in the future and after the start: - rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:end', namespaces=ns) - testEnd = getXmlTimestamp(rs[0].text) - if (testEnd <= testStart): - logging.warn("Endtime before start time, exit") - sys.exit(errno.EAGAIN) - - # Calculate the test duration which is needed later on: - testDuration = testEnd - testStart - - return testStart,testEnd,testDuration,schedASAP + rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:start', namespaces=ns) + testStart = getXmlTimestamp(rs[0].text) + if (testStart <= now + testSetup): + logging.warn("Test starts to soon, exit") + sys.exit(errno.EAGAIN) + + # The end date and time have to be in the future and after the start: + rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:end', namespaces=ns) + testEnd = getXmlTimestamp(rs[0].text) + if (testEnd <= testStart): + logging.warn("Endtime before start time, exit") + sys.exit(errno.EAGAIN) + + # Calculate the test duration which is needed later on: + testDuration = testEnd - testStart + + return testStart,testEnd,testDuration,schedASAP # END gettimeslot ############################################################################## @@ -190,48 +190,48 @@ def gettimeslot(tree,ns,config): # ############################################################################## def resourceSlots(testStart,testEnd,obsidlist,obsiddict,config,cursor): - ### SLOT NUMBER - # Get slot number for each observer, slots are used for the whole test plus setup and cleanup time - obsslotlist = [] - start = int(testStart) - int(config.get('tests', 'setuptime')) * 60 - end 
= int(testEnd) + int(config.get('tests', 'cleanuptime')) * 60 - for obsid in obsidlist: - sql_adap = """SELECT `b`.`tg_adapt_types_fk`, `c`.`tg_adapt_types_fk`, `d`.`tg_adapt_types_fk`, `e`.`tg_adapt_types_fk` - FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` - WHERE - (`a`.`observer_id` = %s) - """ - sql_platf = """SELECT COUNT(*) - FROM `tbl_serv_tg_adapt_types` AS `a` - WHERE - (`a`.`serv_tg_adapt_types_key` = %s) - AND (LOWER(`name`) = LOWER('%s')) - """ - cursor.execute(sql_adap %(obsid)) - adaptTypesFk = cursor.fetchone() - if not adaptTypesFk: - logger.warn("Could not fetch adapter type fks for the observer %s"%obsid) - sys.exit(errno.EAGAIN) - else: - slotFound = False - for x in range(0,4): - if not adaptTypesFk[x]: - continue - cursor.execute(sql_platf %(adaptTypesFk[x],obsiddict[obsid]['architecture'])) - ret = cursor.fetchone() - if int(ret[0]) == 1: - obsslotlist.append(TestbedResource(start,end,int(obsid),'slot_%d'%(x+1))) - slotFound = True - break - if not slotFound: - logger.warn("No Architecture %s on Observer %s"%(obsiddict[obsid]['architecture'],obsid)) - sys.exit(errno.EAGAIN) - - return obsslotlist + ### SLOT NUMBER + # Get slot number for each observer, slots are used for the whole test plus setup and cleanup time + obsslotlist = [] + start = int(testStart) - int(config.get('tests', 'setuptime')) * 60 + end = int(testEnd) + int(config.get('tests', 'cleanuptime')) * 60 + for obsid in obsidlist: + sql_adap = """SELECT `b`.`tg_adapt_types_fk`, `c`.`tg_adapt_types_fk`, `d`.`tg_adapt_types_fk`, `e`.`tg_adapt_types_fk` + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` + WHERE + (`a`.`observer_id` = %s) + """ + sql_platf = """SELECT COUNT(*) + FROM `tbl_serv_tg_adapt_types` AS `a` + WHERE + (`a`.`serv_tg_adapt_types_key` = %s) + AND (LOWER(`name`) = LOWER('%s')) + """ + cursor.execute(sql_adap %(obsid)) + adaptTypesFk = cursor.fetchone() + if not adaptTypesFk: + logger.warn("Could not fetch adapter type fks for the observer %s"%obsid) + sys.exit(errno.EAGAIN) + else: + slotFound = False + for x in range(0,4): + if not adaptTypesFk[x]: + continue + cursor.execute(sql_platf %(adaptTypesFk[x],obsiddict[obsid]['architecture'])) + ret = cursor.fetchone() + if int(ret[0]) == 1: + obsslotlist.append(TestbedResource(start,end,int(obsid),'slot_%d'%(x+1))) + slotFound = True + break + if not slotFound: + logger.warn("No Architecture %s on Observer %s"%(obsiddict[obsid]['architecture'],obsid)) + sys.exit(errno.EAGAIN) + + return obsslotlist # END resourceSlots @@ -242,24 +242,24 @@ def resourceSlots(testStart,testEnd,obsidlist,obsiddict,config,cursor): # ############################################################################## def 
resourceFrequency(testStart,testEnd,obsidlist,obsiddict,cursor): - # get frequencies for all observers, frequencies are used for the whole test - obsfreqlist = [] - for obsid in obsidlist: - sql = "SELECT freq_2400, freq_868, freq_433 FROM `tbl_serv_platforms` WHERE (LOWER(`name`) = LOWER('%s'))"%obsiddict[obsid]['architecture'] - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("Could not fetch frequencies for the observer %s"%obsid) - sys.exit(errno.EAGAIN) - else: - if int(ret[0]) == 1: - obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_2400')) - if int(ret[1]) == 1: - obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_868')) - if int(ret[2]) == 1: - obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_433')) - - return obsfreqlist + # get frequencies for all observers, frequencies are used for the whole test + obsfreqlist = [] + for obsid in obsidlist: + sql = "SELECT freq_2400, freq_868, freq_433 FROM `tbl_serv_platforms` WHERE (LOWER(`name`) = LOWER('%s'))"%obsiddict[obsid]['architecture'] + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("Could not fetch frequencies for the observer %s"%obsid) + sys.exit(errno.EAGAIN) + else: + if int(ret[0]) == 1: + obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_2400')) + if int(ret[1]) == 1: + obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_868')) + if int(ret[2]) == 1: + obsfreqlist.append(TestbedResource(testStart,testEnd,int(obsid),'freq_433')) + + return obsfreqlist # END resourceFrequency ############################################################################## @@ -269,63 +269,63 @@ def resourceFrequency(testStart,testEnd,obsidlist,obsiddict,cursor): # ############################################################################## def resourceMux(testStart,testEnd,obsidlist,tree,ns,config): - - # first get the services which use the mux for the whole test - obsmuxdict = {} - start = int(testStart) - int(config.get('tests', 'setuptime')) * 60 - end = int(testEnd) + int(config.get('tests', 'cleanuptime')) * 60 - - # Check serial configuration - serial = tree.xpath('//d:serialConf', namespaces=ns) - if serial: - port = tree.xpath('//d:serialConf/d:port', namespaces=ns) - if port and port[0].text == 'serial': - obsids = tree.xpath('//d:serialConf/d:obsIds', namespaces=ns)[0].text.split() - for obsid in obsids: - obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') - - # Check gpio tracing configuration - gpiotracing = tree.xpath('//d:gpioTracingConf', namespaces=ns) - if gpiotracing: - obsids = tree.xpath('//d:gpioTracingConf/d:obsIds', namespaces=ns)[0].text.split() - for obsid in obsids: - obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') - - # Check gpio actuation configuration - gpioactuation = tree.xpath('//d:gpioActuationConf', namespaces=ns) - if gpioactuation: - obsids = tree.xpath('//d:gpioActuationConf/d:obsIds', namespaces=ns)[0].text.split() - for obsid in obsids: - obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') - - # Check power profiling configuration - powerProfiling = tree.xpath('//d:powerProfilingConf', namespaces=ns) - if powerProfiling: - obsids = tree.xpath('//d:powerProfilingConf/d:obsIds', namespaces=ns)[0].text.split() - for obsid in obsids: - obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') - - # TODO: if DAQ is used - - muxlist = [] - - for obsid in obsidlist: - # if the mux is not used for 
the services above it is only used for setup,start,end and cleanup (except duration is smaller than start + stop time) - if (not int(obsid) in obsmuxdict): - duration = testEnd - testStart - if duration > (int(config.get('tests','guard_starttime')) * 60) + (int(config.get('tests', 'guard_stoptime')) * 60): - # set mux for setup and start - muxlist.append(TestbedResource(testStart - int(config.get('tests', 'setuptime')) * 60,testStart + int(config.get('tests','guard_starttime')) * 60,int(obsid),'mux')) - # set mux for stop and cleanup - muxlist.append(TestbedResource(testEnd - int(config.get('tests', 'guard_stoptime')) * 60,testEnd + int(config.get('tests','cleanuptime')) * 60,int(obsid),'mux')) - else: - # set mux for whole test plus setup and cleanup - muxlist.append(TestbedResource(testStart - int(config.get('tests', 'setuptime')) * 60,testEnd + int(config.get('tests','cleanuptime')) * 60,int(obsid),'mux')) - - for dictres in list(obsmuxdict.values()): - muxlist.append(dictres) - - return muxlist + + # first get the services which use the mux for the whole test + obsmuxdict = {} + start = int(testStart) - int(config.get('tests', 'setuptime')) * 60 + end = int(testEnd) + int(config.get('tests', 'cleanuptime')) * 60 + + # Check serial configuration + serial = tree.xpath('//d:serialConf', namespaces=ns) + if serial: + port = tree.xpath('//d:serialConf/d:port', namespaces=ns) + if port and port[0].text == 'serial': + obsids = tree.xpath('//d:serialConf/d:obsIds', namespaces=ns)[0].text.split() + for obsid in obsids: + obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') + + # Check gpio tracing configuration + gpiotracing = tree.xpath('//d:gpioTracingConf', namespaces=ns) + if gpiotracing: + obsids = tree.xpath('//d:gpioTracingConf/d:obsIds', namespaces=ns)[0].text.split() + for obsid in obsids: + obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') + + # Check gpio actuation configuration + gpioactuation = tree.xpath('//d:gpioActuationConf', namespaces=ns) + if gpioactuation: + obsids = tree.xpath('//d:gpioActuationConf/d:obsIds', namespaces=ns)[0].text.split() + for obsid in obsids: + obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') + + # Check power profiling configuration + powerProfiling = tree.xpath('//d:powerProfilingConf', namespaces=ns) + if powerProfiling: + obsids = tree.xpath('//d:powerProfilingConf/d:obsIds', namespaces=ns)[0].text.split() + for obsid in obsids: + obsmuxdict[int(obsid)] = TestbedResource(start,end,int(obsid),'mux') + + # TODO: if DAQ is used + + muxlist = [] + + for obsid in obsidlist: + # if the mux is not used for the services above it is only used for setup,start,end and cleanup (except duration is smaller than start + stop time) + if (not int(obsid) in obsmuxdict): + duration = testEnd - testStart + if duration > (int(config.get('tests','guard_starttime')) * 60) + (int(config.get('tests', 'guard_stoptime')) * 60): + # set mux for setup and start + muxlist.append(TestbedResource(testStart - int(config.get('tests', 'setuptime')) * 60,testStart + int(config.get('tests','guard_starttime')) * 60,int(obsid),'mux')) + # set mux for stop and cleanup + muxlist.append(TestbedResource(testEnd - int(config.get('tests', 'guard_stoptime')) * 60,testEnd + int(config.get('tests','cleanuptime')) * 60,int(obsid),'mux')) + else: + # set mux for whole test plus setup and cleanup + muxlist.append(TestbedResource(testStart - int(config.get('tests', 'setuptime')) * 60,testEnd + int(config.get('tests','cleanuptime')) * 
60,int(obsid),'mux')) + + for dictres in list(obsmuxdict.values()): + muxlist.append(dictres) + + return muxlist # END resourceMux ############################################################################## @@ -335,58 +335,58 @@ def resourceMux(testStart,testEnd,obsidlist,tree,ns,config): # ############################################################################## def schedule(ASAP,resources,cursor,logger): - timeStart = time.clock() - isSchedulable = False - # create lookup dictionary - resourcesdict = {} - for r in resources: - try: - l = resourcesdict[(r.obsid, r.restype)] - except KeyError: - resourcesdict[(r.obsid, r.restype)] = [] - l = resourcesdict[(r.obsid, r.restype)] - l.append(r) - # TODO: Consider reservations as well - - # Get all tests which overlap in time - testStartString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(min([r.time_start for r in resources]))) - sql = "SELECT `time_start`, `time_end`, `observer_fk`, `resource_type` FROM `tbl_serv_resource_allocation` WHERE (`time_end` >= '%s')" % testStartString - cursor.execute(sql) - ret = cursor.fetchall() - # Now check for all resource usage intervals if they overlap in time with an already scheduled test - testShift = 0 - while not isSchedulable: - maxShift = 0 # keep track of largest shift needed to resolve dependencies - isSchedulable = True - for i in range(len(ret)): - # for every ret, check for collisions - allocated_start = int(calendar.timegm(ret[i][0].timetuple())) - allocated_end = int(calendar.timegm(ret[i][1].timetuple())) - for res in resourcesdict[(ret[i][2],ret[i][3])]: - if(allocated_start <= res.time_start + testShift and allocated_end >= res.time_end + testShift): - isSchedulable = False - # if not ASAP, break - if not ASAP: - break - else: - shift = allocated_end - (res.time_start + testShift) - if shift > maxShift: - maxShift = shift - if not ASAP and not isSchedulable: - break - if not ASAP and not isSchedulable: - break - else: - # else shift by maxShift and repeat - testShift = testShift + maxShift + 1 - - if isSchedulable: - for i in range(len(resources)): - resources[i].time_start += testShift - resources[i].time_end += testShift - - dur = time.clock() - timeStart - return isSchedulable,testShift,[dur] + timeStart = time.clock() + isSchedulable = False + # create lookup dictionary + resourcesdict = {} + for r in resources: + try: + l = resourcesdict[(r.obsid, r.restype)] + except KeyError: + resourcesdict[(r.obsid, r.restype)] = [] + l = resourcesdict[(r.obsid, r.restype)] + l.append(r) + # TODO: Consider reservations as well + + # Get all tests which overlap in time + testStartString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(min([r.time_start for r in resources]))) + sql = "SELECT `time_start`, `time_end`, `observer_fk`, `resource_type` FROM `tbl_serv_resource_allocation` WHERE (`time_end` >= '%s')" % testStartString + cursor.execute(sql) + ret = cursor.fetchall() + # Now check for all resource usage intervals if they overlap in time with an already scheduled test + testShift = 0 + while not isSchedulable: + maxShift = 0 # keep track of largest shift needed to resolve dependencies + isSchedulable = True + for i in range(len(ret)): + # for every ret, check for collisions + allocated_start = int(calendar.timegm(ret[i][0].timetuple())) + allocated_end = int(calendar.timegm(ret[i][1].timetuple())) + for res in resourcesdict[(ret[i][2],ret[i][3])]: + if(allocated_start <= res.time_start + testShift and allocated_end >= res.time_end + testShift): + isSchedulable = False + # if 
not ASAP, break + if not ASAP: + break + else: + shift = allocated_end - (res.time_start + testShift) + if shift > maxShift: + maxShift = shift + if not ASAP and not isSchedulable: + break + if not ASAP and not isSchedulable: + break + else: + # else shift by maxShift and repeat + testShift = testShift + maxShift + 1 + + if isSchedulable: + for i in range(len(resources)): + resources[i].time_start += testShift + resources[i].time_end += testShift + + dur = time.clock() - timeStart + return isSchedulable,testShift,[dur] # END schedule ############################################################################## @@ -396,117 +396,117 @@ def schedule(ASAP,resources,cursor,logger): # ############################################################################## def modifyConfig(tree,ns,userId,testStart,testEnd,config,cursor): - nstext = config.get('xml', 'namespace') - - # 1) Add embedded images to the db and change the config accordingly - targetconfs = tree.xpath('//d:targetConf', namespaces=ns) - for targetconf in targetconfs: - embimageid = None - # Get elements: - ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) - if ret: - embimageid = [o.text for o in ret] - embimageid_line = [o.sourceline for o in ret] - - for embimg, line,embIm in zip(embimageid, embimageid_line,ret): - imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' %(embimg), namespaces=ns) - - # Get Image Data: - # Name can be used directly - name = imageconf[0].xpath('d:name', namespaces=ns)[0].text - # description can be empty - description = imageconf[0].xpath('d:description', namespaces=ns) - if description: - description = description[0].text - else: - description = "" - # for the platform we have to get the fk - platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text - sql = "SELECT serv_platforms_key FROM tbl_serv_platforms WHERE (LOWER(name) = LOWER('%s'))"%platform - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("Error in modifyConfig while getting the platform_fk") - sys.exit(errno.EAGAIN) - else: - platform = ret[0] - # same for the os - if imageconf[0].xpath('d:os', namespaces=ns): - os = imageconf[0].xpath('d:os', namespaces=ns)[0].text - else: - os = 'other' # os tag is optional and deprecated - sql = "SELECT serv_operatingsystems_key FROM tbl_serv_operatingsystems WHERE (LOWER(name) = LOWER('%s'))"%os - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("Error in modifyConfig while getting the operatingsystem_fk") - sys.exit(errno.EAGAIN) - else: - os = ret[0] - # get core - sql = "SELECT core FROM tbl_serv_architectures WHERE (platforms_fk = LOWER('%s'))"%platform - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("Error in modifyConfig while getting the core") - sys.exit(errno.EAGAIN) - else: - core = ret[0] - # get data and create hash - data = base64.b64decode(imageconf[0].xpath('d:data', namespaces=ns)[0].text.strip()) - imgHash = hashlib.sha1(data).hexdigest() - # Check if there are dublicates - sql = """SELECT `serv_targetimages_key`, `binary` - FROM `tbl_serv_targetimages` - WHERE `owner_fk`='%s' - AND `binary` IS NOT NULL - AND `binary_hash_sha1`='%s' - AND `operatingsystems_fk`='%s' - AND `platforms_fk`='%s' - AND `core`='%s'"""%(userId,imgHash,os,platform,core) - test = cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - # Add image to db: - sql = """INSERT INTO `tbl_serv_targetimages` 
(`name`,`description`,`owner_fk`,`operatingsystems_fk`,`platforms_fk`,`core`,`binary`,`binary_hash_sha1`) - VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""" - insertData = (name,description,userId,os,platform,core,data,imgHash) - cursor.execute(sql,insertData) - if not cursor.lastrowid: - logger.warn("Error, couldn't insert image in db") - sys.exit(errno.EAGAIN) - imageId = int(cursor.lastrowid) - logger.debug("Added Image with id %s to db."%imageId) - else: - imageId = ret[0] - logger.debug("Dublicate Image with id %s detected."%imageId) - # Replace embedded image with dbimage - targetconf.remove(embIm) - dbImageEntry = etree.Element('{%s}dbImageId'%nstext) - dbImageEntry.text = str(imageId) - targetconf.append(dbImageEntry) - root = tree.getroot() - root.remove(imageconf[0]) - - # 2) Change from ASAP to fixed time if necessary - genConf = tree.xpath('//d:generalConf', namespaces=ns)[0] - asap = tree.xpath('//d:generalConf/d:scheduleAsap', namespaces=ns) - if asap: - genConf.remove(asap[0]) - absEntry = etree.Element('{%s}scheduleAbsolute'%nstext) - genConf.append(absEntry) - start = etree.Element('{%s}start'%nstext) - start.text = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(testStart)) - absEntry.append(start) - end = etree.Element('{%s}end'%nstext) - end.text = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(testEnd)) - absEntry.append(end) - # to keep right order remove and add email if it exists - email = tree.xpath('//d:generalConf/d:emailResults', namespaces=ns) - if email: - genConf.remove(email[0]) - genConf.append(email[0]) - return tree + nstext = config.get('xml', 'namespace') + + # 1) Add embedded images to the db and change the config accordingly + targetconfs = tree.xpath('//d:targetConf', namespaces=ns) + for targetconf in targetconfs: + embimageid = None + # Get elements: + ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) + if ret: + embimageid = [o.text for o in ret] + embimageid_line = [o.sourceline for o in ret] + + for embimg, line,embIm in zip(embimageid, embimageid_line,ret): + imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' 
%(embimg), namespaces=ns) + + # Get Image Data: + # Name can be used directly + name = imageconf[0].xpath('d:name', namespaces=ns)[0].text + # description can be empty + description = imageconf[0].xpath('d:description', namespaces=ns) + if description: + description = description[0].text + else: + description = "" + # for the platform we have to get the fk + platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text + sql = "SELECT serv_platforms_key FROM tbl_serv_platforms WHERE (LOWER(name) = LOWER('%s'))"%platform + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("Error in modifyConfig while getting the platform_fk") + sys.exit(errno.EAGAIN) + else: + platform = ret[0] + # same for the os + if imageconf[0].xpath('d:os', namespaces=ns): + os = imageconf[0].xpath('d:os', namespaces=ns)[0].text + else: + os = 'other' # os tag is optional and deprecated + sql = "SELECT serv_operatingsystems_key FROM tbl_serv_operatingsystems WHERE (LOWER(name) = LOWER('%s'))"%os + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("Error in modifyConfig while getting the operatingsystem_fk") + sys.exit(errno.EAGAIN) + else: + os = ret[0] + # get core + sql = "SELECT core FROM tbl_serv_architectures WHERE (platforms_fk = LOWER('%s'))"%platform + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("Error in modifyConfig while getting the core") + sys.exit(errno.EAGAIN) + else: + core = ret[0] + # get data and create hash + data = base64.b64decode(imageconf[0].xpath('d:data', namespaces=ns)[0].text.strip()) + imgHash = hashlib.sha1(data).hexdigest() + # Check if there are dublicates + sql = """SELECT `serv_targetimages_key`, `binary` + FROM `tbl_serv_targetimages` + WHERE `owner_fk`='%s' + AND `binary` IS NOT NULL + AND `binary_hash_sha1`='%s' + AND `operatingsystems_fk`='%s' + AND `platforms_fk`='%s' + AND `core`='%s'"""%(userId,imgHash,os,platform,core) + test = cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + # Add image to db: + sql = """INSERT INTO `tbl_serv_targetimages` (`name`,`description`,`owner_fk`,`operatingsystems_fk`,`platforms_fk`,`core`,`binary`,`binary_hash_sha1`) + VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""" + insertData = (name,description,userId,os,platform,core,data,imgHash) + cursor.execute(sql,insertData) + if not cursor.lastrowid: + logger.warn("Error, couldn't insert image in db") + sys.exit(errno.EAGAIN) + imageId = int(cursor.lastrowid) + logger.debug("Added Image with id %s to db."%imageId) + else: + imageId = ret[0] + logger.debug("Dublicate Image with id %s detected."%imageId) + # Replace embedded image with dbimage + targetconf.remove(embIm) + dbImageEntry = etree.Element('{%s}dbImageId'%nstext) + dbImageEntry.text = str(imageId) + targetconf.append(dbImageEntry) + root = tree.getroot() + root.remove(imageconf[0]) + + # 2) Change from ASAP to fixed time if necessary + genConf = tree.xpath('//d:generalConf', namespaces=ns)[0] + asap = tree.xpath('//d:generalConf/d:scheduleAsap', namespaces=ns) + if asap: + genConf.remove(asap[0]) + absEntry = etree.Element('{%s}scheduleAbsolute'%nstext) + genConf.append(absEntry) + start = etree.Element('{%s}start'%nstext) + start.text = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(testStart)) + absEntry.append(start) + end = etree.Element('{%s}end'%nstext) + end.text = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(testEnd)) + absEntry.append(end) + # to keep right order remove and add email if it exists + email = 
tree.xpath('//d:generalConf/d:emailResults', namespaces=ns) + if email: + genConf.remove(email[0]) + genConf.append(email[0]) + return tree # END modifyConfig @@ -518,97 +518,97 @@ def modifyConfig(tree,ns,userId,testStart,testEnd,config,cursor): ############################################################################## def addtesttodb(tree,ns,resArrayClean,userId,config,cursor): - # First add the test to the test db - title = tree.xpath('//d:generalConf/d:name', namespaces=ns)[0].text - description = tree.xpath('//d:generalConf/d:name', namespaces=ns) - if description: - description = description[0].text - else: - description = "" - start = resArrayClean[0][0] + int(config.get('tests', 'setuptime')) * 60 - start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)) - end = resArrayClean[-1][1] - int(config.get('tests', 'cleanuptime')) * 60 - end = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)) - root = tree.getroot() - config_xml = etree.tostring(root, encoding='utf8', method='xml') - sql = """INSERT INTO tbl_serv_tests(title, description,owner_fk,testconfig_xml,time_start_wish,time_end_wish,test_status) - VALUES (%s,%s,%s,%s,%s,%s,%s)""" - insertData = (title,description,userId,config_xml,start,end,'planned') - cursor.execute(sql,insertData) - if not cursor.lastrowid: - logger.warn("Error, couldn't insert test in db") - sys.exit(errno.EAGAIN) - testId = cursor.lastrowid - logger.debug("Added test with id %s to tbl_srv_tests"%testId) - - # add entries for mapping table - targetconfs = tree.xpath('//d:targetConf', namespaces=ns) - for targetconf in targetconfs: - obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() - imgid = targetconf.xpath('d:dbImageId', namespaces=ns)[0].text - if not imgid: - print("ERROR") - - for obs in obsids: - # get observer_fk - sql = """SELECT serv_observer_key - FROM tbl_serv_observer - WHERE observer_id=%s"""%(obs) - test = cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - logger.warn("Couldn't fetch observer key for observer id %s"%obs) - sys.exit(errno.EAGAIN) - obsKey = ret[0] - sql = """INSERT INTO tbl_serv_map_test_observer_targetimages (observer_fk,test_fk,targetimage_fk,node_id) VALUES (%s,%s,%s,%s)""" - insertData = (int(obsKey),int(testId),int(imgid),int(obs)) - try: - cursor.execute(sql,insertData) - db.commit() - except: - logger.warn("Couldn't add mapping for observer %s"%obs) - sys.exit(errno.EAGAIN) - logger.debug("Added mapping for observers for test %s to db"%(testId)) - - # add entries for resource table - resources = resArrayClean - # First get all tests which overlap in time and delete them - testStartString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(resArrayClean[0][0])) - testStopString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(resArrayClean[-1][1])) - sql = "SELECT * FROM `tbl_serv_test_resources` WHERE (time_start <= '%s') AND (time_end >= '%s')"%(testStopString,testStartString) - cursor.execute(sql) - ret = cursor.fetchall() - - sql = "DELETE FROM `tbl_serv_test_resources` WHERE (time_start <= '%s') AND (time_end >= '%s')"%(testStopString,testStartString) - cursor.execute(sql) - - if not cursor.rowcount == len(ret): - logger.warn("Couldn't delete from resource table") - sys.exit(errno.EAGAIN) - - # Now check for all resource usage intervals if they overlap in time with an already scheduled test - for old in ret: - old = list(old[0:-1]) - old[0] = int(calendar.timegm(time.strptime(str(old[0]), '%Y-%m-%d %H:%M:%S'))) - old[1] = int(calendar.timegm(time.strptime(str(old[1]), 
'%Y-%m-%d %H:%M:%S'))) - resources.append(old) - - resources = cleanuparray(resources) - - for ts in resources: - ts[0] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts[0])) - ts[1] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts[1])) - ts.append(time.strftime('%Y-%m-%d %H:%M:%S')) - sql = """INSERT INTO tbl_serv_test_resources VALUES %r"""%(tuple(ts),) - cursor.execute(sql) - db.commit() - if not cursor.rowcount: - logger.warn("Couldn't add resources timeslot [%s,%s] for test %s to db"%(ts[0],ts[1],testId)) - sys.exit(errno.EAGAIN) - logger.debug("Added resources timeslot [%s,%s] for test %s to db"%(ts[0],ts[1],testId)) - - logger.debug("Successfully added test with starttime %s and testId %s"%(start,testId)) - return start,testId + # First add the test to the test db + title = tree.xpath('//d:generalConf/d:name', namespaces=ns)[0].text + description = tree.xpath('//d:generalConf/d:name', namespaces=ns) + if description: + description = description[0].text + else: + description = "" + start = resArrayClean[0][0] + int(config.get('tests', 'setuptime')) * 60 + start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)) + end = resArrayClean[-1][1] - int(config.get('tests', 'cleanuptime')) * 60 + end = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)) + root = tree.getroot() + config_xml = etree.tostring(root, encoding='utf8', method='xml') + sql = """INSERT INTO tbl_serv_tests(title, description,owner_fk,testconfig_xml,time_start_wish,time_end_wish,test_status) + VALUES (%s,%s,%s,%s,%s,%s,%s)""" + insertData = (title,description,userId,config_xml,start,end,'planned') + cursor.execute(sql,insertData) + if not cursor.lastrowid: + logger.warn("Error, couldn't insert test in db") + sys.exit(errno.EAGAIN) + testId = cursor.lastrowid + logger.debug("Added test with id %s to tbl_srv_tests"%testId) + + # add entries for mapping table + targetconfs = tree.xpath('//d:targetConf', namespaces=ns) + for targetconf in targetconfs: + obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() + imgid = targetconf.xpath('d:dbImageId', namespaces=ns)[0].text + if not imgid: + print("ERROR") + + for obs in obsids: + # get observer_fk + sql = """SELECT serv_observer_key + FROM tbl_serv_observer + WHERE observer_id=%s"""%(obs) + test = cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + logger.warn("Couldn't fetch observer key for observer id %s"%obs) + sys.exit(errno.EAGAIN) + obsKey = ret[0] + sql = """INSERT INTO tbl_serv_map_test_observer_targetimages (observer_fk,test_fk,targetimage_fk,node_id) VALUES (%s,%s,%s,%s)""" + insertData = (int(obsKey),int(testId),int(imgid),int(obs)) + try: + cursor.execute(sql,insertData) + db.commit() + except: + logger.warn("Couldn't add mapping for observer %s"%obs) + sys.exit(errno.EAGAIN) + logger.debug("Added mapping for observers for test %s to db"%(testId)) + + # add entries for resource table + resources = resArrayClean + # First get all tests which overlap in time and delete them + testStartString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(resArrayClean[0][0])) + testStopString = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(resArrayClean[-1][1])) + sql = "SELECT * FROM `tbl_serv_test_resources` WHERE (time_start <= '%s') AND (time_end >= '%s')"%(testStopString,testStartString) + cursor.execute(sql) + ret = cursor.fetchall() + + sql = "DELETE FROM `tbl_serv_test_resources` WHERE (time_start <= '%s') AND (time_end >= '%s')"%(testStopString,testStartString) + cursor.execute(sql) + + if not cursor.rowcount == 
len(ret): + logger.warn("Couldn't delete from resource table") + sys.exit(errno.EAGAIN) + + # Now check for all resource usage intervals if they overlap in time with an already scheduled test + for old in ret: + old = list(old[0:-1]) + old[0] = int(calendar.timegm(time.strptime(str(old[0]), '%Y-%m-%d %H:%M:%S'))) + old[1] = int(calendar.timegm(time.strptime(str(old[1]), '%Y-%m-%d %H:%M:%S'))) + resources.append(old) + + resources = cleanuparray(resources) + + for ts in resources: + ts[0] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts[0])) + ts[1] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts[1])) + ts.append(time.strftime('%Y-%m-%d %H:%M:%S')) + sql = """INSERT INTO tbl_serv_test_resources VALUES %r"""%(tuple(ts),) + cursor.execute(sql) + db.commit() + if not cursor.rowcount: + logger.warn("Couldn't add resources timeslot [%s,%s] for test %s to db"%(ts[0],ts[1],testId)) + sys.exit(errno.EAGAIN) + logger.debug("Added resources timeslot [%s,%s] for test %s to db"%(ts[0],ts[1],testId)) + + logger.debug("Successfully added test with starttime %s and testId %s"%(start,testId)) + return start,testId # END addtesttodb @@ -618,96 +618,96 @@ def addtesttodb(tree,ns,resArrayClean,userId,config,cursor): # ############################################################################## def main(argv): - # Initialize error counter and set timezone to UTC: - os.environ['TZ'] = 'UTC' - time.tzset() - - # Open the log and create logger: - try: - logging.config.fileConfig(scriptpath + '/logging.conf') - logger = logging.getLogger(os.path.basename(__file__)) - logger.debug("Start") - except: - syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" %(os.path.basename(__file__), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - - # Get the config file: - config = get_config() - if not config: - logger.warn("Could not read configuration file. Exiting...") - sys.exit(errno.EAGAIN) - - # Get command line parameters. 
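# --- Illustrative sketch, not part of the patch ------------------------------
# addtesttodb() folds the previously stored tbl_serv_test_resources rows back
# into `resources` and passes them through cleanuparray() before re-inserting
# them. cleanuparray() is not visible in this hunk; the snippet below only
# sketches the kind of interval merging it appears to perform, assuming entries
# that start with [start, end, ...] epoch-second integers.

def merge_intervals(resources):
    """Merge overlapping [start, end] intervals given as epoch seconds."""
    merged = []
    for start, end in sorted((r[0], r[1]) for r in resources):
        if merged and start <= merged[-1][1]:
            # Overlaps or touches the previous interval: extend it.
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([start, end])
    return merged

# Example: two overlapping slots collapse into one.
# merge_intervals([[0, 600, 'slot1'], [300, 900, 'slot1']])  ->  [[0, 900]]
# ------------------------------------------------------------------------------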
- if (not len(sys.argv) == 3): - logger.warn("Wrong API usage, there has to be exacly two parameters: xmlfilepath and userid") - sys.exit(errno.EAGAIN) - userId = sys.argv[2] - xml = sys.argv[1] - logger.debug("XML config filepath is: %s" %xml) - - # Parse XML file - f = open(xml, 'r') - parser = etree.XMLParser(remove_comments=True) - tree = etree.parse(f, parser) - ns = {'d': 'http://www.flocklab.ethz.ch'} - - ### First get information which all "resource-functions" need: time information and observer ids - # Get test times - [testStart,testEnd,testDuration,ASAP] = gettimeslot(tree,ns,config) - logger.debug("Test start,stop,duration: %s,%s,%s" %(testStart,testEnd,testDuration)) - - # Connect to the DB: - try: - db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) - cursor = db.cursor() - except: - logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - sys.exit(errno.EAGAIN) - - # Get list of observers - [obsidlist,obsiddict] = getobsids(tree,ns,cursor) - logger.debug("Obsidlist: %s" %(obsidlist)) - - ### List with all timeslots and resource usages - resources = [] - - # get slot numbers - resources += resourceSlots(testStart,testEnd,obsidlist,obsiddict,config,cursor) - - # get frequencies - resources += resourceFrequency(testStart,testEnd,obsidlist,obsiddict,cursor) - - # get mux usage - resources += resourceMux(testStart,testEnd,obsidlist,tree,ns,config) - - ### Scheduling - tmTot = time.clock() - isSchedulable,timeshift,timeInf = schedule(ASAP,resources, cursor,logger) - tmTot = time.clock() - tmTot - logger.debug("Time for scheduling (%s loops): %s, avg: %s, max: %s, min: %s"%(tmTot,len(timeInf),sum(timeInf)/len(timeInf),max(timeInf),min(timeInf))) - - if isSchedulable: - logger.debug("Test is schedulable with Starttime %s and Endtime %s" %(testStart + timeshift, testEnd + timeshift)) - # modify xml (asap -> absolute, embeddedimage -> dbimage) - startTime = testStart + timeshift - endTime = testEnd + timeshift - tree = modifyConfig(tree,ns,userId,startTime,endTime,config,cursor) - ## Add test to db and testmapping to db - #start,testId = addtesttodb(tree,ns,resArrayClean,userId) - #db.commit() - #return True,start,testId - db.close() - return True,1,1 - else: - db.close() - logger.debug("Test is NOT schedulable.") - return False,0,0 + # Initialize error counter and set timezone to UTC: + os.environ['TZ'] = 'UTC' + time.tzset() + + # Open the log and create logger: + try: + logging.config.fileConfig(scriptpath + '/logging.conf') + logger = logging.getLogger(os.path.basename(__file__)) + logger.debug("Start") + except: + syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" %(os.path.basename(__file__), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + + # Get the config file: + config = get_config() + if not config: + logger.warn("Could not read configuration file. Exiting...") + sys.exit(errno.EAGAIN) + + # Get command line parameters. 
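# --- Illustrative sketch, not part of the patch ------------------------------
# main() in this scheduler times the schedule() call with time.clock(), which is
# deprecated and was removed in Python 3.8. time.perf_counter() is the usual
# replacement for measuring elapsed time; schedule() is stood in for by
# time.sleep() here, so only the timing pattern is shown.

import time

t0 = time.perf_counter()
time.sleep(0.01)                      # stand-in for schedule(ASAP, resources, ...)
elapsed = time.perf_counter() - t0    # elapsed wall-clock seconds as a float
print("Time for scheduling: %.3f s" % elapsed)
# ------------------------------------------------------------------------------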
+ if (not len(sys.argv) == 3): + logger.warn("Wrong API usage, there has to be exacly two parameters: xmlfilepath and userid") + sys.exit(errno.EAGAIN) + userId = sys.argv[2] + xml = sys.argv[1] + logger.debug("XML config filepath is: %s" %xml) + + # Parse XML file + f = open(xml, 'r') + parser = etree.XMLParser(remove_comments=True) + tree = etree.parse(f, parser) + ns = {'d': 'http://www.flocklab.ethz.ch'} + + ### First get information which all "resource-functions" need: time information and observer ids + # Get test times + [testStart,testEnd,testDuration,ASAP] = gettimeslot(tree,ns,config) + logger.debug("Test start,stop,duration: %s,%s,%s" %(testStart,testEnd,testDuration)) + + # Connect to the DB: + try: + db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) + cursor = db.cursor() + except: + logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + sys.exit(errno.EAGAIN) + + # Get list of observers + [obsidlist,obsiddict] = getobsids(tree,ns,cursor) + logger.debug("Obsidlist: %s" %(obsidlist)) + + ### List with all timeslots and resource usages + resources = [] + + # get slot numbers + resources += resourceSlots(testStart,testEnd,obsidlist,obsiddict,config,cursor) + + # get frequencies + resources += resourceFrequency(testStart,testEnd,obsidlist,obsiddict,cursor) + + # get mux usage + resources += resourceMux(testStart,testEnd,obsidlist,tree,ns,config) + + ### Scheduling + tmTot = time.clock() + isSchedulable,timeshift,timeInf = schedule(ASAP,resources, cursor,logger) + tmTot = time.clock() - tmTot + logger.debug("Time for scheduling (%s loops): %s, avg: %s, max: %s, min: %s"%(tmTot,len(timeInf),sum(timeInf)/len(timeInf),max(timeInf),min(timeInf))) + + if isSchedulable: + logger.debug("Test is schedulable with Starttime %s and Endtime %s" %(testStart + timeshift, testEnd + timeshift)) + # modify xml (asap -> absolute, embeddedimage -> dbimage) + startTime = testStart + timeshift + endTime = testEnd + timeshift + tree = modifyConfig(tree,ns,userId,startTime,endTime,config,cursor) + ## Add test to db and testmapping to db + #start,testId = addtesttodb(tree,ns,resArrayClean,userId) + #db.commit() + #return True,start,testId + db.close() + return True,1,1 + else: + db.close() + logger.debug("Test is NOT schedulable.") + return False,0,0 if __name__ == "__main__": - isSchedulable,start,testId = main(sys.argv) - retVal = "%s,%s,%s"%(isSchedulable,start,testId) - print(isSchedulable) - print(start) - print(testId) + isSchedulable,start,testId = main(sys.argv) + retVal = "%s,%s,%s"%(isSchedulable,start,testId) + print(isSchedulable) + print(start) + print(testId) diff --git a/webserver/testmanagement/targetimage_validator.py b/webserver/testmanagement/targetimage_validator.py index c2436738a72aa18d65ad092f9cec9f3f65c1db8c..f67e1deac5ba25a00b7861534385ea6ca551fd59 100755 --- a/webserver/testmanagement/targetimage_validator.py +++ b/webserver/testmanagement/targetimage_validator.py @@ -1,8 +1,8 @@ #!/usr/bin/env python -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, getopt, errno, subprocess, MySQLdb, syslog, configparser, traceback @@ -22,8 +22,8 @@ 
SUCCESS = 0 # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -34,14 +34,14 @@ class Error(Exception): # ############################################################################## def usage(): - print(("Usage: %s --image=<path> --platform=<string> --os=<string> [--core=<int>] [--quiet] [--help]" % sys.argv[0])) - print("Validate a target image binary. Returns 0 on success, errno on errors.") - print("Options:") - print(" --image\t\t\tPath to the image binary which is to check.") - print(" --platform\t\t\tPlatform for which the image is intended. Must be registered in FlockLab database, table tbl_serv_platforms.") - print(" --core\t\t\tOptional. Core to use on platforms with several cores, defaults to 0.") - print(" --quiet\t\t\tOptional. Do not print on standard out.") - print(" --help\t\t\tOptional. Print this help.") + print(("Usage: %s --image=<path> --platform=<string> --os=<string> [--core=<int>] [--quiet] [--help]" % sys.argv[0])) + print("Validate a target image binary. Returns 0 on success, errno on errors.") + print("Options:") + print(" --image\t\t\tPath to the image binary which is to check.") + print(" --platform\t\t\tPlatform for which the image is intended. Must be registered in FlockLab database, table tbl_serv_platforms.") + print(" --core\t\t\tOptional. Core to use on platforms with several cores, defaults to 0.") + print(" --quiet\t\t\tOptional. Do not print on standard out.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -51,18 +51,18 @@ def usage(): # ############################################################################## def get_config(): - """Arguments: - none - Return value: - The configuration object on success - none otherwise - """ - try: - config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) - config.read(os.path.dirname(os.path.abspath(sys.argv[0])) + '/user.ini') - except: - syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(str(os.path.dirname(os.path.abspath(sys.argv[0]))), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return config + """Arguments: + none + Return value: + The configuration object on success + none otherwise + """ + try: + config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) + config.read(os.path.dirname(os.path.abspath(sys.argv[0])) + '/user.ini') + except: + syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(str(os.path.dirname(os.path.abspath(sys.argv[0]))), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return config ### END get_config() @@ -72,121 +72,121 @@ def get_config(): # ############################################################################## def main(argv): - quiet = False - imagepath = None - platform = None - core = 0 - - # Open the log and create logger: - try: - logging.config.fileConfig(scriptpath + '/logging.conf') - logger = logging.getLogger(os.path.basename(__file__)) - except: - syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" %(os.path.basename(__file__), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - - # Get the config file: - config = get_config() - if not config: - logger.warn("Could not read configuration file. Exiting...") - sys.exit(errno.EAGAIN) - - # Get command line parameters. 
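# --- Illustrative sketch, not part of the patch ------------------------------
# get_config() in this validator builds a configparser.SafeConfigParser, which
# is a deprecated alias of ConfigParser in Python 3. The sketch below reads the
# same user.ini next to the script with plain ConfigParser; the
# 'database'/'host' lookup mirrors what this script already queries, everything
# else is a placeholder.

import configparser, os, sys

def read_user_ini():
    cfg = configparser.ConfigParser(comment_prefixes=('#', ';'),
                                    inline_comment_prefixes=(';',))
    scriptdir = os.path.dirname(os.path.abspath(sys.argv[0]))
    parsed = cfg.read(os.path.join(scriptdir, 'user.ini'))  # list of files actually read
    return cfg if parsed else None

# Usage, mirroring the validator:
#   config = read_user_ini()
#   host = config.get('database', 'host')
# ------------------------------------------------------------------------------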
- try: - opts, args = getopt.getopt(argv, "hqi:p:c:", ["help", "quiet", "image=", "platform=", "core="]) - except getopt.GetoptError as err: - logger.warn(str(err)) - usage() - sys.exit(errno.EINVAL) - for opt, arg in opts: - if opt in ("-i", "--image"): - imagepath = arg - if (not os.path.exists(imagepath) or not os.path.isfile(imagepath)): - logger.warn("Wrong API usage: image binary file does not exist") - sys.exit(errno.EINVAL) - elif opt in ("-p", "--platform"): - platform = arg - elif opt in ("-c", "--core"): - core = int(arg) - elif opt in ("-h", "--help"): - usage() - sys.exit(SUCCESS) - elif opt in ("-q", "--quiet"): - quiet = True - else: - if not quiet: - print("Wrong API usage") - usage() - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - # Check mandatory arguments: - if ((imagepath == None) or (platform == None)): - if not quiet: - print("Wrong API usage") - usage() - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Get the config file: - config = get_config() - if not config: - logger.warn("Could not read configuration file. Exiting...") - sys.exit(errno.EAGAIN) - - # Connect to the DB: - try: - db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) - cursor = db.cursor() - except: - logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - sys.exit(errno.EAGAIN) - - # Check if platform is registered in database and get platform architecture: - sql = """SELECT `a`.`architecture` FROM `tbl_serv_platforms` LEFT JOIN `tbl_serv_architectures` `a` ON `tbl_serv_platforms`.`serv_platforms_key` = `a`.`platforms_fk` WHERE LOWER(name) = '%s' and `core`=%d;""" - cursor.execute(sql %(str(platform).lower(), core)) - ret = cursor.fetchone() - if not ret: - err_str = "Could not find platform %s in database. Exiting..." %(str(platform)) - logger.warn(err_str) - if not quiet: - print(err_str) - db.close() - sys.exit(errno.EINVAL) - else: - arch = ret[0] - arch = arch.lower() - - # Validate the image. This is dependent on the architecture of the target platform: - errcnt = 0 - if arch == 'msp430': - p = subprocess.Popen([config.get('targetimage', 'msp430'), '-a', imagepath], stdout=open(os.devnull), stderr=open(os.devnull)) - if p.wait() != 0: - errcnt += 1 - elif arch == 'arm': - p = subprocess.Popen([config.get('targetimage', 'arm'), '-a', imagepath], stdout=open(os.devnull), stderr=open(os.devnull)) - if p.wait() != 0: - errcnt += 1 - else: - err_str = "No image validation test specified for architecture %s, thus defaulting to passed validation." %arch - logger.info(err_str) - if not quiet: - print(err_str) - - if errcnt == 0: - if not quiet: - print("Target image validation successful.") - ret = SUCCESS - else: - err_str = "Target image validation failed. Please check your target image." - logger.debug(err_str) - if not quiet: - print(err_str) - ret = errno.EBADMSG - sys.exit(ret) + quiet = False + imagepath = None + platform = None + core = 0 + + # Open the log and create logger: + try: + logging.config.fileConfig(scriptpath + '/logging.conf') + logger = logging.getLogger(os.path.basename(__file__)) + except: + syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" %(os.path.basename(__file__), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + + # Get the config file: + config = get_config() + if not config: + logger.warn("Could not read configuration file. 
Exiting...") + sys.exit(errno.EAGAIN) + + # Get command line parameters. + try: + opts, args = getopt.getopt(argv, "hqi:p:c:", ["help", "quiet", "image=", "platform=", "core="]) + except getopt.GetoptError as err: + logger.warn(str(err)) + usage() + sys.exit(errno.EINVAL) + for opt, arg in opts: + if opt in ("-i", "--image"): + imagepath = arg + if (not os.path.exists(imagepath) or not os.path.isfile(imagepath)): + logger.warn("Wrong API usage: image binary file does not exist") + sys.exit(errno.EINVAL) + elif opt in ("-p", "--platform"): + platform = arg + elif opt in ("-c", "--core"): + core = int(arg) + elif opt in ("-h", "--help"): + usage() + sys.exit(SUCCESS) + elif opt in ("-q", "--quiet"): + quiet = True + else: + if not quiet: + print("Wrong API usage") + usage() + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + # Check mandatory arguments: + if ((imagepath == None) or (platform == None)): + if not quiet: + print("Wrong API usage") + usage() + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Get the config file: + config = get_config() + if not config: + logger.warn("Could not read configuration file. Exiting...") + sys.exit(errno.EAGAIN) + + # Connect to the DB: + try: + db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) + cursor = db.cursor() + except: + logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + sys.exit(errno.EAGAIN) + + # Check if platform is registered in database and get platform architecture: + sql = """SELECT `a`.`architecture` FROM `tbl_serv_platforms` LEFT JOIN `tbl_serv_architectures` `a` ON `tbl_serv_platforms`.`serv_platforms_key` = `a`.`platforms_fk` WHERE LOWER(name) = '%s' and `core`=%d;""" + cursor.execute(sql %(str(platform).lower(), core)) + ret = cursor.fetchone() + if not ret: + err_str = "Could not find platform %s in database. Exiting..." %(str(platform)) + logger.warn(err_str) + if not quiet: + print(err_str) + db.close() + sys.exit(errno.EINVAL) + else: + arch = ret[0] + arch = arch.lower() + + # Validate the image. This is dependent on the architecture of the target platform: + errcnt = 0 + if arch == 'msp430': + p = subprocess.Popen([config.get('targetimage', 'msp430'), '-a', imagepath], stdout=open(os.devnull), stderr=open(os.devnull)) + if p.wait() != 0: + errcnt += 1 + elif arch == 'arm': + p = subprocess.Popen([config.get('targetimage', 'arm'), '-a', imagepath], stdout=open(os.devnull), stderr=open(os.devnull)) + if p.wait() != 0: + errcnt += 1 + else: + err_str = "No image validation test specified for architecture %s, thus defaulting to passed validation." %arch + logger.info(err_str) + if not quiet: + print(err_str) + + if errcnt == 0: + if not quiet: + print("Target image validation successful.") + ret = SUCCESS + else: + err_str = "Target image validation failed. Please check your target image." 
+ logger.debug(err_str) + if not quiet: + print(err_str) + ret = errno.EBADMSG + sys.exit(ret) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - print("targetimage validator encountered an error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) - sys.exit(errno.EBADMSG) + try: + main(sys.argv[1:]) + except Exception: + print("targetimage validator encountered an error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) + sys.exit(errno.EBADMSG) diff --git a/webserver/testmanagement/testconfig_validator.py b/webserver/testmanagement/testconfig_validator.py index d6f9ce35d94614a6cafe216206077d9d6b91ec38..71a2b78fd9b078ddafa8569283eca82ec5d4a3fc 100755 --- a/webserver/testmanagement/testconfig_validator.py +++ b/webserver/testmanagement/testconfig_validator.py @@ -1,8 +1,8 @@ #!/usr/bin/env python -__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" -__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" -__license__ = "GPL" +__author__ = "Christoph Walser <walserc@tik.ee.ethz.ch>, Adnan Mlika" +__copyright__ = "Copyright 2010, ETH Zurich, Switzerland" +__license__ = "GPL" import sys, os, getopt, errno, subprocess, time, calendar, MySQLdb, tempfile, base64, syslog, re, configparser, traceback @@ -16,7 +16,7 @@ scriptpath = os.path.dirname(os.path.abspath(sys.argv[0])) SUCCESS = 0 ### -debug = False +debug = False ############################################################################## @@ -25,8 +25,8 @@ debug = False # ############################################################################## class Error(Exception): - """ Base class for exception. """ - pass + """ Base class for exception. """ + pass ### END Error classes @@ -34,38 +34,38 @@ class Error(Exception): ############################################################################## # # checkObsids -# Checks if every observer ID returned by the xpath evaluation is only used -# once and if every observer ID is in the list provided in obsidlist. +# Checks if every observer ID returned by the xpath evaluation is only used +# once and if every observer ID is in the list provided in obsidlist. 
# ############################################################################## def checkObsids(tree, xpathExpr, namespace, obsidlist=None): - duplicates = False - allInList = True - - # Get the observer IDs from the xpath expression: - rs = tree.xpath(xpathExpr, namespaces=namespace) - - # Build a list with all used observer IDs in it: - foundObsids = [] - tmp = [] - for ids in rs: - tmp.append(ids.text.split()) - list(map(foundObsids.extend, tmp)) - - # Check for duplicates: - if ( (len(foundObsids) != len(set(foundObsids))) ): - duplicates = True - - # Check if all obs ids are in the list: - for obsid in foundObsids: - if obsid not in obsidlist: - allInList = False - - # Return the values to the caller: - if (duplicates or not allInList): - return(None, duplicates, allInList) - else: - return(sorted(foundObsids), duplicates, allInList) + duplicates = False + allInList = True + + # Get the observer IDs from the xpath expression: + rs = tree.xpath(xpathExpr, namespaces=namespace) + + # Build a list with all used observer IDs in it: + foundObsids = [] + tmp = [] + for ids in rs: + tmp.append(ids.text.split()) + list(map(foundObsids.extend, tmp)) + + # Check for duplicates: + if ( (len(foundObsids) != len(set(foundObsids))) ): + duplicates = True + + # Check if all obs ids are in the list: + for obsid in foundObsids: + if obsid not in obsidlist: + allInList = False + + # Return the values to the caller: + if (duplicates or not allInList): + return(None, duplicates, allInList) + else: + return(sorted(foundObsids), duplicates, allInList) ### END checkObsids() @@ -73,22 +73,22 @@ def checkObsids(tree, xpathExpr, namespace, obsidlist=None): ############################################################################## # # getXmlTimestamp -# Converts XML timestamps to python taking timezone into account. +# Converts XML timestamps to python taking timezone into account. # ############################################################################## def getXmlTimestamp(datetimestring): - #is there a timezone? - m = re.match('([0-9]{4,4}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2})([+-])([0-9]{2,2}):([0-9]{2,2})',datetimestring) - if m == None: - timestamp = calendar.timegm(time.strptime(datetimestring, "%Y-%m-%dT%H:%M:%SZ")) - else: - timestamp = calendar.timegm(time.strptime('%s' % (m.group(1)), "%Y-%m-%dT%H:%M:%S")) - offset = int(m.group(3))*3600 + int(m.group(4)) * 60 - if m.group(2)=='-': - timestamp = timestamp + offset; - else: - timestamp = timestamp - offset; - return timestamp + #is there a timezone? + m = re.match('([0-9]{4,4}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2})([+-])([0-9]{2,2}):([0-9]{2,2})',datetimestring) + if m == None: + timestamp = calendar.timegm(time.strptime(datetimestring, "%Y-%m-%dT%H:%M:%SZ")) + else: + timestamp = calendar.timegm(time.strptime('%s' % (m.group(1)), "%Y-%m-%dT%H:%M:%S")) + offset = int(m.group(3))*3600 + int(m.group(4)) * 60 + if m.group(2)=='-': + timestamp = timestamp + offset; + else: + timestamp = timestamp - offset; + return timestamp ### END getXmlTimestamp() @@ -99,15 +99,15 @@ def getXmlTimestamp(datetimestring): # ############################################################################## def usage(config): - print(("Usage: %s [--xml=<path>] [--testid=<int>] [--userid=<int>] [--schema=<path>] [--quiet] [--help]" % sys.argv[0])) - print("Validate an XML testconfiguration. Returns 0 on success, errno on errors.") - print("Options:") - print(" --xml\t\t\t\tOptional. Path to the XML file which is to check. 
Either --xml or --testid are) mandatory. If both are given, --testid will be favoured.") - print(" --testid\t\t\tOptional. Test ID to validate. If this parameter is set, the XML will be taken from the DB. Either --xml or --testid are mandatory. If both are given, --testid will be favoured.") - print(" --userid\t\t\tOptional. User ID to which the XML belongs. Mandatory if --xml is specified.") - print((" --schema\t\t\tOptional. Path to the XML schema to check XML against. If not given, the standard path will be used: %s" %(str(config.get('xml', 'schemapath'))))) - print(" --quiet\t\t\tOptional. Do not print on standard out.") - print(" --help\t\t\tOptional. Print this help.") + print(("Usage: %s [--xml=<path>] [--testid=<int>] [--userid=<int>] [--schema=<path>] [--quiet] [--help]" % sys.argv[0])) + print("Validate an XML testconfiguration. Returns 0 on success, errno on errors.") + print("Options:") + print(" --xml\t\t\t\tOptional. Path to the XML file which is to check. Either --xml or --testid are) mandatory. If both are given, --testid will be favoured.") + print(" --testid\t\t\tOptional. Test ID to validate. If this parameter is set, the XML will be taken from the DB. Either --xml or --testid are mandatory. If both are given, --testid will be favoured.") + print(" --userid\t\t\tOptional. User ID to which the XML belongs. Mandatory if --xml is specified.") + print((" --schema\t\t\tOptional. Path to the XML schema to check XML against. If not given, the standard path will be used: %s" %(str(config.get('xml', 'schemapath'))))) + print(" --quiet\t\t\tOptional. Do not print on standard out.") + print(" --help\t\t\tOptional. Print this help.") ### END usage() @@ -117,19 +117,19 @@ def usage(config): # ############################################################################## def get_config(): - global scriptpath - """Arguments: - none - Return value: - The configuration object on success - none otherwise - """ - try: - config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) - config.read(scriptpath + '/user.ini') - except: - syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(scriptpath, str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return config + global scriptpath + """Arguments: + none + Return value: + The configuration object on success + none otherwise + """ + try: + config = configparser.SafeConfigParser(comment_prefixes=('#', ';'), inline_comment_prefixes=(';')) + config.read(scriptpath + '/user.ini') + except: + syslog(LOG_WARNING, "Could not read %s/user.ini because: %s: %s" %(scriptpath, str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return config ### END get_config() @@ -139,31 +139,31 @@ def get_config(): # ############################################################################## def is_admin(cursor=None, userid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - userid: user ID to test - Return value: - On success, True or False - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(userid) != int) or (userid <= 0)): - return(1) + """Arguments: + cursor: cursor of the database connection to be used for the query + userid: user ID to test + Return value: + On success, True or False + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the 
arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(userid) != int) or (userid <= 0)): + return(1) - # Get the addresses from the database: - try: - cursor.execute("SELECT `role` FROM `tbl_serv_users` WHERE `serv_users_key` = %d" %userid) - rs = cursor.fetchone() - ret = False - if ((rs != None) and (rs[0] == 'admin')): - ret = True - except: - # There was an error in the database connection: - syslog(LOG_WARNING, "FlockLab is_admin() error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - - return ret + # Get the addresses from the database: + try: + cursor.execute("SELECT `role` FROM `tbl_serv_users` WHERE `serv_users_key` = %d" %userid) + rs = cursor.fetchone() + ret = False + if ((rs != None) and (rs[0] == 'admin')): + ret = True + except: + # There was an error in the database connection: + syslog(LOG_WARNING, "FlockLab is_admin() error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + + return ret ### END is_admin() @@ -173,31 +173,31 @@ def is_admin(cursor=None, userid=0): # ############################################################################## def is_internal(cursor=None, userid=0): - """Arguments: - cursor: cursor of the database connection to be used for the query - userid: user ID to test - Return value: - On success, True or False - 1 if there is an error in the arguments passed to the function - 2 if there was an error in processing the request - """ - # Check the arguments: - if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(userid) != int) or (userid <= 0)): - return(1) + """Arguments: + cursor: cursor of the database connection to be used for the query + userid: user ID to test + Return value: + On success, True or False + 1 if there is an error in the arguments passed to the function + 2 if there was an error in processing the request + """ + # Check the arguments: + if ((type(cursor) != MySQLdb.cursors.Cursor) or (type(userid) != int) or (userid <= 0)): + return(1) - # Get the addresses from the database: - try: - cursor.execute("SELECT `role` FROM `tbl_serv_users` WHERE `serv_users_key` = %d" %userid) - rs = cursor.fetchone() - ret = False - if ((rs != None) and (rs[0] == 'internal')): - ret = True - except: - # There was an error in the database connection: - syslog(LOG_WARNING, "FlockLab is_internal() error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - return(2) - - return ret + # Get the addresses from the database: + try: + cursor.execute("SELECT `role` FROM `tbl_serv_users` WHERE `serv_users_key` = %d" %userid) + rs = cursor.fetchone() + ret = False + if ((rs != None) and (rs[0] == 'internal')): + ret = True + except: + # There was an error in the database connection: + syslog(LOG_WARNING, "FlockLab is_internal() error: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + return(2) + + return ret ### END is_internal() @@ -207,610 +207,610 @@ def is_internal(cursor=None, userid=0): # ############################################################################## def main(argv): - quiet = False - userid = None - xmlpath = None - schemapath = None - testid = None - isadmin = False - isinternal = False - - # Open the log and create logger: - try: - logging.config.fileConfig(scriptpath + '/logging.conf') - logger = logging.getLogger(os.path.basename(__file__)) - if debug: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) - except: - syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" % (os.path.basename(__file__), 
str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - print("Failed to open logger because: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - sys.exit(errno.EAGAIN) - - # Get the config file: - config = get_config() - if not config: - logger.warn("Could not read configuration file. Exiting...") - sys.exit(errno.EAGAIN) - - # Get command line parameters. - try: - opts, args = getopt.getopt(argv, "hqu:s:x:t:", ["help", "quiet", "userid=", "schema=", "xml=", "testid="]) - except getopt.GetoptError as err: - logger.warn(str(err)) - usage(config) - sys.exit(errno.EINVAL) - for opt, arg in opts: - if opt in ("-u", "--userid"): - try: - userid = int(arg) - if userid <= 0: - raise Error - except: - logger.warn("Wrong API usage: userid has to be a positive number") - sys.exit(errno.EINVAL) - elif opt in ("-t", "--testid"): - try: - testid = int(arg) - if testid <= 0: - raise Error - except: - logger.warn("Wrong API usage: testid has to be a positive number") - sys.exit(errno.EINVAL) - elif opt in ("-s", "--schema"): - schemapath = arg - if (not os.path.exists(schemapath) or not os.path.isfile(schemapath)): - logger.warn("Wrong API usage: schema file '%s' does not exist" % schemapath) - sys.exit(errno.EINVAL) - elif opt in ("-x", "--xml"): - xmlpath = arg - if (not os.path.exists(xmlpath) or not os.path.isfile(xmlpath)): - logger.warn("Wrong API usage: XML file does not exist") - sys.exit(errno.EINVAL) - elif opt in ("-h", "--help"): - usage(config) - sys.exit(SUCCESS) - elif opt in ("-q", "--quiet"): - quiet = True - else: - if not quiet: - print("Wrong API usage") - usage(config) - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Check mandatory arguments: - if ( ((not testid) and (not xmlpath)) or ((xmlpath) and (not userid)) ): - if not quiet: - print("Wrong API usage") - usage(config) - logger.warn("Wrong API usage") - sys.exit(errno.EINVAL) - - # Set the schemapath: - if not schemapath: - schemapath = config.get('xml', 'schemapath') - - # check if xmllint is installed - try: - subprocess.check_call(['which', 'xmllint'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - # if check_call doesn't raise an exception, then return code was zero (success) - except: - if not quiet: - print("xmllint not found!") - logger.warn("xmllint not found!") - sys.exit(errno.EINVAL) - - # Connect to the DB: - try: - db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) - cursor = db.cursor() - except: - logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) - sys.exit(errno.EAGAIN) - - # Check if the user is admin: - isadmin = is_admin(cursor, userid) - if isadmin not in (True, False): - logger.warn("Could not determine if user is admin or not. Error %d occurred. Exiting..." %isadmin) - sys.exit(errno.EAGAIN) - - # Check if the user is internal: - isinternal = is_internal(cursor, userid) - if isinternal not in (True, False): - logger.warn("Could not determine if user is internal or not. Error %d occurred. Exiting..." 
%isinternal) - sys.exit(errno.EAGAIN) - - # Initialize error counter and set timezone to UTC: - errcnt = 0; - os.environ['TZ'] = 'UTC' - time.tzset() - - logger.debug("Checking xml config...") - - #=========================================================================== - # If a testid was given, get the xml from the database - #=========================================================================== - if testid: - # Get the XML from the database, put it into a temp file and set the xmlpath accordingly: - (fd, xmlpath) = tempfile.mkstemp() - cursor.execute("SELECT `testconfig_xml`, `owner_fk` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) - ret = cursor.fetchone() - if not ret: - if not quiet: - print(("No test found in database with testid %d. Exiting..." %testid)) - errcnt = errcnt + 1 - else: - xmlfile = os.fdopen(fd, 'w+') - xmlfile.write(ret[0]) - xmlfile.close() - userid = int(ret[1]) - - #=========================================================================== - # Validate the XML against the XML schema - #=========================================================================== - if errcnt == 0: - try: - p = subprocess.Popen(['xmllint', '--noout', xmlpath, '--schema', schemapath], stdout=subprocess.PIPE, stderr= subprocess.PIPE, universal_newlines=True) - stdout, stderr = p.communicate() - for err in stderr.split('\n'): - tmp = err.split(':') - if len(tmp) >= 7: - if not quiet: - print(("<b>Line " + tmp[1] + "</b>:" + tmp[2] + ":" + ":".join(tmp[6:]))) - errcnt = errcnt + 1 - elif not ((err.find('fails to validate') != -1) or (err.find('validates') != -1) or (err == '\n') or (err == '')): - if not quiet: - print(err) - errcnt = errcnt + 1 - except: - exc_type, exc_obj, exc_tb = sys.exc_info() - fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] - print(("%s %s, %s %s" % (exc_type, sys.exc_info()[1], fname, exc_tb.tb_lineno))) - errcnt = errcnt + 1 - - #=========================================================================== - # If XML is valid, do additional checks on <generalConf> and <targetConf> elements - #=========================================================================== - if errcnt == 0: - # generalConf additional validation ------------------------------------------- - # * If specified, start time has to be in the future - # * If specified, end time has to be after start time - f = open(xmlpath, 'r') - parser = etree.XMLParser(remove_comments=True) - tree = etree.parse(f, parser) - f.close() - ns = {'d': config.get('xml', 'namespace')} - # additional check for the namespace - m = re.match('\{.*\}', tree.getroot().tag) - if not m: - print("Failed to extract namespace from XML file.") - errcnt = errcnt + 1 - m = m.group(0)[1:-1] # remove braces - if m != ns['d']: - print("Namespace in XML file does not match: found '%s', expected '%s'." 
% (m, ns['d'])) - errcnt = errcnt + 1 - # check xs:list items (obsIds, targetIds) for whitespace as separator - for l in tree.xpath('//d:*/d:obsIds', namespaces=ns) + tree.xpath('//d:*/d:targetIds', namespaces=ns): - if l.text.find('\t')>=0: - if not quiet: - print("<b>Element obsIds/targetIds</b>: Id lists must not have tabs as separators.") - errcnt = errcnt + 1 - - if errcnt == 0: - sched_abs = tree.xpath('//d:generalConf/d:scheduleAbsolute', namespaces=ns) - sched_asap = tree.xpath('//d:generalConf/d:scheduleAsap', namespaces=ns) - if sched_abs: - # The start date and time have to be in the future: - rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:start', namespaces=ns) - now = time.time() - testStart = getXmlTimestamp(rs[0].text) - if (testStart <= now): - if not quiet: - print("<b>Element generalConf</b>: Start time has to be in the future.") - errcnt = errcnt + 1 - # The end date and time have to be in the future and after the start: - rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:end', namespaces=ns) - testEnd = getXmlTimestamp(rs[0].text) - if (testEnd <= testStart): - if not quiet: - print("<b>Element generalConf</b>: End time has to be after start time.") - errcnt = errcnt + 1 - # Calculate the test duration which is needed later on: - testDuration = testEnd - testStart - elif sched_asap: - testDuration = int(tree.xpath('//d:generalConf/d:scheduleAsap/d:durationSecs', namespaces=ns)[0].text) - - # targetConf additional validation -------------------------------------------- - # * DB image ids need to be in the database and binary field must not be empty - # * Embedded image ids need to be in elements in the XML and need to be valid and correct type - # * Observer ids need to have the correct target adaptor installed and must be unique - # * If specified, number of target ids need to be the same as observer ids - # * There must be a target image provided for every mandatory core (usually core 0, core 0-3 for DPP) - - # Loop through all targetConf elements: - obsidlist = [] - obsiddict = {} - targetconfs = tree.xpath('//d:targetConf', namespaces=ns) - for targetconf in targetconfs: - targetids = None - dbimageid = None - embimageid = None - # Get elements: - obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() - ret = targetconf.xpath('d:targetIds', namespaces=ns) - if ret: - targetids = ret[0].text.split() - targetids_line = ret[0].sourceline - ret = targetconf.xpath('d:dbImageId', namespaces=ns) - if ret: - dbimageid = [o.text for o in ret] - dbimageid_line = [o.sourceline for o in ret] - ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) - if ret: - embimageid = [o.text for o in ret] - embimageid_line = [o.sourceline for o in ret] - - # Put obsids into obsidlist: - for obsid in obsids: - obsidlist.append(obsid) - - # If target ids are present, there need to be as many as observer ids: - if (targetids and (len(targetids) != len(obsids))): - if not quiet: - print(("<b>Line %d</b>: element targetIds: If element targetIds is used, it needs the same amount of IDs as in the corresponding element obsIds." 
%(targetids_line))) - errcnt = errcnt + 1 - - # If DB image IDs are present, check if they are in the database and belong to the user (if he is not an admin) and get values for later use: - if dbimageid: - for dbimg, line in zip(dbimageid, dbimageid_line): - sql = """ SELECT b.name, c.name, a.core - FROM `tbl_serv_targetimages` AS a - LEFT JOIN `tbl_serv_operatingsystems` AS b - ON a.operatingsystems_fk = b.serv_operatingsystems_key - LEFT JOIN `tbl_serv_platforms` AS c - ON a.platforms_fk = c.serv_platforms_key - WHERE (a.`serv_targetimages_key` = %s AND a.`binary` IS NOT NULL)""" %(dbimg) - if not isadmin: - sql += " AND (a.`owner_fk` = %s)"%(userid) - cursor.execute(sql) - ret = cursor.fetchone() - if not ret: - if not quiet: - print(("<b>Line %d</b>: element dbImageId: The image with ID %s does not exist in the database or does not belong to you." %(line, str(dbimg)))) - errcnt = errcnt + 1 - else: - # Put data into dictionary for later use: - core = int(ret[2]) - for obsid in obsids: - if obsid not in obsiddict: - obsiddict[obsid] = {} - if core in obsiddict[obsid]: - if not quiet: - print(("<b>Line %d</b>: element dbImageId: There is already an image for core %d (image with ID %s)." %(line, core, str(dbimg)))) - errcnt = errcnt + 1 - else: - obsiddict[obsid][core]=ret[:2] - - # If embedded image IDs are present, check if they have a corresponding <imageConf> which is valid: - if embimageid: - for embimg, line in zip(embimageid, embimageid_line): - imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' %(embimg), namespaces=ns) - if not imageconf: - if not quiet: - print(("<b>Line %d</b>: element embeddedImageId: There is no corresponding element imageConf with embeddedImageId %s defined." %(line, embimg))) - errcnt = errcnt + 1 - else: - # Get os and platform and put it into dictionary for later use: - if imageconf[0].xpath('d:os', namespaces=ns): - opersys = imageconf[0].xpath('d:os', namespaces=ns)[0].text - else: - opersys = 'other' - platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text - try: - core = int(imageconf[0].xpath('d:core', namespaces=ns)[0].text) - except: - # not a mandatory field, use the default value - core = 0 - logger.debug("Target image for platform %s (core %d) found." % (platform, core)) - for obsid in obsids: - if obsid not in obsiddict: - obsiddict[obsid] = {} - if core in obsiddict[obsid]: - if not quiet: - print(("<b>Line %d</b>: element dbImageId: There is already an image for core %d (image with ID %s)." %(line, core, str(embimg)))) - errcnt = errcnt + 1 - obsiddict[obsid][core]=(opersys, platform) - # Get the image and save it to a temporary file: - image = imageconf[0].xpath('d:data', namespaces=ns)[0].text - # For target platform DPP2LoRa, the <data> tag may be empty - if len(image.strip()) == 0 and (platform.lower() == "dpp2lora" or platform.lower() == "dpp2lorahg"): - continue # skip image validation - image_line = imageconf[0].xpath('d:data', namespaces=ns)[0].sourceline - (fd, imagefilename) = tempfile.mkstemp() - imagefile = os.fdopen(fd, 'w+b') - if not imagefile: - print("Failed to create file %s." % (imagefilename)) - imagefile.write(base64.b64decode(image, None)) - imagefile.close() - # Validate image: - p = subprocess.Popen([config.get('targetimage', 'imagevalidator'), '--quiet', '--image', imagefilename, '--platform', platform], stderr=subprocess.PIPE) - stdout, stderr = p.communicate() - if p.returncode != SUCCESS: - if not quiet: - print(("<b>Line %d</b>: element data: Validation of image data failed. 
Check if elements os and platform are set correctly and if element <data> contains correct data." %(image_line))) - errcnt = errcnt + 1 - # Remove temporary file: - os.remove(imagefilename) - - # Check if no observers are in the list multiple times and if every observer has the correct target adapter installed: - obsidlist = list(set(obsidlist)) - (obsids, duplicates, allInList) = checkObsids(tree, '//d:targetConf/d:obsIds', ns, obsidlist) - if duplicates: - if not quiet: - print("<b>Element targetConf</b>: Some observer IDs have been used more than once.") - errcnt = errcnt + 1 - else: - usedObsidsList = sorted(obsids) - # Now that we have the list, check the observer types: - sql_adap = """SELECT `b`.`tg_adapt_types_fk`, `c`.`tg_adapt_types_fk`, `d`.`tg_adapt_types_fk`, `e`.`tg_adapt_types_fk` - FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` - WHERE - (`a`.`observer_id` = %s) - AND (`a`.`status` IN (%s)) - """ - sql_platf = """SELECT COUNT(*) - FROM `tbl_serv_tg_adapt_types` AS `a` - LEFT JOIN `tbl_serv_platforms` AS `b` ON `a`.`platforms_fk` = `b`.`serv_platforms_key` - WHERE - (`a`.`serv_tg_adapt_types_key` = %s) - AND (LOWER(`b`.`name`) = LOWER('%s')) - """ - sql_cores = """SELECT core, optional - FROM `tbl_serv_platforms` AS `b` - LEFT JOIN `tbl_serv_architectures` AS `a` ON `a`.`platforms_fk` = `b`.`serv_platforms_key` - WHERE (LOWER(`b`.`name`) = LOWER('%s')) - """ - for obsid in usedObsidsList: - if obsid in obsiddict: - platf = next(iter(obsiddict[obsid].values()))[1].lower() - opersys = next(iter(obsiddict[obsid].values()))[0].lower() - for p in obsiddict[obsid].values(): - if platf!=p[1].lower(): - if not quiet: - print(("<b>Element targetConf</b>: Observer ID %s has images of several platform types assigned." %(obsid))) - errcnt = errcnt + 1 - break - #if opersys!=p[0].lower(): - # if not quiet: - # print(("<b>Element targetConf</b>: Observer ID %s has images of several operating system types assigned." %(obsid))) - # errcnt = errcnt + 1 - # break - else: - platf = None - # Get tg_adapt_types_fk of installed target adaptors on observer: - stati = "'online'" - if isadmin: - stati += ", 'develop', 'internal'" - elif isinternal: - stati += ", 'internal'" - cursor.execute(sql_adap %(obsid, stati)) - adaptTypesFk = cursor.fetchone() - # If no results are returned, it most probably means that the observer is not active at the moment: - if not adaptTypesFk: - if not quiet: - print(("<b>Element targetConf</b>: Observer ID %s cannot be used at the moment." %(obsid))) - errcnt = errcnt + 1 - elif adaptTypesFk and platf: - # Cycle through the adaptors which are attached to the observer and try to find one that can be used with the requested platform: - adaptFound = False - for adapt in adaptTypesFk: - # Only check for entries which are not null: - if adapt: - cursor.execute(sql_platf %(adapt, platf)) - rs = cursor.fetchone() - if (rs[0] > 0): - adaptFound = True - break - if not adaptFound: - if not quiet: - print(("<b>Element targetConf</b>: Observer ID %s has currently no target adapter for %s installed." 
%(obsid, platf))) - errcnt = errcnt + 1 - if platf is not None: - cursor.execute(sql_cores %(platf)) - core_info = cursor.fetchall() - all_cores = [row[0] for row in core_info] - required_cores = [row[0] for row in [row for row in core_info if row[1]==0]] - provided_cores = list(obsiddict[obsid].keys()) - if not set(required_cores).issubset(set(provided_cores)): - if not quiet: - print(("<b>Element targetConf</b>: Not enough target images provided for Observer ID %s. Platform %s requires images for cores %s." %(obsid, platf, ','.join(map(str,required_cores))))) - errcnt = errcnt + 1 - if not set(provided_cores).issubset(set(all_cores)): - if not quiet: - print(("<b>Element targetConf</b>: Excess target images specified on Observer ID %s. Platform %s requires images for cores %s." %(obsid, platf, ','.join(map(str,required_cores))))) - errcnt = errcnt + 1 - - #=========================================================================== - # If there are still no errors, do additional test on the remaining elements - #=========================================================================== - if errcnt == 0: - # serialConf additional validation -------------------------------------- - # * observer ids need to have a targetConf associated and must be unique - # * check port depending on platform - - # Check observer ids: - (ids, duplicates, allInList) = checkObsids(tree, '//d:serialConf/d:obsIds', ns, obsidlist) - if duplicates: - if not quiet: - print("<b>Element serialConf</b>: Some observer IDs have been used more than once.") - errcnt = errcnt + 1 - if not allInList: - if not quiet: - print("<b>Element serialConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") - errcnt = errcnt + 1 - - # gpioTracingConf additional validation --------------------------------------- - # * observer ids need to have a targetConf associated and must be unique - # * Every (pin, edge) combination can only be used once. + quiet = False + userid = None + xmlpath = None + schemapath = None + testid = None + isadmin = False + isinternal = False + + # Open the log and create logger: + try: + logging.config.fileConfig(scriptpath + '/logging.conf') + logger = logging.getLogger(os.path.basename(__file__)) + if debug: + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.INFO) + except: + syslog.syslog(syslog.LOG_ERR, "%s: Could not open logger because: %s: %s" % (os.path.basename(__file__), str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + print("Failed to open logger because: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + sys.exit(errno.EAGAIN) + + # Get the config file: + config = get_config() + if not config: + logger.warn("Could not read configuration file. Exiting...") + sys.exit(errno.EAGAIN) + + # Get command line parameters. 
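# --- Illustrative sketch, not part of the patch ------------------------------
# The per-observer core check in this validator compares the cores for which
# images were supplied against the cores the platform defines in
# tbl_serv_architectures: every non-optional core must be covered, and no image
# may target an unknown core. The set arithmetic is shown with made-up values.

all_cores = {0, 1, 2, 3}        # cores the platform defines
required_cores = {0, 1}         # cores not flagged as optional
provided_cores = {0, 1, 3}      # cores for which the test supplies images

missing = required_cores - provided_cores   # empty -> all mandatory cores covered
excess = provided_cores - all_cores         # empty -> no image for an unknown core

assert not missing and not excess
# ------------------------------------------------------------------------------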
+ try: + opts, args = getopt.getopt(argv, "hqu:s:x:t:", ["help", "quiet", "userid=", "schema=", "xml=", "testid="]) + except getopt.GetoptError as err: + logger.warn(str(err)) + usage(config) + sys.exit(errno.EINVAL) + for opt, arg in opts: + if opt in ("-u", "--userid"): + try: + userid = int(arg) + if userid <= 0: + raise Error + except: + logger.warn("Wrong API usage: userid has to be a positive number") + sys.exit(errno.EINVAL) + elif opt in ("-t", "--testid"): + try: + testid = int(arg) + if testid <= 0: + raise Error + except: + logger.warn("Wrong API usage: testid has to be a positive number") + sys.exit(errno.EINVAL) + elif opt in ("-s", "--schema"): + schemapath = arg + if (not os.path.exists(schemapath) or not os.path.isfile(schemapath)): + logger.warn("Wrong API usage: schema file '%s' does not exist" % schemapath) + sys.exit(errno.EINVAL) + elif opt in ("-x", "--xml"): + xmlpath = arg + if (not os.path.exists(xmlpath) or not os.path.isfile(xmlpath)): + logger.warn("Wrong API usage: XML file does not exist") + sys.exit(errno.EINVAL) + elif opt in ("-h", "--help"): + usage(config) + sys.exit(SUCCESS) + elif opt in ("-q", "--quiet"): + quiet = True + else: + if not quiet: + print("Wrong API usage") + usage(config) + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Check mandatory arguments: + if ( ((not testid) and (not xmlpath)) or ((xmlpath) and (not userid)) ): + if not quiet: + print("Wrong API usage") + usage(config) + logger.warn("Wrong API usage") + sys.exit(errno.EINVAL) + + # Set the schemapath: + if not schemapath: + schemapath = config.get('xml', 'schemapath') + + # check if xmllint is installed + try: + subprocess.check_call(['which', 'xmllint'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # if check_call doesn't raise an exception, then return code was zero (success) + except: + if not quiet: + print("xmllint not found!") + logger.warn("xmllint not found!") + sys.exit(errno.EINVAL) + + # Connect to the DB: + try: + db = MySQLdb.connect(host=config.get('database','host'), user=config.get('database','user'), passwd=config.get('database','password'), db=config.get('database','database')) + cursor = db.cursor() + except: + logger.warn("Could not connect to the database because: %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1]))) + sys.exit(errno.EAGAIN) + + # Check if the user is admin: + isadmin = is_admin(cursor, userid) + if isadmin not in (True, False): + logger.warn("Could not determine if user is admin or not. Error %d occurred. Exiting..." %isadmin) + sys.exit(errno.EAGAIN) + + # Check if the user is internal: + isinternal = is_internal(cursor, userid) + if isinternal not in (True, False): + logger.warn("Could not determine if user is internal or not. Error %d occurred. Exiting..." 
%isinternal) + sys.exit(errno.EAGAIN) + + # Initialize error counter and set timezone to UTC: + errcnt = 0; + os.environ['TZ'] = 'UTC' + time.tzset() + + logger.debug("Checking xml config...") + + #=========================================================================== + # If a testid was given, get the xml from the database + #=========================================================================== + if testid: + # Get the XML from the database, put it into a temp file and set the xmlpath accordingly: + (fd, xmlpath) = tempfile.mkstemp() + cursor.execute("SELECT `testconfig_xml`, `owner_fk` FROM `tbl_serv_tests` WHERE (`serv_tests_key` = %s)" %testid) + ret = cursor.fetchone() + if not ret: + if not quiet: + print(("No test found in database with testid %d. Exiting..." %testid)) + errcnt = errcnt + 1 + else: + xmlfile = os.fdopen(fd, 'w+') + xmlfile.write(ret[0]) + xmlfile.close() + userid = int(ret[1]) + + #=========================================================================== + # Validate the XML against the XML schema + #=========================================================================== + if errcnt == 0: + try: + p = subprocess.Popen(['xmllint', '--noout', xmlpath, '--schema', schemapath], stdout=subprocess.PIPE, stderr= subprocess.PIPE, universal_newlines=True) + stdout, stderr = p.communicate() + for err in stderr.split('\n'): + tmp = err.split(':') + if len(tmp) >= 7: + if not quiet: + print(("<b>Line " + tmp[1] + "</b>:" + tmp[2] + ":" + ":".join(tmp[6:]))) + errcnt = errcnt + 1 + elif not ((err.find('fails to validate') != -1) or (err.find('validates') != -1) or (err == '\n') or (err == '')): + if not quiet: + print(err) + errcnt = errcnt + 1 + except: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + print(("%s %s, %s %s" % (exc_type, sys.exc_info()[1], fname, exc_tb.tb_lineno))) + errcnt = errcnt + 1 + + #=========================================================================== + # If XML is valid, do additional checks on <generalConf> and <targetConf> elements + #=========================================================================== + if errcnt == 0: + # generalConf additional validation ------------------------------------------- + # * If specified, start time has to be in the future + # * If specified, end time has to be after start time + f = open(xmlpath, 'r') + parser = etree.XMLParser(remove_comments=True) + tree = etree.parse(f, parser) + f.close() + ns = {'d': config.get('xml', 'namespace')} + # additional check for the namespace + m = re.match('\{.*\}', tree.getroot().tag) + if not m: + print("Failed to extract namespace from XML file.") + errcnt = errcnt + 1 + m = m.group(0)[1:-1] # remove braces + if m != ns['d']: + print("Namespace in XML file does not match: found '%s', expected '%s'." 
% (m, ns['d'])) + errcnt = errcnt + 1 + # check xs:list items (obsIds, targetIds) for whitespace as separator + for l in tree.xpath('//d:*/d:obsIds', namespaces=ns) + tree.xpath('//d:*/d:targetIds', namespaces=ns): + if l.text.find('\t')>=0: + if not quiet: + print("<b>Element obsIds/targetIds</b>: Id lists must not have tabs as separators.") + errcnt = errcnt + 1 + + if errcnt == 0: + sched_abs = tree.xpath('//d:generalConf/d:scheduleAbsolute', namespaces=ns) + sched_asap = tree.xpath('//d:generalConf/d:scheduleAsap', namespaces=ns) + if sched_abs: + # The start date and time have to be in the future: + rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:start', namespaces=ns) + now = time.time() + testStart = getXmlTimestamp(rs[0].text) + if (testStart <= now): + if not quiet: + print("<b>Element generalConf</b>: Start time has to be in the future.") + errcnt = errcnt + 1 + # The end date and time have to be in the future and after the start: + rs = tree.xpath('//d:generalConf/d:scheduleAbsolute/d:end', namespaces=ns) + testEnd = getXmlTimestamp(rs[0].text) + if (testEnd <= testStart): + if not quiet: + print("<b>Element generalConf</b>: End time has to be after start time.") + errcnt = errcnt + 1 + # Calculate the test duration which is needed later on: + testDuration = testEnd - testStart + elif sched_asap: + testDuration = int(tree.xpath('//d:generalConf/d:scheduleAsap/d:durationSecs', namespaces=ns)[0].text) + + # targetConf additional validation -------------------------------------------- + # * DB image ids need to be in the database and binary field must not be empty + # * Embedded image ids need to be in elements in the XML and need to be valid and correct type + # * Observer ids need to have the correct target adaptor installed and must be unique + # * If specified, number of target ids need to be the same as observer ids + # * There must be a target image provided for every mandatory core (usually core 0, core 0-3 for DPP) + + # Loop through all targetConf elements: + obsidlist = [] + obsiddict = {} + targetconfs = tree.xpath('//d:targetConf', namespaces=ns) + for targetconf in targetconfs: + targetids = None + dbimageid = None + embimageid = None + # Get elements: + obsids = targetconf.xpath('d:obsIds', namespaces=ns)[0].text.split() + ret = targetconf.xpath('d:targetIds', namespaces=ns) + if ret: + targetids = ret[0].text.split() + targetids_line = ret[0].sourceline + ret = targetconf.xpath('d:dbImageId', namespaces=ns) + if ret: + dbimageid = [o.text for o in ret] + dbimageid_line = [o.sourceline for o in ret] + ret = targetconf.xpath('d:embeddedImageId', namespaces=ns) + if ret: + embimageid = [o.text for o in ret] + embimageid_line = [o.sourceline for o in ret] + + # Put obsids into obsidlist: + for obsid in obsids: + obsidlist.append(obsid) + + # If target ids are present, there need to be as many as observer ids: + if (targetids and (len(targetids) != len(obsids))): + if not quiet: + print(("<b>Line %d</b>: element targetIds: If element targetIds is used, it needs the same amount of IDs as in the corresponding element obsIds." 
%(targetids_line))) + errcnt = errcnt + 1 + + # If DB image IDs are present, check if they are in the database and belong to the user (if he is not an admin) and get values for later use: + if dbimageid: + for dbimg, line in zip(dbimageid, dbimageid_line): + sql = """ SELECT b.name, c.name, a.core + FROM `tbl_serv_targetimages` AS a + LEFT JOIN `tbl_serv_operatingsystems` AS b + ON a.operatingsystems_fk = b.serv_operatingsystems_key + LEFT JOIN `tbl_serv_platforms` AS c + ON a.platforms_fk = c.serv_platforms_key + WHERE (a.`serv_targetimages_key` = %s AND a.`binary` IS NOT NULL)""" %(dbimg) + if not isadmin: + sql += " AND (a.`owner_fk` = %s)"%(userid) + cursor.execute(sql) + ret = cursor.fetchone() + if not ret: + if not quiet: + print(("<b>Line %d</b>: element dbImageId: The image with ID %s does not exist in the database or does not belong to you." %(line, str(dbimg)))) + errcnt = errcnt + 1 + else: + # Put data into dictionary for later use: + core = int(ret[2]) + for obsid in obsids: + if obsid not in obsiddict: + obsiddict[obsid] = {} + if core in obsiddict[obsid]: + if not quiet: + print(("<b>Line %d</b>: element dbImageId: There is already an image for core %d (image with ID %s)." %(line, core, str(dbimg)))) + errcnt = errcnt + 1 + else: + obsiddict[obsid][core]=ret[:2] + + # If embedded image IDs are present, check if they have a corresponding <imageConf> which is valid: + if embimageid: + for embimg, line in zip(embimageid, embimageid_line): + imageconf = tree.xpath('//d:imageConf/d:embeddedImageId[text()="%s"]/..' %(embimg), namespaces=ns) + if not imageconf: + if not quiet: + print(("<b>Line %d</b>: element embeddedImageId: There is no corresponding element imageConf with embeddedImageId %s defined." %(line, embimg))) + errcnt = errcnt + 1 + else: + # Get os and platform and put it into dictionary for later use: + if imageconf[0].xpath('d:os', namespaces=ns): + opersys = imageconf[0].xpath('d:os', namespaces=ns)[0].text + else: + opersys = 'other' + platform = imageconf[0].xpath('d:platform', namespaces=ns)[0].text + try: + core = int(imageconf[0].xpath('d:core', namespaces=ns)[0].text) + except: + # not a mandatory field, use the default value + core = 0 + logger.debug("Target image for platform %s (core %d) found." % (platform, core)) + for obsid in obsids: + if obsid not in obsiddict: + obsiddict[obsid] = {} + if core in obsiddict[obsid]: + if not quiet: + print(("<b>Line %d</b>: element dbImageId: There is already an image for core %d (image with ID %s)." %(line, core, str(embimg)))) + errcnt = errcnt + 1 + obsiddict[obsid][core]=(opersys, platform) + # Get the image and save it to a temporary file: + image = imageconf[0].xpath('d:data', namespaces=ns)[0].text + # For target platform DPP2LoRa, the <data> tag may be empty + if len(image.strip()) == 0 and (platform.lower() == "dpp2lora" or platform.lower() == "dpp2lorahg"): + continue # skip image validation + image_line = imageconf[0].xpath('d:data', namespaces=ns)[0].sourceline + (fd, imagefilename) = tempfile.mkstemp() + imagefile = os.fdopen(fd, 'w+b') + if not imagefile: + print("Failed to create file %s." % (imagefilename)) + imagefile.write(base64.b64decode(image, None)) + imagefile.close() + # Validate image: + p = subprocess.Popen([config.get('targetimage', 'imagevalidator'), '--quiet', '--image', imagefilename, '--platform', platform], stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if p.returncode != SUCCESS: + if not quiet: + print(("<b>Line %d</b>: element data: Validation of image data failed. 
Check if elements os and platform are set correctly and if element <data> contains correct data." %(image_line))) + errcnt = errcnt + 1 + # Remove temporary file: + os.remove(imagefilename) + + # Check if no observers are in the list multiple times and if every observer has the correct target adapter installed: + obsidlist = list(set(obsidlist)) + (obsids, duplicates, allInList) = checkObsids(tree, '//d:targetConf/d:obsIds', ns, obsidlist) + if duplicates: + if not quiet: + print("<b>Element targetConf</b>: Some observer IDs have been used more than once.") + errcnt = errcnt + 1 + else: + usedObsidsList = sorted(obsids) + # Now that we have the list, check the observer types: + sql_adap = """SELECT `b`.`tg_adapt_types_fk`, `c`.`tg_adapt_types_fk`, `d`.`tg_adapt_types_fk`, `e`.`tg_adapt_types_fk` + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` + WHERE + (`a`.`observer_id` = %s) + AND (`a`.`status` IN (%s)) + """ + sql_platf = """SELECT COUNT(*) + FROM `tbl_serv_tg_adapt_types` AS `a` + LEFT JOIN `tbl_serv_platforms` AS `b` ON `a`.`platforms_fk` = `b`.`serv_platforms_key` + WHERE + (`a`.`serv_tg_adapt_types_key` = %s) + AND (LOWER(`b`.`name`) = LOWER('%s')) + """ + sql_cores = """SELECT core, optional + FROM `tbl_serv_platforms` AS `b` + LEFT JOIN `tbl_serv_architectures` AS `a` ON `a`.`platforms_fk` = `b`.`serv_platforms_key` + WHERE (LOWER(`b`.`name`) = LOWER('%s')) + """ + for obsid in usedObsidsList: + if obsid in obsiddict: + platf = next(iter(obsiddict[obsid].values()))[1].lower() + opersys = next(iter(obsiddict[obsid].values()))[0].lower() + for p in obsiddict[obsid].values(): + if platf!=p[1].lower(): + if not quiet: + print(("<b>Element targetConf</b>: Observer ID %s has images of several platform types assigned." %(obsid))) + errcnt = errcnt + 1 + break + #if opersys!=p[0].lower(): + # if not quiet: + # print(("<b>Element targetConf</b>: Observer ID %s has images of several operating system types assigned." %(obsid))) + # errcnt = errcnt + 1 + # break + else: + platf = None + # Get tg_adapt_types_fk of installed target adaptors on observer: + stati = "'online'" + if isadmin: + stati += ", 'develop', 'internal'" + elif isinternal: + stati += ", 'internal'" + cursor.execute(sql_adap %(obsid, stati)) + adaptTypesFk = cursor.fetchone() + # If no results are returned, it most probably means that the observer is not active at the moment: + if not adaptTypesFk: + if not quiet: + print(("<b>Element targetConf</b>: Observer ID %s cannot be used at the moment." %(obsid))) + errcnt = errcnt + 1 + elif adaptTypesFk and platf: + # Cycle through the adaptors which are attached to the observer and try to find one that can be used with the requested platform: + adaptFound = False + for adapt in adaptTypesFk: + # Only check for entries which are not null: + if adapt: + cursor.execute(sql_platf %(adapt, platf)) + rs = cursor.fetchone() + if (rs[0] > 0): + adaptFound = True + break + if not adaptFound: + if not quiet: + print(("<b>Element targetConf</b>: Observer ID %s has currently no target adapter for %s installed." 
%(obsid, platf))) + errcnt = errcnt + 1 + if platf is not None: + cursor.execute(sql_cores %(platf)) + core_info = cursor.fetchall() + all_cores = [row[0] for row in core_info] + required_cores = [row[0] for row in [row for row in core_info if row[1]==0]] + provided_cores = list(obsiddict[obsid].keys()) + if not set(required_cores).issubset(set(provided_cores)): + if not quiet: + print(("<b>Element targetConf</b>: Not enough target images provided for Observer ID %s. Platform %s requires images for cores %s." %(obsid, platf, ','.join(map(str,required_cores))))) + errcnt = errcnt + 1 + if not set(provided_cores).issubset(set(all_cores)): + if not quiet: + print(("<b>Element targetConf</b>: Excess target images specified on Observer ID %s. Platform %s requires images for cores %s." %(obsid, platf, ','.join(map(str,required_cores))))) + errcnt = errcnt + 1 + + #=========================================================================== + # If there are still no errors, do additional test on the remaining elements + #=========================================================================== + if errcnt == 0: + # serialConf additional validation -------------------------------------- + # * observer ids need to have a targetConf associated and must be unique + # * check port depending on platform + + # Check observer ids: + (ids, duplicates, allInList) = checkObsids(tree, '//d:serialConf/d:obsIds', ns, obsidlist) + if duplicates: + if not quiet: + print("<b>Element serialConf</b>: Some observer IDs have been used more than once.") + errcnt = errcnt + 1 + if not allInList: + if not quiet: + print("<b>Element serialConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") + errcnt = errcnt + 1 + + # gpioTracingConf additional validation --------------------------------------- + # * observer ids need to have a targetConf associated and must be unique + # * Every (pin, edge) combination can only be used once. - # Check observer ids: - (ids, duplicates, allInList) = checkObsids(tree, '//d:gpioTracingConf/d:obsIds', ns, obsidlist) - if duplicates: - if not quiet: - print("<b>Element gpioTracingConf</b>: Some observer IDs have been used more than once.") - errcnt = errcnt + 1 - if not allInList: - if not quiet: - print("<b>Element gpioTracingConf</b>: Some observer IDs have been used but do not have a) targetConf element associated with them.") - errcnt = errcnt + 1 - # Check (pin, edge) combinations: - gpiomonconfs = tree.xpath('//d:gpioTracingConf', namespaces=ns) - for gpiomonconf in gpiomonconfs: - combList = [] - pinconfs = gpiomonconf.xpath('d:pinConf', namespaces=ns) - for pinconf in pinconfs: - pin = pinconf.xpath('d:pin', namespaces=ns)[0].text - edge = pinconf.xpath('d:edge', namespaces=ns)[0].text - combList.append((pin, edge)) - if (len(combList) != len(set(combList))): - if not quiet: - print(("<b>Line %d</b>: element gpioTracingConf: Every (pin, edge) combination can only be used once per observer configuration." 
%(gpiomonconf.sourceline))) - errcnt = errcnt + 1 - - - # gpioActuationConf additional validation --------------------------- - # * observer ids need to have a targetConf associated and must be unique - # * relative timing commands cannot be after the test end - # * absolute timing commands need to be between test start and end and are not allowed for ASAP test scheduling - - # Check observer ids: - (ids, duplicates, allInList) = checkObsids(tree, '//d:gpioActuationConf/d:obsIds', ns, obsidlist) - if duplicates: - if not quiet: - print("<b>Element gpioActuationConf</b>: Some observer IDs have been used more than once.") - errcnt = errcnt + 1 - if not allInList: - if not quiet: - print("<b>Element gpioActuationConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") - errcnt = errcnt + 1 - # Check relative timings: - rs = tree.xpath('//d:gpioActuationConf/d:pinConf/d:relativeTime/d:offsetSecs', namespaces=ns) - for elem in rs: - if (int(elem.text) > testDuration): - if not quiet: - print(("<b>Line %d</b>: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - # Check absolute timings: - rs = tree.xpath('//d:gpioActuationConf/d:pinConf/d:absoluteTime/d:absoluteDateTime', namespaces=ns) - for elem in rs: - if sched_asap: - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: For test scheduling method ASAP, only relative timed actions are allowed." %(elem.sourceline))) - errcnt = errcnt + 1 - else: - eventTime = getXmlTimestamp(elem.text) - if (eventTime > testEnd): - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled after the test ends, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - elif (eventTime < testStart): - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled before the test starts, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - + # Check observer ids: + (ids, duplicates, allInList) = checkObsids(tree, '//d:gpioTracingConf/d:obsIds', ns, obsidlist) + if duplicates: + if not quiet: + print("<b>Element gpioTracingConf</b>: Some observer IDs have been used more than once.") + errcnt = errcnt + 1 + if not allInList: + if not quiet: + print("<b>Element gpioTracingConf</b>: Some observer IDs have been used but do not have a) targetConf element associated with them.") + errcnt = errcnt + 1 + # Check (pin, edge) combinations: + gpiomonconfs = tree.xpath('//d:gpioTracingConf', namespaces=ns) + for gpiomonconf in gpiomonconfs: + combList = [] + pinconfs = gpiomonconf.xpath('d:pinConf', namespaces=ns) + for pinconf in pinconfs: + pin = pinconf.xpath('d:pin', namespaces=ns)[0].text + edge = pinconf.xpath('d:edge', namespaces=ns)[0].text + combList.append((pin, edge)) + if (len(combList) != len(set(combList))): + if not quiet: + print(("<b>Line %d</b>: element gpioTracingConf: Every (pin, edge) combination can only be used once per observer configuration." 
%(gpiomonconf.sourceline))) + errcnt = errcnt + 1 + + + # gpioActuationConf additional validation --------------------------- + # * observer ids need to have a targetConf associated and must be unique + # * relative timing commands cannot be after the test end + # * absolute timing commands need to be between test start and end and are not allowed for ASAP test scheduling + + # Check observer ids: + (ids, duplicates, allInList) = checkObsids(tree, '//d:gpioActuationConf/d:obsIds', ns, obsidlist) + if duplicates: + if not quiet: + print("<b>Element gpioActuationConf</b>: Some observer IDs have been used more than once.") + errcnt = errcnt + 1 + if not allInList: + if not quiet: + print("<b>Element gpioActuationConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") + errcnt = errcnt + 1 + # Check relative timings: + rs = tree.xpath('//d:gpioActuationConf/d:pinConf/d:relativeTime/d:offsetSecs', namespaces=ns) + for elem in rs: + if (int(elem.text) > testDuration): + if not quiet: + print(("<b>Line %d</b>: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." %(elem.sourceline))) + errcnt = errcnt + 1 + # Check absolute timings: + rs = tree.xpath('//d:gpioActuationConf/d:pinConf/d:absoluteTime/d:absoluteDateTime', namespaces=ns) + for elem in rs: + if sched_asap: + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: For test scheduling method ASAP, only relative timed actions are allowed." %(elem.sourceline))) + errcnt = errcnt + 1 + else: + eventTime = getXmlTimestamp(elem.text) + if (eventTime > testEnd): + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled after the test ends, thus the action will never take place." %(elem.sourceline))) + errcnt = errcnt + 1 + elif (eventTime < testStart): + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled before the test starts, thus the action will never take place." 
%(elem.sourceline))) + errcnt = errcnt + 1 + - # powerProfilingConf additional validation ----------------------------------------- - # * observer ids need to have a targetConf associated and must be unique - # * relative timing commands cannot be after the test end - # * absolute timing commands need to be between test start and end and are not allowed for ASAP test scheduling + # powerProfilingConf additional validation ----------------------------------------- + # * observer ids need to have a targetConf associated and must be unique + # * relative timing commands cannot be after the test end + # * absolute timing commands need to be between test start and end and are not allowed for ASAP test scheduling - # Check observer ids: - (ids, duplicates, allInList) = checkObsids(tree, '//d:powerProfilingConf/d:obsIds', ns, obsidlist) - if duplicates: - if not quiet: - print("<b>Element powerProfilingConf</b>: Some observer IDs have been used more than once.") - errcnt = errcnt + 1 - if not allInList: - if not quiet: - print("<b>Element powerProfilingConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") - errcnt = errcnt + 1 - # Check relative timings: - rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:relativeTime/d:offsetSecs', namespaces=ns) - for elem in rs: - ppMicroSecs = elem.getparent().find('d:offsetMicrosecs', namespaces=ns) - if ppMicroSecs is not None: - ppStart = float(ppMicroSecs.text) / 1000000 + int(elem.text) - else: - ppStart = int(elem.text) - elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns) - if (ppStart > testDuration): - if not quiet: - print(("<b>Line %d</b>: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - elif (ppStart + int(elem2.text)/1000 > testDuration): - if not quiet: - print(("<b>Line %d</b>: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline))) - errcnt = errcnt + 1 - # Check absolute timings: - rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:absoluteTime/d:absoluteDateTime', namespaces=ns) - for elem in rs: - if sched_asap: - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: For test scheduling method ASAP, only relative timed actions are allowed." %(elem.sourceline))) - errcnt = errcnt + 1 - else: - ppMicroSecs = elem.getparent().find('d:absoluteMicrosecs', namespaces=ns) - eventTime = getXmlTimestamp(elem.text) - if ppMicroSecs is not None: - ppStart = float(ppMicroSecs.text) / 1000000 + eventTime - else: - ppStart = eventTime - elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns) - if (ppStart > testEnd): - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled after the test ends, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - elif (ppStart < testStart): - if not quiet: - print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled before the test starts, thus the action will never take place." %(elem.sourceline))) - errcnt = errcnt + 1 - elif (ppStart + int(elem2.text)/1000 > testEnd): - if not quiet: - print(("<b>Line %d</b>: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline))) - errcnt = errcnt + 1 - - #=========================================================================== - # All additional tests finished. Clean up and exit. 
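
All of the checks in this validator rely on the same lxml idiom: the test namespace read from the configuration file is bound to the prefix d, and every element lookup goes through namespace-aware XPath. The following minimal sketch shows that pattern in isolation; the namespace URI and the XML snippet are invented for illustration and do not come from the FlockLab schema.

    # Sketch of the namespace-bound XPath pattern used by the validator.
    # The URI and element names below are placeholders, not the real schema.
    from lxml import etree

    XML = b"""<testConf xmlns="http://example.org/flocklab-test">
      <generalConf>
        <scheduleAbsolute>
          <start>2020-01-01T10:00:00Z</start>
          <end>2020-01-01T10:30:00Z</end>
        </scheduleAbsolute>
      </generalConf>
    </testConf>"""

    parser = etree.XMLParser(remove_comments=True)   # comments are stripped, as in the validator
    root = etree.fromstring(XML, parser)
    ns = {'d': 'http://example.org/flocklab-test'}   # the validator uses config.get('xml', 'namespace')

    start = root.xpath('//d:generalConf/d:scheduleAbsolute/d:start', namespaces=ns)[0].text
    end = root.xpath('//d:generalConf/d:scheduleAbsolute/d:end', namespaces=ns)[0].text
    print(start, end)

Without the namespaces mapping, the same XPath expression would return an empty list, which is why every query in the code above carries namespaces=ns.
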
- #=========================================================================== - if db.open: - db.close() - - # If there is a temp XML file, delete it: - if testid: - os.remove(xmlpath) - - logger.debug("Validation finished (%u errors)." % errcnt) - - if errcnt == 0: - ret = SUCCESS - else: - err_str = "<b>Number of errors: %d</b>. It is possible that there are more errors which could not be detected due to dependencies from above listed errors."%errcnt - logger.debug(err_str) - if not quiet: - print(err_str) - ret = errno.EBADMSG - sys.exit(ret) + # Check observer ids: + (ids, duplicates, allInList) = checkObsids(tree, '//d:powerProfilingConf/d:obsIds', ns, obsidlist) + if duplicates: + if not quiet: + print("<b>Element powerProfilingConf</b>: Some observer IDs have been used more than once.") + errcnt = errcnt + 1 + if not allInList: + if not quiet: + print("<b>Element powerProfilingConf</b>: Some observer IDs have been used but do not have a targetConf element associated with them.") + errcnt = errcnt + 1 + # Check relative timings: + rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:relativeTime/d:offsetSecs', namespaces=ns) + for elem in rs: + ppMicroSecs = elem.getparent().find('d:offsetMicrosecs', namespaces=ns) + if ppMicroSecs is not None: + ppStart = float(ppMicroSecs.text) / 1000000 + int(elem.text) + else: + ppStart = int(elem.text) + elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns) + if (ppStart > testDuration): + if not quiet: + print(("<b>Line %d</b>: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." %(elem.sourceline))) + errcnt = errcnt + 1 + elif (ppStart + int(elem2.text)/1000 > testDuration): + if not quiet: + print(("<b>Line %d</b>: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline))) + errcnt = errcnt + 1 + # Check absolute timings: + rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:absoluteTime/d:absoluteDateTime', namespaces=ns) + for elem in rs: + if sched_asap: + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: For test scheduling method ASAP, only relative timed actions are allowed." %(elem.sourceline))) + errcnt = errcnt + 1 + else: + ppMicroSecs = elem.getparent().find('d:absoluteMicrosecs', namespaces=ns) + eventTime = getXmlTimestamp(elem.text) + if ppMicroSecs is not None: + ppStart = float(ppMicroSecs.text) / 1000000 + eventTime + else: + ppStart = eventTime + elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns) + if (ppStart > testEnd): + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled after the test ends, thus the action will never take place." %(elem.sourceline))) + errcnt = errcnt + 1 + elif (ppStart < testStart): + if not quiet: + print(("<b>Line %d</b>: element absoluteDateTime: The action is scheduled before the test starts, thus the action will never take place." %(elem.sourceline))) + errcnt = errcnt + 1 + elif (ppStart + int(elem2.text)/1000 > testEnd): + if not quiet: + print(("<b>Line %d</b>: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline))) + errcnt = errcnt + 1 + + #=========================================================================== + # All additional tests finished. Clean up and exit. 
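
The timing rule applied to powerProfilingConf entries above can be stated independently of the XML plumbing: a relative profiling action is rejected if its start offset lies beyond the test duration, or if the window it opens runs past the end of the test. A stand-alone sketch of that rule follows; the function name, argument names and returned messages are illustrative and not part of the validator.

    # Sketch of the relative-timing rule for power profiling; names and the
    # returned messages are illustrative only.
    def check_profiling_window(offset_secs, duration_millisecs, test_duration_secs,
                               offset_microsecs=None):
        """Return a list of problems; an empty list means the window is acceptable."""
        errors = []
        start = offset_secs + (offset_microsecs / 1e6 if offset_microsecs is not None else 0.0)
        if start > test_duration_secs:
            # The action would never take place: it starts after the test has ended.
            errors.append("offset is bigger than the test duration")
        elif start + duration_millisecs / 1000.0 > test_duration_secs:
            # Profiling starts in time but does not finish before the test ends.
            errors.append("profiling lasts longer than the test")
        return errors

    # Example: a 2 s window starting 59.5 s into a 60 s test.
    print(check_profiling_window(59, 2000, 60, offset_microsecs=500000))

For absolute schedules the same comparison is made against the test start and end timestamps instead of the duration, as in the block above.
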
+ #=========================================================================== + if db.open: + db.close() + + # If there is a temp XML file, delete it: + if testid: + os.remove(xmlpath) + + logger.debug("Validation finished (%u errors)." % errcnt) + + if errcnt == 0: + ret = SUCCESS + else: + err_str = "<b>Number of errors: %d</b>. It is possible that there are more errors which could not be detected due to dependencies from above listed errors."%errcnt + logger.debug(err_str) + if not quiet: + print(err_str) + ret = errno.EBADMSG + sys.exit(ret) ### END main() if __name__ == "__main__": - try: - main(sys.argv[1:]) - except Exception: - print("testconfig validator encountered an error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) - sys.exit(errno.EBADMSG) + try: + main(sys.argv[1:]) + except Exception: + print("testconfig validator encountered an error: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())) + sys.exit(errno.EBADMSG) diff --git a/webserver/user/admin_group_reservation.php b/webserver/user/admin_group_reservation.php index f43b3045caca862f7f49a93b50c22f7a51739dfb..8b616c150340f33d46d9c8d246e00dd4497333bc 100644 --- a/webserver/user/admin_group_reservation.php +++ b/webserver/user/admin_group_reservation.php @@ -1,138 +1,138 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2888 $" - * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" - * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2888 $" + * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" + * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) - exit(1); - if (isset($_POST['resid'])) { - $db = db_connect(); - $sql = "DELETE FROM tbl_serv_reservations where serv_reservation_key=".mysqli_real_escape_string($db, $_POST['resid']); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot delete reservation in database because: ' . mysqli_error($db)); - mysqli_close($db); - } - if (isset($_POST['add_group'])) { - $db = db_connect(); - $sql = 'INSERT INTO tbl_serv_reservations (group_id_fk, time_start, time_end) values ('.mysqli_real_escape_string($db, $_POST['add_group']).',FROM_UNIXTIME('.strtotime(mysqli_real_escape_string($db, $_POST['add_start_time'])).'),FROM_UNIXTIME('.strtotime(mysqli_real_escape_string($db, $_POST['add_end_time'])).'))'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot add reservation in database because: ' . 
mysqli_error($db)); - mysqli_close($db); - } + if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) + exit(1); + if (isset($_POST['resid'])) { + $db = db_connect(); + $sql = "DELETE FROM tbl_serv_reservations where serv_reservation_key=".mysqli_real_escape_string($db, $_POST['resid']); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot delete reservation in database because: ' . mysqli_error($db)); + mysqli_close($db); + } + if (isset($_POST['add_group'])) { + $db = db_connect(); + $sql = 'INSERT INTO tbl_serv_reservations (group_id_fk, time_start, time_end) values ('.mysqli_real_escape_string($db, $_POST['add_group']).',FROM_UNIXTIME('.strtotime(mysqli_real_escape_string($db, $_POST['add_start_time'])).'),FROM_UNIXTIME('.strtotime(mysqli_real_escape_string($db, $_POST['add_end_time'])).'))'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot add reservation in database because: ' . mysqli_error($db)); + mysqli_close($db); + } ?> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - $(document).ready(function() { - $([$("#add_start_time"), $("#add_end_time")]).each(function() { - var otime = $(this).html(); - $(this).empty().append('<input style="width:200px" name="'+this[0].id+'" value="'+otime+'">'); - }); - var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); - $("#pager_num_rows").attr('value', table_rows); - $("#res_overview") - .tablesorter({widgets: ["zebra"] }) - .tablesorterPager({container: $("#pager"), positionFixed: false}); - $(".qtip_show").qtip( { - content: {text: false}, - style : "flocklab", - }); - $("#res_overview").show(); - $.cookie.json = true; - var res_tbl_state; - try { res_tbl_state = $.cookie('flocklab.ressort'); } - catch (err) { - res_tbl_state = null; - } - if ( res_tbl_state == null) { - res_tbl_state = {s: [[0,1]], p: 0}; - } - $("#res_overview").data('tablesorter').page = res_tbl_state.p; - $("#res_overview").trigger("sorton",[res_tbl_state.s]); - $("#res_overview").bind("applyWidgets",function() { - $.cookie('flocklab.ressort', {s:$("#res_overview").data('tablesorter').sortList, p:$("#res_overview").data('tablesorter').page}); - }); - }); + $(document).ready(function() { + $([$("#add_start_time"), $("#add_end_time")]).each(function() { + var otime = $(this).html(); + $(this).empty().append('<input style="width:200px" name="'+this[0].id+'" value="'+otime+'">'); + }); + var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); + $("#pager_num_rows").attr('value', table_rows); + $("#res_overview") + .tablesorter({widgets: ["zebra"] }) + .tablesorterPager({container: $("#pager"), positionFixed: false}); + $(".qtip_show").qtip( { + content: {text: false}, + style : "flocklab", + }); + $("#res_overview").show(); + $.cookie.json = true; + var res_tbl_state; + try { res_tbl_state = $.cookie('flocklab.ressort'); } + catch (err) { + res_tbl_state = null; + } + if ( res_tbl_state == null) { + res_tbl_state = {s: [[0,1]], p: 0}; + } + $("#res_overview").data('tablesorter').page = res_tbl_state.p; + $("#res_overview").trigger("sorton",[res_tbl_state.s]); + $("#res_overview").bind("applyWidgets",function() { + $.cookie('flocklab.ressort', {s:$("#res_overview").data('tablesorter').sortList, p:$("#res_overview").data('tablesorter').page}); + }); + }); </script> <?php echo '<h1>Admin Group Reservations</h1>'; - /* Get all reservations from the database and display them in the table. 
*/ - $db = db_connect(); - $sql = "SELECT serv_groups_key, groupname, GROUP_CONCAT(username SEPARATOR ', ') as group_list FROM (tbl_serv_groups left join tbl_serv_user_groups on (group_fk=serv_groups_key)) left join tbl_serv_users on (user_fk=tbl_serv_users.serv_users_key) GROUP BY group_fk"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); - $groups = array(); - while ($row = mysqli_fetch_array($rs)) { - $groups[$row['serv_groups_key']]=Array('name' => $row['groupname'], 'users' => $row['group_list']); - } - $sql = "SELECT serv_reservation_key, group_id_fk, time_start, time_end, groupname, group_list + /* Get all reservations from the database and display them in the table. */ + $db = db_connect(); + $sql = "SELECT serv_groups_key, groupname, GROUP_CONCAT(username SEPARATOR ', ') as group_list FROM (tbl_serv_groups left join tbl_serv_user_groups on (group_fk=serv_groups_key)) left join tbl_serv_users on (user_fk=tbl_serv_users.serv_users_key) GROUP BY group_fk"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); + $groups = array(); + while ($row = mysqli_fetch_array($rs)) { + $groups[$row['serv_groups_key']]=Array('name' => $row['groupname'], 'users' => $row['group_list']); + } + $sql = "SELECT serv_reservation_key, group_id_fk, time_start, time_end, groupname, group_list FROM tbl_serv_reservations LEFT JOIN ( SELECT serv_groups_key, groupname, GROUP_CONCAT(username SEPARATOR ', ') as group_list FROM (tbl_serv_groups left join tbl_serv_user_groups on (group_fk=serv_groups_key)) left join tbl_serv_users on (user_fk=tbl_serv_users.serv_users_key) GROUP BY serv_groups_key) as groups on (groups.serv_groups_key = group_id_fk) ORDER BY time_start DESC"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); - $nrows = mysqli_num_rows($rs); - mysqli_close($db); - - ?> - <form name="resadd" method="post" action="admin_group_reservation.php"> - <table id="res_overview" class="tablesorter" style="display:none"> - <thead> - <tr> - <th width="200px">Group</th> - <th width="200px">Start</th> - <th width="200px">End</th> - <th width="60px" class='qtip_show' title='Actions'>Actions</th> - </tr> - </thead> - <tbody> - <?php - $i = 0; - $max_len = 30; // maximum length of text before being cut - while ($row = mysqli_fetch_array($rs)) { - $i++; - if ($i%2 == 1) { - echo "<tr class='even'>"; - } else { - echo "<tr class='odd'>"; - } - echo "<td class='qtip_show' title='" . htmlentities($row['group_list']) . "'>" . htmlentities($row['groupname']) . "</td>"; - // Start time - echo "<td title='start time' class='qtip_show'><i class='starttime'>" . date_to_tzdate($row['time_start']). "</i></td>"; - // End time - echo "<td title='end time' class='qtip_show'><i class='endtime'>" . date_to_tzdate($row['time_end']). "</i></td>"; - echo "</td>"; - echo "<td>"; - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete reservation' class='qtip_show' onClick='document.resdel.resid.value = " . $row['serv_reservation_key'] . ";document.resdel.submit();'>"; - echo "</td></tr>"; - } - // add new reservation - $now = new DateTime(); - $now = $now->format('U'); - echo '<tr><td><span style="display:none">0</span><select name="add_group" style="width:200px">'; - foreach ($groups as $idx=>$group) { - echo '<option value="'.$idx.'">' . $group['name'] . 
'</option>'; - } - echo '</select></td><td><span class="time" id="add_start_time">'.$now.'</span></td><td><span class="time" id="add_end_time">'.($now+3600).'</span></td>'; - echo "<td><img src='pics/icons/add.png' height='16px' alt='Add' title='Add reservation' class='qtip_show' onClick='document.resadd.submit();'></td>"; - echo '</tr>'; - ?> - </tbody> - </table> - </form> - <span id="pager" class="pager"> - <img src="pics/icons/first.gif" alt="first" class="first"> - <img src="pics/icons/prev.gif" alt="prev" class="prev"> - <span class="pagedisplay"></span> - <img src="pics/icons/next.gif" alt="next" class="next"> - <img src="pics/icons/last.gif" alt="last" class="last"> - <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> - </span> <br > - <!-- Forms for processing actions --> - <form name="resdel" method="post" action="admin_group_reservation.php"><input type="hidden" name="resid" value=""></form> + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); + $nrows = mysqli_num_rows($rs); + mysqli_close($db); + + ?> + <form name="resadd" method="post" action="admin_group_reservation.php"> + <table id="res_overview" class="tablesorter" style="display:none"> + <thead> + <tr> + <th width="200px">Group</th> + <th width="200px">Start</th> + <th width="200px">End</th> + <th width="60px" class='qtip_show' title='Actions'>Actions</th> + </tr> + </thead> + <tbody> + <?php + $i = 0; + $max_len = 30; // maximum length of text before being cut + while ($row = mysqli_fetch_array($rs)) { + $i++; + if ($i%2 == 1) { + echo "<tr class='even'>"; + } else { + echo "<tr class='odd'>"; + } + echo "<td class='qtip_show' title='" . htmlentities($row['group_list']) . "'>" . htmlentities($row['groupname']) . "</td>"; + // Start time + echo "<td title='start time' class='qtip_show'><i class='starttime'>" . date_to_tzdate($row['time_start']). "</i></td>"; + // End time + echo "<td title='end time' class='qtip_show'><i class='endtime'>" . date_to_tzdate($row['time_end']). "</i></td>"; + echo "</td>"; + echo "<td>"; + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete reservation' class='qtip_show' onClick='document.resdel.resid.value = " . $row['serv_reservation_key'] . ";document.resdel.submit();'>"; + echo "</td></tr>"; + } + // add new reservation + $now = new DateTime(); + $now = $now->format('U'); + echo '<tr><td><span style="display:none">0</span><select name="add_group" style="width:200px">'; + foreach ($groups as $idx=>$group) { + echo '<option value="'.$idx.'">' . $group['name'] . 
'</option>'; + } + echo '</select></td><td><span class="time" id="add_start_time">'.$now.'</span></td><td><span class="time" id="add_end_time">'.($now+3600).'</span></td>'; + echo "<td><img src='pics/icons/add.png' height='16px' alt='Add' title='Add reservation' class='qtip_show' onClick='document.resadd.submit();'></td>"; + echo '</tr>'; + ?> + </tbody> + </table> + </form> + <span id="pager" class="pager"> + <img src="pics/icons/first.gif" alt="first" class="first"> + <img src="pics/icons/prev.gif" alt="prev" class="prev"> + <span class="pagedisplay"></span> + <img src="pics/icons/next.gif" alt="next" class="next"> + <img src="pics/icons/last.gif" alt="last" class="last"> + <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> + </span> <br > + <!-- Forms for processing actions --> + <form name="resdel" method="post" action="admin_group_reservation.php"><input type="hidden" name="resid" value=""></form> <?php do_layout('Group Reservations','Group Reservations'); ?> diff --git a/webserver/user/admin_groups.php b/webserver/user/admin_groups.php index 0e5e9f1cc49605aed8ce887160e4eee0d86fe705..7bb545b54e36c4bee48354205398c7c65b4fbec1 100644 --- a/webserver/user/admin_groups.php +++ b/webserver/user/admin_groups.php @@ -1,111 +1,111 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2888 $" - * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" - * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2888 $" + * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" + * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) - exit(1); - if (isset($_POST['inlist']) && isset($_POST['group'])) { - $db = db_connect(); - foreach ($_POST['inlist'] as $user) { - $sql = "DELETE FROM tbl_serv_user_groups where group_fk=".mysqli_real_escape_string($db, $_POST['group'])." and user_fk=".mysqli_real_escape_string($db, $user); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot remove user from group because: ' . mysqli_error($db)); - echo "remove user ".$user." from group ".$_POST['group']; - } - mysqli_close($db); - } - if (isset($_POST['notinlist']) && isset($_POST['group'])) { - $db = db_connect(); - foreach ($_POST['notinlist'] as $user) { - $sql = "insert into tbl_serv_user_groups (group_fk, user_fk) values (".mysqli_real_escape_string($db, $_POST['group']).",".mysqli_real_escape_string($db, $user).")"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot add user to group because: ' . mysqli_error($db)); - echo "add user ".$user." 
to group ".$_POST['group']; - } - mysqli_close($db); - } + if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) + exit(1); + if (isset($_POST['inlist']) && isset($_POST['group'])) { + $db = db_connect(); + foreach ($_POST['inlist'] as $user) { + $sql = "DELETE FROM tbl_serv_user_groups where group_fk=".mysqli_real_escape_string($db, $_POST['group'])." and user_fk=".mysqli_real_escape_string($db, $user); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot remove user from group because: ' . mysqli_error($db)); + echo "remove user ".$user." from group ".$_POST['group']; + } + mysqli_close($db); + } + if (isset($_POST['notinlist']) && isset($_POST['group'])) { + $db = db_connect(); + foreach ($_POST['notinlist'] as $user) { + $sql = "insert into tbl_serv_user_groups (group_fk, user_fk) values (".mysqli_real_escape_string($db, $_POST['group']).",".mysqli_real_escape_string($db, $user).")"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot add user to group because: ' . mysqli_error($db)); + echo "add user ".$user." to group ".$_POST['group']; + } + mysqli_close($db); + } ?> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - $(document).ready(function() { - $([$("#add_start_time"), $("#add_end_time")]).each(function() { - var otime = $(this).html(); - $(this).empty().append('<input style="width:200px" name="'+this[0].id+'" value="'+otime+'">'); - }); - }); + $(document).ready(function() { + $([$("#add_start_time"), $("#add_end_time")]).each(function() { + var otime = $(this).html(); + $(this).empty().append('<input style="width:200px" name="'+this[0].id+'" value="'+otime+'">'); + }); + }); </script> <?php echo '<h1>Admin Groups</h1>'; - /* Get groups */ - $db = db_connect(); - $sql = "SELECT serv_groups_key, groupname FROM tbl_serv_groups"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); - $groups = array(); - while ($row = mysqli_fetch_array($rs)) { - $groups[$row['serv_groups_key']]=$row['groupname']; - } - if (empty($_POST['group'])) { - reset($groups); - $selgroup = key($groups); - } - else { - $selgroup = $_POST['group']; - } - $sql = "SELECT group_fk, serv_users_key, username FROM tbl_serv_user_groups left join tbl_serv_users on (user_fk=tbl_serv_users.serv_users_key) where group_fk=".$selgroup." order by username"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); - $isuser = array(); - while ($row = mysqli_fetch_array($rs)) { - $isuser[$row['serv_users_key']]=$row['username']; - } - $sql = "SELECT sum(group_fk=".$selgroup.") as isgroup, serv_users_key, username FROM tbl_serv_users left join tbl_serv_user_groups on (user_fk=tbl_serv_users.serv_users_key) group by serv_users_key having isgroup is null or isgroup=0 order by username"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . 
mysqli_error($db)); - $notuser = array(); - while ($row = mysqli_fetch_array($rs)) { - $notuser[$row['serv_users_key']]=$row['username']; - } - mysqli_close($db); - $max_len = 50; - ?> - <form name="resadd" method="post" action="admin_groups.php"> - <table> - <thead> - <tr> - <th width="300px">Group</th> - <th width="100px">Users not in group</th> - <th width="100px">Users in group</th> - </tr> - </thead> - <tbody> - <tr><td></td><td>Select users to add<br><center><img src="pics/icons/sort_desc.gif"></center></td><td>Select users to be removed<br><center><img src="pics/icons/sort_desc.gif"></center></td></tr> - <?php - echo '<tr><td><select name="group" style="width:200px" onChange="$(document.chgrp.group).val($(this).val());document.chgrp.submit()">'; - foreach ($groups as $idx=>$names) { - if (strlen($names) <= $max_len) - echo '<option value="'.$idx.'"'.($idx==$selgroup?' selected="selected"':'').'>' . $names . '</option>'; - else - echo '<option value="'.$idx.'"'.($idx==$selgroup?' selected="selected"':'').'>'. substr($names,0,$max_len) . '...</option>'; - } - echo '</select></td><td><select name="notinlist[]" multiple style="width:150px;height:400px">'; - foreach ($notuser as $idx=>$user) { - echo '<option value="'.$idx.'">' . $user . '</option>'; - } - echo '</select></td><td><select name="inlist[]" multiple style="width:150px;height:400px">'; - foreach ($isuser as $idx=>$user) { - echo '<option value="'.$idx.'">' . $user . '</option>'; - } - echo "</td></tr></tbody></table><input type='button' onClick='document.resadd.submit()' value='update group'>"; - ?> - </form> - <!-- Forms for processing actions --> - <!--<form name="resdel" method="post" action="admin_group_reservation.php"><input type="hidden" name="resid" value=""></form>--> - <form name="chgrp" method="post" action="admin_groups.php"><input type="hidden" name="group" value=""></form> + /* Get groups */ + $db = db_connect(); + $sql = "SELECT serv_groups_key, groupname FROM tbl_serv_groups"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); + $groups = array(); + while ($row = mysqli_fetch_array($rs)) { + $groups[$row['serv_groups_key']]=$row['groupname']; + } + if (empty($_POST['group'])) { + reset($groups); + $selgroup = key($groups); + } + else { + $selgroup = $_POST['group']; + } + $sql = "SELECT group_fk, serv_users_key, username FROM tbl_serv_user_groups left join tbl_serv_users on (user_fk=tbl_serv_users.serv_users_key) where group_fk=".$selgroup." order by username"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . mysqli_error($db)); + $isuser = array(); + while ($row = mysqli_fetch_array($rs)) { + $isuser[$row['serv_users_key']]=$row['username']; + } + $sql = "SELECT sum(group_fk=".$selgroup.") as isgroup, serv_users_key, username FROM tbl_serv_users left join tbl_serv_user_groups on (user_fk=tbl_serv_users.serv_users_key) group by serv_users_key having isgroup is null or isgroup=0 order by username"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get reservations from database because: ' . 
mysqli_error($db)); + $notuser = array(); + while ($row = mysqli_fetch_array($rs)) { + $notuser[$row['serv_users_key']]=$row['username']; + } + mysqli_close($db); + $max_len = 50; + ?> + <form name="resadd" method="post" action="admin_groups.php"> + <table> + <thead> + <tr> + <th width="300px">Group</th> + <th width="100px">Users not in group</th> + <th width="100px">Users in group</th> + </tr> + </thead> + <tbody> + <tr><td></td><td>Select users to add<br><center><img src="pics/icons/sort_desc.gif"></center></td><td>Select users to be removed<br><center><img src="pics/icons/sort_desc.gif"></center></td></tr> + <?php + echo '<tr><td><select name="group" style="width:200px" onChange="$(document.chgrp.group).val($(this).val());document.chgrp.submit()">'; + foreach ($groups as $idx=>$names) { + if (strlen($names) <= $max_len) + echo '<option value="'.$idx.'"'.($idx==$selgroup?' selected="selected"':'').'>' . $names . '</option>'; + else + echo '<option value="'.$idx.'"'.($idx==$selgroup?' selected="selected"':'').'>'. substr($names,0,$max_len) . '...</option>'; + } + echo '</select></td><td><select name="notinlist[]" multiple style="width:150px;height:400px">'; + foreach ($notuser as $idx=>$user) { + echo '<option value="'.$idx.'">' . $user . '</option>'; + } + echo '</select></td><td><select name="inlist[]" multiple style="width:150px;height:400px">'; + foreach ($isuser as $idx=>$user) { + echo '<option value="'.$idx.'">' . $user . '</option>'; + } + echo "</td></tr></tbody></table><input type='button' onClick='document.resadd.submit()' value='update group'>"; + ?> + </form> + <!-- Forms for processing actions --> + <!--<form name="resdel" method="post" action="admin_group_reservation.php"><input type="hidden" name="resid" value=""></form>--> + <form name="chgrp" method="post" action="admin_groups.php"><input type="hidden" name="group" value=""></form> <?php do_layout('Groups','Groups'); ?> diff --git a/webserver/user/admin_statistics.php b/webserver/user/admin_statistics.php index 85da4d16d4c4ec5ee2f04dd7fe4fae965075d253..570628f3633cfa8d52f6475d5e4551a44b89c962 100644 --- a/webserver/user/admin_statistics.php +++ b/webserver/user/admin_statistics.php @@ -1,158 +1,158 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2888 $" - * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" - * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2888 $" + * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" + * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) - exit(1); + if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) + exit(1); ?> <script type="text/javascript" src="scripts/jquery-ui-1.8.21.custom.min.js"></script> <script type="text/javascript" src="scripts/protovis-d3.3.js"></script> <?php echo '<h1>Admin 
Statistics</h1><table>'; - $testoverhead = 2*3*60; // time needed to setup and clean up a test in seconds - $db = db_connect(); - $sql = "select count(*) as num from tbl_serv_users"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - $row = mysqli_fetch_array($rs); - echo '<tr><td><b>Number of users</b></td><td>'.$row['num'].'</td></tr>'; - - echo '<tr><td><b>Users by institution</b></td><td></td></tr>'; - $sql = 'select institution, count(institution) as num from tbl_serv_users group by institution'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs)) { - echo '<tr><td> '.$row['institution'].'</td><td>'.$row['num'].'</td></tr>'; - } - - echo '<tr><td><b>Users by country</b></td><td></td></tr>'; - $sql = 'select country, count(country) as num from tbl_serv_users group by country'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs)) { - echo '<tr><td> '.$row['country'].'</td><td>'.$row['num'].'</td></tr>'; - } - - // Tests, by nodes, with setup and cleanup - $sql = 'select year(time_start_act) as y, count(*) as num from tbl_serv_tests where test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null group by year(time_start_act) having y is not null'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - $testcount=Array(); - while ($row = mysqli_fetch_array($rs)) { - $year = $row['y']; - echo '<tr><td><b>Number of tests '.$year.' (avg [max] setup / cleanup time)</td><td>'.$row['num'].'</td></tr>'; - $testcount[$year] = $row['num']; - $sql = 'select year(time_start_act), pname, count(*) as c, time_start_act, b.test_status_preserved, avg(setuptime) as tsetup, avg(cleanuptime) as tcleanup, max(setuptime) as tsetupmax, max(cleanuptime) as tcleanupmax from (select distinct test_fk, tbl_serv_platforms.name as pname from tbl_serv_map_test_observer_targetimages left join tbl_serv_targetimages on (targetimage_fk = serv_targetimages_key) left join tbl_serv_platforms on (platforms_fk = serv_platforms_key)) as a left join tbl_serv_tests as b on (a.test_fk = b.serv_tests_key) where year(time_start_act) = '.$year.' and (b.test_status_preserved in ("finished", "retention expiring", "synced") or b.test_status_preserved is null) and time_start_act is not null and pname is not null group by pname order by time_start_act, pname'; - $rs2 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs2)) { - echo '<tr><td> '.$row['pname'].'</td><td>'.$row['c'].' ('.round($row['tsetup']).' ['.round($row['tsetupmax']).'] / '.round($row['tcleanup']).' 
['.round($row['tcleanupmax']).'] s)</td></tr>'; - } - // Tests by service - $sql = 'select year(time_start_act) as y, sum(1) as num_all, sum(ExtractValue(testconfig_xml, "count(/testConf/serialConf|/testConf/serialReaderConf)") > 0) as num_serial, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioTracingConf|/testConf/gpioMonitorConf)") > 0) as num_tracing, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioActuationConf|/testConf/gpioSettingConf)") > 0) as num_actuation, sum(ExtractValue(testconfig_xml, "count(/testConf/powerProfilingConf|/testConf/powerprofConf)") > 0) as num_power from tbl_serv_tests where year(time_start_act) = '.$year.' and (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null)'; - $rs3 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - $row = mysqli_fetch_array($rs3); - foreach (array('Serial'=>'num_serial','GPIO tracing'=>'num_tracing','GPIO actuation'=>'num_actuation','Power profiling'=>'num_power') as $service=>$field) { - echo '<tr><td> '.$service.'</td><td>'.$row[$field].' ('.(round($row[$field] / $row['num_all'] * 100 )).'%)</td></tr>'; - } - } - - // Users by service and node type - $sql = 'select year(time_start_act) as y, count(distinct owner_fk) as num from tbl_serv_tests where test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null group by year(time_start_act) having y is not null'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs)) { - $year = $row['y']; - $num_users = $row['num']; - echo '<tr><td><b>Number of active users in '.$year.'</td><td>'.$row['num'].'</td></tr>'; - $sql = 'select year(time_start_act), pname, count(distinct owner_fk) as c, time_start_act, b.test_status_preserved from (select distinct test_fk, tbl_serv_platforms.name as pname from tbl_serv_map_test_observer_targetimages left join tbl_serv_targetimages on (targetimage_fk = serv_targetimages_key) left join tbl_serv_platforms on (platforms_fk = serv_platforms_key)) as a left join tbl_serv_tests as b on (a.test_fk = b.serv_tests_key) where year(time_start_act) = '.$year.' and (b.test_status_preserved in ("finished", "retention expiring", "synced") or b.test_status_preserved is null) and time_start_act is not null and pname is not null group by pname order by time_start_act, pname'; - $rs2 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs2)) { - echo '<tr><td> '.$row['pname'].'</td><td>'.$row['c'].' 
('.(round($row['c']/$num_users * 100)).'%)</td></tr>'; - } - - $sql = 'select sum(num_all > 0) as user_all, sum(num_serial > 0) as user_serial, sum(num_tracing > 0) as user_tracing, sum(num_actuation > 0) as user_actuation, sum(num_power > 0) as user_power from (select year(time_start_act) as y, sum(1) as num_all, sum(ExtractValue(testconfig_xml, "count(/testConf/serialConf|/testConf/serialReaderConf)") > 0) as num_serial, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioTracingConf|/testConf/gpioMonitorConf)") > 0) as num_tracing, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioActuationConf|/testConf/gpioSettingConf)") > 0) as num_actuation, sum(ExtractValue(testconfig_xml, "count(/testConf/powerProfilingConf|/testConf/powerprofConf)") > 0) as num_power from tbl_serv_tests where year(time_start_act) = '.$year.' and (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) group by owner_fk) as stats;'; - $rs3 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - $row = mysqli_fetch_array($rs3); - foreach (array('Serial'=>'user_serial','GPIO tracing'=>'user_tracing','GPIO actuation'=>'user_actuation','Power profiling'=>'user_power') as $service=>$field) { - echo '<tr><td> '.$service.'</td><td>'.$row[$field].' ('.(round($row[$field] / $row['user_all'] * 100 )).'%)</td></tr>'; - } - } - - // Occupied per year - $sql = 'select year(time_start_act) as y, min(time_start_act) as minp, max(time_end_act) as maxp, max(time_end_act - time_start_act), sum(timestampdiff(SECOND,time_start_act,time_end_act)) as duration from tbl_serv_tests where (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) and (time_end_act is not null and time_start_act is not null and time_start_act < time_end_act and timestampdiff(SECOND,time_start_act,time_end_act) < 72 * 3600) group by year(time_start_act)'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs)) { - echo '<tr><td><b>Time occupied '.$row['y'].'</b></td><td>'.round((($row['duration'] + $testcount[$row['y']] * $testoverhead) / 3600)).' hours ('.round((($row['duration'] + $testcount[$row['y']] * $testoverhead) / (strtotime($row['maxp'])-strtotime($row['minp'])) * 100)).'%)</td></tr>'; - } - - // last year, weekly resolution - $sql = 'select year(time_start_act) as y, week(time_start_act) as w, count(*) as num from tbl_serv_tests where - datediff(DATE_SUB(DATE_SUB(CURDATE(),INTERVAL (DAY(CURDATE())-1) DAY),INTERVAL 12 MONTH),time_start_act) <=0 AND - (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) - group by year(time_start_act), week(time_start_act) having y is not null'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . 
mysqli_error($db)); - $testcount = Array(); - while ($row = mysqli_fetch_array($rs)) { - $testcount[$row['y'].'-'.$row['w']] = $row['num']; - } - $sql = 'select year(time_start_act) as y, week(time_start_act) as w, min(time_start_act) as minp, max(time_end_act) as maxp, max(timestampdiff(SECOND,time_start_act,time_end_act)), sum(timestampdiff(SECOND,time_start_act,time_end_act)) as duration from tbl_serv_tests - where - datediff(DATE_SUB(DATE_SUB(CURDATE(),INTERVAL (DAY(CURDATE())-1) DAY),INTERVAL 12 MONTH),time_start_act) <=0 AND - (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) and - (time_end_act is not null and time_start_act is not null and time_start_act < time_end_act and timestampdiff(SECOND,time_start_act,time_end_act) < 72 * 3600) - group by year(time_start_act), week(time_start_act) having y is not null'; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); - $uval=Array();$uweek=Array(); - while ($row = mysqli_fetch_array($rs)) { - // echo '<tr><td> Time occupied '.$row['y'].' week '.$row['w'].'</td><td>'.round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / 3600)).' hours ('.round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / (7*24*3600) * 100)).'%)</td></tr>'; - array_push($uval, round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / (7*24*3600) * 100))); - array_push($uweek, $row['w']); - } - echo ' - <script type="text/javascript"> + $testoverhead = 2*3*60; // time needed to setup and clean up a test in seconds + $db = db_connect(); + $sql = "select count(*) as num from tbl_serv_users"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + $row = mysqli_fetch_array($rs); + echo '<tr><td><b>Number of users</b></td><td>'.$row['num'].'</td></tr>'; + + echo '<tr><td><b>Users by institution</b></td><td></td></tr>'; + $sql = 'select institution, count(institution) as num from tbl_serv_users group by institution'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs)) { + echo '<tr><td> '.$row['institution'].'</td><td>'.$row['num'].'</td></tr>'; + } + + echo '<tr><td><b>Users by country</b></td><td></td></tr>'; + $sql = 'select country, count(country) as num from tbl_serv_users group by country'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs)) { + echo '<tr><td> '.$row['country'].'</td><td>'.$row['num'].'</td></tr>'; + } + + // Tests, by nodes, with setup and cleanup + $sql = 'select year(time_start_act) as y, count(*) as num from tbl_serv_tests where test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null group by year(time_start_act) having y is not null'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + $testcount=Array(); + while ($row = mysqli_fetch_array($rs)) { + $year = $row['y']; + echo '<tr><td><b>Number of tests '.$year.' 
(avg [max] setup / cleanup time)</td><td>'.$row['num'].'</td></tr>'; + $testcount[$year] = $row['num']; + $sql = 'select year(time_start_act), pname, count(*) as c, time_start_act, b.test_status_preserved, avg(setuptime) as tsetup, avg(cleanuptime) as tcleanup, max(setuptime) as tsetupmax, max(cleanuptime) as tcleanupmax from (select distinct test_fk, tbl_serv_platforms.name as pname from tbl_serv_map_test_observer_targetimages left join tbl_serv_targetimages on (targetimage_fk = serv_targetimages_key) left join tbl_serv_platforms on (platforms_fk = serv_platforms_key)) as a left join tbl_serv_tests as b on (a.test_fk = b.serv_tests_key) where year(time_start_act) = '.$year.' and (b.test_status_preserved in ("finished", "retention expiring", "synced") or b.test_status_preserved is null) and time_start_act is not null and pname is not null group by pname order by time_start_act, pname'; + $rs2 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs2)) { + echo '<tr><td> '.$row['pname'].'</td><td>'.$row['c'].' ('.round($row['tsetup']).' ['.round($row['tsetupmax']).'] / '.round($row['tcleanup']).' ['.round($row['tcleanupmax']).'] s)</td></tr>'; + } + // Tests by service + $sql = 'select year(time_start_act) as y, sum(1) as num_all, sum(ExtractValue(testconfig_xml, "count(/testConf/serialConf|/testConf/serialReaderConf)") > 0) as num_serial, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioTracingConf|/testConf/gpioMonitorConf)") > 0) as num_tracing, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioActuationConf|/testConf/gpioSettingConf)") > 0) as num_actuation, sum(ExtractValue(testconfig_xml, "count(/testConf/powerProfilingConf|/testConf/powerprofConf)") > 0) as num_power from tbl_serv_tests where year(time_start_act) = '.$year.' and (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null)'; + $rs3 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + $row = mysqli_fetch_array($rs3); + foreach (array('Serial'=>'num_serial','GPIO tracing'=>'num_tracing','GPIO actuation'=>'num_actuation','Power profiling'=>'num_power') as $service=>$field) { + echo '<tr><td> '.$service.'</td><td>'.$row[$field].' ('.(round($row[$field] / $row['num_all'] * 100 )).'%)</td></tr>'; + } + } + + // Users by service and node type + $sql = 'select year(time_start_act) as y, count(distinct owner_fk) as num from tbl_serv_tests where test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null group by year(time_start_act) having y is not null'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs)) { + $year = $row['y']; + $num_users = $row['num']; + echo '<tr><td><b>Number of active users in '.$year.'</td><td>'.$row['num'].'</td></tr>'; + $sql = 'select year(time_start_act), pname, count(distinct owner_fk) as c, time_start_act, b.test_status_preserved from (select distinct test_fk, tbl_serv_platforms.name as pname from tbl_serv_map_test_observer_targetimages left join tbl_serv_targetimages on (targetimage_fk = serv_targetimages_key) left join tbl_serv_platforms on (platforms_fk = serv_platforms_key)) as a left join tbl_serv_tests as b on (a.test_fk = b.serv_tests_key) where year(time_start_act) = '.$year.' 
and (b.test_status_preserved in ("finished", "retention expiring", "synced") or b.test_status_preserved is null) and time_start_act is not null and pname is not null group by pname order by time_start_act, pname'; + $rs2 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs2)) { + echo '<tr><td> '.$row['pname'].'</td><td>'.$row['c'].' ('.(round($row['c']/$num_users * 100)).'%)</td></tr>'; + } + + $sql = 'select sum(num_all > 0) as user_all, sum(num_serial > 0) as user_serial, sum(num_tracing > 0) as user_tracing, sum(num_actuation > 0) as user_actuation, sum(num_power > 0) as user_power from (select year(time_start_act) as y, sum(1) as num_all, sum(ExtractValue(testconfig_xml, "count(/testConf/serialConf|/testConf/serialReaderConf)") > 0) as num_serial, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioTracingConf|/testConf/gpioMonitorConf)") > 0) as num_tracing, sum(ExtractValue(testconfig_xml, "count(/testConf/gpioActuationConf|/testConf/gpioSettingConf)") > 0) as num_actuation, sum(ExtractValue(testconfig_xml, "count(/testConf/powerProfilingConf|/testConf/powerprofConf)") > 0) as num_power from tbl_serv_tests where year(time_start_act) = '.$year.' and (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) group by owner_fk) as stats;'; + $rs3 = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + $row = mysqli_fetch_array($rs3); + foreach (array('Serial'=>'user_serial','GPIO tracing'=>'user_tracing','GPIO actuation'=>'user_actuation','Power profiling'=>'user_power') as $service=>$field) { + echo '<tr><td> '.$service.'</td><td>'.$row[$field].' ('.(round($row[$field] / $row['user_all'] * 100 )).'%)</td></tr>'; + } + } + + // Occupied per year + $sql = 'select year(time_start_act) as y, min(time_start_act) as minp, max(time_end_act) as maxp, max(time_end_act - time_start_act), sum(timestampdiff(SECOND,time_start_act,time_end_act)) as duration from tbl_serv_tests where (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) and (time_end_act is not null and time_start_act is not null and time_start_act < time_end_act and timestampdiff(SECOND,time_start_act,time_end_act) < 72 * 3600) group by year(time_start_act)'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs)) { + echo '<tr><td><b>Time occupied '.$row['y'].'</b></td><td>'.round((($row['duration'] + $testcount[$row['y']] * $testoverhead) / 3600)).' hours ('.round((($row['duration'] + $testcount[$row['y']] * $testoverhead) / (strtotime($row['maxp'])-strtotime($row['minp'])) * 100)).'%)</td></tr>'; + } + + // last year, weekly resolution + $sql = 'select year(time_start_act) as y, week(time_start_act) as w, count(*) as num from tbl_serv_tests where + datediff(DATE_SUB(DATE_SUB(CURDATE(),INTERVAL (DAY(CURDATE())-1) DAY),INTERVAL 12 MONTH),time_start_act) <=0 AND + (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) + group by year(time_start_act), week(time_start_act) having y is not null'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . 
mysqli_error($db)); + $testcount = Array(); + while ($row = mysqli_fetch_array($rs)) { + $testcount[$row['y'].'-'.$row['w']] = $row['num']; + } + $sql = 'select year(time_start_act) as y, week(time_start_act) as w, min(time_start_act) as minp, max(time_end_act) as maxp, max(timestampdiff(SECOND,time_start_act,time_end_act)), sum(timestampdiff(SECOND,time_start_act,time_end_act)) as duration from tbl_serv_tests + where + datediff(DATE_SUB(DATE_SUB(CURDATE(),INTERVAL (DAY(CURDATE())-1) DAY),INTERVAL 12 MONTH),time_start_act) <=0 AND + (test_status_preserved in ("finished", "retention expiring", "synced") or test_status_preserved is null) and + (time_end_act is not null and time_start_act is not null and time_start_act < time_end_act and timestampdiff(SECOND,time_start_act,time_end_act) < 72 * 3600) + group by year(time_start_act), week(time_start_act) having y is not null'; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get statistics from database because: ' . mysqli_error($db)); + $uval=Array();$uweek=Array(); + while ($row = mysqli_fetch_array($rs)) { + // echo '<tr><td> Time occupied '.$row['y'].' week '.$row['w'].'</td><td>'.round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / 3600)).' hours ('.round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / (7*24*3600) * 100)).'%)</td></tr>'; + array_push($uval, round((($row['duration'] + $testcount[$row['y'].'-'.$row['w']] * $testoverhead) / (7*24*3600) * 100))); + array_push($uweek, $row['w']); + } + echo ' + <script type="text/javascript"> var uval=['.implode(',',$uval).']; var uweek=['.implode(',',$uweek).']; $(document).ready(function() { var vis = new pv.Panel() - .canvas("usagebars") - .width(860) - .height(125); - + .canvas("usagebars") + .width(860) + .height(125); + vis.add(pv.Rule) - .data(pv.range(0, 101, 50)) - .bottom(function(d) { return d + 10.5;}) - .left(26) - .width(794) - .add(pv.Label).textAlign("right"); + .data(pv.range(0, 101, 50)) + .bottom(function(d) { return d + 10.5;}) + .left(26) + .width(794) + .add(pv.Label).textAlign("right"); vis.add(pv.Bar) - .data(pv.range(0, uval.length, 1)) - .width(10) - .height(function(d){ return uval[d];}) - .bottom(10) - .left(function() {return this.index * 14 + 26;}) - .anchor("bottom").add(pv.Label) - .text(function(d) {return uweek[d];}) - .textBaseline("top"); + .data(pv.range(0, uval.length, 1)) + .width(10) + .height(function(d){ return uval[d];}) + .bottom(10) + .left(function() {return this.index * 14 + 26;}) + .anchor("bottom").add(pv.Label) + .text(function(d) {return uweek[d];}) + .textBaseline("top"); vis.add(pv.Label) - .left(10) - .top(50) - .textAlign("center") - .textAngle(-0.5 * Math.PI) - .text("Utilization (%)"); - + .left(10) + .top(50) + .textAlign("center") + .textAngle(-0.5 * Math.PI) + .text("Utilization (%)"); + vis.render(); }); </script> - '; - mysqli_close($db); - ?> - <tr><td colspan="2"><b>Weekly utilization during the most recent 12 months</b><div id="usagebars"></div></td></tr> - </table> + '; + mysqli_close($db); + ?> + <tr><td colspan="2"><b>Weekly utilization during the most recent 12 months</b><div id="usagebars"></div></td></tr> + </table> <?php do_layout('Statistics','Statistics'); ?> diff --git a/webserver/user/admin_user_management.php b/webserver/user/admin_user_management.php index 85063bef8803debd32f9a815c9e75d51d393934d..f3cb8598c58dfd3ee2a4cdfdd294e3c213503bc9 100644 --- a/webserver/user/admin_user_management.php +++ b/webserver/user/admin_user_management.php @@ -1,135 +1,135 
@@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2888 $" - * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" - * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2888 $" + * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" + * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) - exit(1); - if (isset($_POST['use_daq']) && isset($_POST['user_id'])) { - $db = db_connect(); - $sql = "UPDATE tbl_serv_users SET use_daq=".mysqli_real_escape_string($db, $_POST['use_daq'])." WHERE serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot update user propery in database because: ' . mysqli_error($db)); - mysqli_close($db); - } - if (isset($_POST['is_active']) && isset($_POST['user_id'])) { - $db = db_connect(); - $sql = "UPDATE tbl_serv_users SET is_active=".mysqli_real_escape_string($db, $_POST['is_active'])." WHERE serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot update user propery in database because: ' . mysqli_error($db)); - // send email to the user - if ($_POST['is_active']) { - // fetch the user email - $sql = "SELECT email, last_login from tbl_serv_users where serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get user email from database because: ' . mysqli_error($db)); - $row = mysqli_fetch_array($rs); - if (file_exists("template/newuser_emailtemplate.txt") && $row['last_login'] === NULL) { // only send mail to new users (who have not yet logged in) - $msg = file_get_contents("template/newuser_emailtemplate.txt"); - mail($row['email'], "Re: Request for FlockLab user account", $msg, "From: flocklab@tik.ee.ethz.ch\r\nReply-To: flocklab-admin@tik.ee.ethz.ch"); - } - } - mysqli_close($db); - } + if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) + exit(1); + if (isset($_POST['use_daq']) && isset($_POST['user_id'])) { + $db = db_connect(); + $sql = "UPDATE tbl_serv_users SET use_daq=".mysqli_real_escape_string($db, $_POST['use_daq'])." WHERE serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot update user propery in database because: ' . mysqli_error($db)); + mysqli_close($db); + } + if (isset($_POST['is_active']) && isset($_POST['user_id'])) { + $db = db_connect(); + $sql = "UPDATE tbl_serv_users SET is_active=".mysqli_real_escape_string($db, $_POST['is_active'])." WHERE serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot update user propery in database because: ' . 
mysqli_error($db)); + // send email to the user + if ($_POST['is_active']) { + // fetch the user email + $sql = "SELECT email, last_login from tbl_serv_users where serv_users_key=".mysqli_real_escape_string($db, $_POST['user_id']); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get user email from database because: ' . mysqli_error($db)); + $row = mysqli_fetch_array($rs); + if (file_exists("template/newuser_emailtemplate.txt") && $row['last_login'] === NULL) { // only send mail to new users (who have not yet logged in) + $msg = file_get_contents("template/newuser_emailtemplate.txt"); + mail($row['email'], "Re: Request for FlockLab user account", $msg, "From: flocklab@tik.ee.ethz.ch\r\nReply-To: flocklab-admin@tik.ee.ethz.ch"); + } + } + mysqli_close($db); + } ?> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - $(document).ready(function() { - $("form>input[type='checkbox']").each(function() { - $(this).bind('change',function() { - $(this).val($(this).attr('checked')?1:0); - $(this).attr('checked', true); - $(this).parent().submit(); - }); - }); - var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); - $("#pager_num_rows").attr('value', table_rows); - $("#res_overview") - .tablesorter({widgets: ["zebra"] }) - .tablesorterPager({container: $("#pager"), positionFixed: false}); - $(".qtip_show").qtip( { - content: {text: false}, - style : "flocklab", - }); - $("#res_overview").show(); - $.cookie.json = true; - var user_tbl_state; - try { user_tbl_state = $.cookie('flocklab.usersort'); } - catch (err) { - user_tbl_state = null; - } - if ( user_tbl_state == null) { - user_tbl_state = {s: [[0,1]], p: 0}; - } - $("#res_overview").data('tablesorter').page = user_tbl_state.p; - $("#res_overview").trigger("sorton",[user_tbl_state.s]); - $("#res_overview").bind("applyWidgets",function() { - $.cookie('flocklab.usersort', {s:$("#res_overview").data('tablesorter').sortList, p:$("#res_overview").data('tablesorter').page}); - }); - }); + $(document).ready(function() { + $("form>input[type='checkbox']").each(function() { + $(this).bind('change',function() { + $(this).val($(this).attr('checked')?1:0); + $(this).attr('checked', true); + $(this).parent().submit(); + }); + }); + var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); + $("#pager_num_rows").attr('value', table_rows); + $("#res_overview") + .tablesorter({widgets: ["zebra"] }) + .tablesorterPager({container: $("#pager"), positionFixed: false}); + $(".qtip_show").qtip( { + content: {text: false}, + style : "flocklab", + }); + $("#res_overview").show(); + $.cookie.json = true; + var user_tbl_state; + try { user_tbl_state = $.cookie('flocklab.usersort'); } + catch (err) { + user_tbl_state = null; + } + if ( user_tbl_state == null) { + user_tbl_state = {s: [[0,1]], p: 0}; + } + $("#res_overview").data('tablesorter').page = user_tbl_state.p; + $("#res_overview").trigger("sorton",[user_tbl_state.s]); + $("#res_overview").bind("applyWidgets",function() { + $.cookie('flocklab.usersort', {s:$("#res_overview").data('tablesorter').sortList, p:$("#res_overview").data('tablesorter').page}); + }); + }); </script> <?php echo '<h1>Admin User Management</h1>'; - /* Get all users from the database and display them in the table. 
*/ - $db = db_connect(); - $sql = "SELECT serv_users_key, lastname, firstname, username, email, is_active, use_daq, quota_runtime, quota_tests, role, UNIX_TIMESTAMP(create_time) as create_time_ts, DATE_FORMAT(create_time,'%d.%m.%Y') as create_date, last_login from tbl_serv_users"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get users from database because: ' . mysqli_error($db)); - $nrows = mysqli_num_rows($rs); - mysqli_close($db); - ?> - <form name="resadd" method="post" action="#"> - <table id="res_overview" class="tablesorter" style="display:none"> - <thead> - <tr> - <th width="80px">Username</th> - <th width="70px">First name</th> - <th width="80px">Last name</th> - <th width="140px">E-Mail</th> - <th width="50px">Create date</th> - <th width="50px">Quota</th> - <th width="30px">Role</th> - <th width="20px">active</th> - <th width="20px">daq</th> - </tr> - </thead> - <tbody> - <?php - $i = 0; - while ($row = mysqli_fetch_array($rs)) { - $i++; - if ($i%2 == 1) { - echo "<tr class='even'>"; - } else { - echo "<tr class='odd'>"; - } - echo "<td>" . htmlentities($row['username']) . "</td>"; - echo "<td>" . htmlentities($row['firstname']) . "</td>"; - echo "<td>" . htmlentities($row['lastname']) . "</td>"; - echo "<td>" . htmlentities($row['email']) . "</td>"; - echo '<td><span style="display:none">'.$row['create_time_ts'].'</span>' . htmlentities($row['create_date']) . "</td>"; - echo "<td>" . (string)$row['quota_tests'] . " / " . (string)$row['quota_runtime'] . "min</td>"; - echo "<td>" . htmlentities($row['role']) . "</td>"; - echo '<td><span style="display:none">'.$row['is_active'].'</span><form action="admin_user_management.php" method="post"><input name="is_active" type="checkbox" onclick="if(this.checked) { if(!confirm(\'Active this user? An email will be sent.\')) { return false; } }" ' . ($row['is_active']==1?' checked="true"':'') . '><input type="hidden" name="user_id" value ="'.$row['serv_users_key'].'"></form></td>'; - echo '<td><span style="display:none">'.$row['use_daq'].'</span><form action="admin_user_management.php" method="post"><input name="use_daq" type="checkbox" ' . ($row['use_daq']==1?' checked="true"':'') . '><input type="hidden" name="user_id" value ="'.$row['serv_users_key'].'"></form></td>'; - echo "</tr>"; - } - ?> - </tbody> - </table> - </form> - <span id="pager" class="pager"> - <img src="pics/icons/first.gif" alt="first" class="first"> - <img src="pics/icons/prev.gif" alt="prev" class="prev"> - <span class="pagedisplay"></span> - <img src="pics/icons/next.gif" alt="next" class="next"> - <img src="pics/icons/last.gif" alt="last" class="last"> - <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> - </span> <br > + /* Get all users from the database and display them in the table. */ + $db = db_connect(); + $sql = "SELECT serv_users_key, lastname, firstname, username, email, is_active, use_daq, quota_runtime, quota_tests, role, UNIX_TIMESTAMP(create_time) as create_time_ts, DATE_FORMAT(create_time,'%d.%m.%Y') as create_date, last_login from tbl_serv_users"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get users from database because: ' . 
mysqli_error($db)); + $nrows = mysqli_num_rows($rs); + mysqli_close($db); + ?> + <form name="resadd" method="post" action="#"> + <table id="res_overview" class="tablesorter" style="display:none"> + <thead> + <tr> + <th width="80px">Username</th> + <th width="70px">First name</th> + <th width="80px">Last name</th> + <th width="140px">E-Mail</th> + <th width="50px">Create date</th> + <th width="50px">Quota</th> + <th width="30px">Role</th> + <th width="20px">active</th> + <th width="20px">daq</th> + </tr> + </thead> + <tbody> + <?php + $i = 0; + while ($row = mysqli_fetch_array($rs)) { + $i++; + if ($i%2 == 1) { + echo "<tr class='even'>"; + } else { + echo "<tr class='odd'>"; + } + echo "<td>" . htmlentities($row['username']) . "</td>"; + echo "<td>" . htmlentities($row['firstname']) . "</td>"; + echo "<td>" . htmlentities($row['lastname']) . "</td>"; + echo "<td>" . htmlentities($row['email']) . "</td>"; + echo '<td><span style="display:none">'.$row['create_time_ts'].'</span>' . htmlentities($row['create_date']) . "</td>"; + echo "<td>" . (string)$row['quota_tests'] . " / " . (string)$row['quota_runtime'] . "min</td>"; + echo "<td>" . htmlentities($row['role']) . "</td>"; + echo '<td><span style="display:none">'.$row['is_active'].'</span><form action="admin_user_management.php" method="post"><input name="is_active" type="checkbox" onclick="if(this.checked) { if(!confirm(\'Active this user? An email will be sent.\')) { return false; } }" ' . ($row['is_active']==1?' checked="true"':'') . '><input type="hidden" name="user_id" value ="'.$row['serv_users_key'].'"></form></td>'; + echo '<td><span style="display:none">'.$row['use_daq'].'</span><form action="admin_user_management.php" method="post"><input name="use_daq" type="checkbox" ' . ($row['use_daq']==1?' checked="true"':'') . 
'><input type="hidden" name="user_id" value ="'.$row['serv_users_key'].'"></form></td>'; + echo "</tr>"; + } + ?> + </tbody> + </table> + </form> + <span id="pager" class="pager"> + <img src="pics/icons/first.gif" alt="first" class="first"> + <img src="pics/icons/prev.gif" alt="prev" class="prev"> + <span class="pagedisplay"></span> + <img src="pics/icons/next.gif" alt="next" class="next"> + <img src="pics/icons/last.gif" alt="last" class="last"> + <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> + </span> <br > <?php do_layout('User Management','User Management'); ?> diff --git a/webserver/user/calendar.php b/webserver/user/calendar.php index c777733e7029a6ad5a596f2b65a6dc4d9d90910d..3250c70b886a0f02d4a6b6012a7b4dc91796993b 100644 --- a/webserver/user/calendar.php +++ b/webserver/user/calendar.php @@ -1,110 +1,110 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> - <script type="text/javascript"> - $(document).ready(function() { - /** - * returns a Date object constructed from the UTC date string s - **/ - function UTCDate(s) { - var s_ = s; - var parts = s_.match(/(\d+)/g); - var d = new Date() - d.setUTCFullYear(parts[0], parts[1]-1, parts[2]); - d.setUTCHours(parts[3], parts[4], parts[5]); - return d; - } - - $('#calendar').fullCalendar({ - header: { - left: 'prev,next today', - center: 'title', - right: 'agendaDay,agendaWeek,month' - }, - firstDay: 1, - weekends: true, - lazyFetching: true, - eventColor: '#98BF21', - defaultView: 'month', - slotMinutes: 30, - allDaySlot: false, - axisFormat: 'HH:mm', - weekMode: 'liquid', - timeFormat: { - agenda: 'HH:mm{ - HH:mm}', - '': 'HH:mm{ - HH:mm}' - }, - columnFormat: { - agendaWeek: 'ddd MM-dd' - }, - //events: 'fullcalendar_feed.php', - events: function(start, end, callback) { - $.ajax({ - url: 'fullcalendar_feed.php', - dataType: 'json', - data: { - "start": start.getTime()/1000, - "end": end.getTime()/1000, - }, - success: function(data) { - var events = []; - var now = new Date(); - $(data).each(function() { - if (this.start != null && this.end != null) { - this.start = UTCDate(this.start); - this.end = UTCDate(this.end); - if (this.end < this.start) { - this.end = new Date(this.start.getTime() + 1000); - } + <script type="text/javascript"> + $(document).ready(function() { + /** + * returns a Date object constructed from the UTC date string s + **/ + function UTCDate(s) { + var s_ = s; + var parts = s_.match(/(\d+)/g); + var d = new Date() + d.setUTCFullYear(parts[0], parts[1]-1, parts[2]); + d.setUTCHours(parts[3], parts[4], parts[5]); + return d; + } + + $('#calendar').fullCalendar({ + header: { + left: 'prev,next today', + center: 'title', + right: 'agendaDay,agendaWeek,month' + }, + firstDay: 1, + weekends: true, + lazyFetching: true, + eventColor: '#98BF21', + defaultView: 'month', + slotMinutes: 30, + allDaySlot: false, + axisFormat: 'HH:mm', + weekMode: 'liquid', + timeFormat: { + agenda: 'HH:mm{ - HH:mm}', + '': 
'HH:mm{ - HH:mm}' + }, + columnFormat: { + agendaWeek: 'ddd MM-dd' + }, + //events: 'fullcalendar_feed.php', + events: function(start, end, callback) { + $.ajax({ + url: 'fullcalendar_feed.php', + dataType: 'json', + data: { + "start": start.getTime()/1000, + "end": end.getTime()/1000, + }, + success: function(data) { + var events = []; + var now = new Date(); + $(data).each(function() { + if (this.start != null && this.end != null) { + this.start = UTCDate(this.start); + this.end = UTCDate(this.end); + if (this.end < this.start) { + this.end = new Date(this.start.getTime() + 1000); + } <?php if ($_SESSION['is_admin']) { echo ' - if (this.color=="chocolate" || !this.hasOwnProperty("color")) - this.url="webdavs://'.$_SESSION['username'].'@'.preg_replace('#/[^/]*$#','',$_SERVER['HTTP_HOST'].$_SERVER['REQUEST_URI']).'/webdav/"+this.id+"/"; + if (this.color=="chocolate" || !this.hasOwnProperty("color")) + this.url="webdavs://'.$_SESSION['username'].'@'.preg_replace('#/[^/]*$#','',$_SERVER['HTTP_HOST'].$_SERVER['REQUEST_URI']).'/webdav/"+this.id+"/"; ';} else { echo ' - if (this.color=="chocolate") - this.url="webdavs://'.$_SESSION['username'].'@'.preg_replace('#/[^/]*$#','',$_SERVER['HTTP_HOST'].$_SERVER['REQUEST_URI']).'/webdav/"+this.id+"/"; + if (this.color=="chocolate") + this.url="webdavs://'.$_SESSION['username'].'@'.preg_replace('#/[^/]*$#','',$_SERVER['HTTP_HOST'].$_SERVER['REQUEST_URI']).'/webdav/"+this.id+"/"; ';} ?> - events.push(this); - } - }); - callback(events); - } - }); - }, - eventRender: function(event, element) { - element.qtip({ - content: event.description, - style: 'flocklab', - }); - }, - loading: function(bool) { - if (!bool) { - // Scroll to today: - var now = new Date(); - var elem = $('#calendar').find('*[data-date="' + now.getFullYear()+ "-" + (now.getMonth()+1<10?'0':'') + (now.getMonth()+1) + "-" + now.getDate() + '"]'); - if (elem.html() != null) { - $('html, body').animate({ scrollTop: (elem.offset().top)}, 'slow'); - } - } - }, - }); - }); - </script> - <h1>Full Calendar</h1> - <div id='calendar' class='calendar'></div> + events.push(this); + } + }); + callback(events); + } + }); + }, + eventRender: function(event, element) { + element.qtip({ + content: event.description, + style: 'flocklab', + }); + }, + loading: function(bool) { + if (!bool) { + // Scroll to today: + var now = new Date(); + var elem = $('#calendar').find('*[data-date="' + now.getFullYear()+ "-" + (now.getMonth()+1<10?'0':'') + (now.getMonth()+1) + "-" + now.getDate() + '"]'); + if (elem.html() != null) { + $('html, body').animate({ scrollTop: (elem.offset().top)}, 'slow'); + } + } + }, + }); + }); + </script> + <h1>Full Calendar</h1> + <div id='calendar' class='calendar'></div> <?php do_layout('Full Calendar','Full Calendar'); ?> diff --git a/webserver/user/coojaplugin.php b/webserver/user/coojaplugin.php index 9b468c909d5b6d313a8458744da3db9f0295bd33..ea1f3c08852240a4d1601a952d62c2239a350260 100644 --- a/webserver/user/coojaplugin.php +++ b/webserver/user/coojaplugin.php @@ -1,12 +1,12 @@ <?php - /* - * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Roman Lim" - * __license__ = "GPL" - * __version__ = "$Revision: 1296 $" - * __date__ = "$Date: 2011-08-12 17:06:17 +0200 (Fri, 12 Aug 2011) $" - * __id__ = "$Id: newtest.php 1296 2011-08-12 15:06:17Z walserc $" - */ + /* + * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Roman Lim" + * __license__ = "GPL" + * __version__ = 
"$Revision: 1296 $" + * __date__ = "$Date: 2011-08-12 17:06:17 +0200 (Fri, 12 Aug 2011) $" + * __id__ = "$Id: newtest.php 1296 2011-08-12 15:06:17Z walserc $" + */ ?> <?php $dir="/home/flocklab/flocklab_downloads/"; diff --git a/webserver/user/fullcalendar_feed.php b/webserver/user/fullcalendar_feed.php index 9ea9ecfcbc25d1552ffa07491bf5ae57abbe0b2d..26a01c6327b8a734a369c4b768464e4d6d2906f1 100644 --- a/webserver/user/fullcalendar_feed.php +++ b/webserver/user/fullcalendar_feed.php @@ -1,153 +1,153 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ - require_once('include/auth.php'); - ob_start("ob_gzhandler"); - //debug(); - // Transform request parameters to MySQL datetime format. - $mysqlstart = date( 'Y-m-d H:i:s T', $_GET['start']); - $mysqlend = date('Y-m-d H:i:s T', $_GET['end']); - $mini = isset($_GET['mini']) && $_GET['mini']==TRUE; - - // Connect to database and get the corresponding events: - $guard_setup_min = $CONFIG['tests']['setuptime']; - $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; - $db = db_connect(); - // planned tests - $sql = "SELECT `a`.serv_tests_key, `a`.title, `a`.description, `a`.time_start_wish, `a`.time_end_wish, `a`.owner_fk, `b`.username, `b`.firstname, `b`.lastname, `a`.time_start_act, `a`.time_end_act, `a`.test_status, - DATE_ADD(`a`.time_start_wish, INTERVAL -".$guard_setup_min." MINUTE) as time_start_offset, - DATE_ADD(`a`.time_end_wish, INTERVAL ".$guard_cleanup_min." MINUTE) as time_end_offset - FROM `tbl_serv_tests` AS `a` - LEFT JOIN `tbl_serv_users` AS `b` ON `a`.owner_fk = `b`.serv_users_key - WHERE - ( - (`a`.test_status NOT IN ('not schedulable', 'todelete', 'deleted') OR (`a`.test_status_preserved IS NOT NULL AND `a`.test_status_preserved IN ('finished','failed'))) - AND - ( - (`a`.time_start_wish BETWEEN '" . $mysqlstart . "' AND '" . $mysqlend . "') - OR (`a`.time_end_wish BETWEEN '" . $mysqlstart . "' AND '" . $mysqlend . "') - ) - ) ORDER BY `a`.time_start_wish - "; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get calendar data from database because: ' . mysqli_error($db)); - - // Build the array of events: - $events = array(); - while ($row = mysqli_fetch_array($rs)) { - // Create the event depending on the owner: if it is an event which belongs to the logged-in user, show more info and color it differently. - if ($row['owner_fk'] == $_SESSION['serv_users_key']) { - if ($row['test_status'] == 'failed' || $row['test_status'] == 'finished' || $row['test_status'] == 'retention expiring' || $row['test_status'] == 'deleted' || $row['test_status'] == 'todelete' ) { - $events[] = array( - 'id' => $row['serv_tests_key'], - 'title' => 'Test ' . $row['serv_tests_key'] . ': ' . $row['title'], - 'description' => $mini?'':'Test-ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_act'])).' - '.date("H:i", strtotime($row['time_end_act'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . 
$row['description'].'<br />Status: '.$row['test_status'], - 'start' => $row['time_start_act'], - 'end' => $row['time_end_act'], - 'allDay' => false, - 'color' => 'chocolate', - ); - } - else { - // Insert offset for test setup as separate event in order not to confuse the user: - $events[] = array( - 'id' => 'service', - 'title' => 'Test setup', - 'description' => $mini?'':'Time needed by FlockLab to setup your test.', - 'start' => $row['time_start_offset'], - 'end' => $row['time_start_wish'], - 'allDay' => false, - 'color' => 'orange', - ); - // Insert actual test: - $events[] = array( - 'id' => $row['serv_tests_key'], - 'title' => 'Test ' . $row['serv_tests_key'] . ': ' . $row['title'], - 'description' => $mini?'':'Test-ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_wish'])).' - '.date("H:i", strtotime($row['time_end_wish'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . $row['description'].'<br />Status: '.$row['test_status'], - 'start' => $row['time_start_wish'], - 'end' => $row['time_end_wish'], - 'allDay' => false, - 'color' => 'chocolate', - ); - // Insert offset for test finish as separate event in order not to confuse the user: - $events[] = array( - 'id' => 'service', - 'title' => 'Test cleanup', - 'description' => $mini?'':'Time needed by FlockLab to cleanup your test.', - 'start' => $row['time_end_wish'], - 'end' => $row['time_end_offset'], - 'allDay' => false, - 'color' => 'orange', - ); - } - } elseif ($_SESSION['is_admin'] == true) { - // The user is admin and can thus see all tests: - $event = array( - 'id' => $row['serv_tests_key'], - 'title' => $row['username'] . ' (' . $row['firstname'] . ' ' . $row['lastname'] . ')', - 'description' => $mini?'':'ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_wish'])).' - '.date("H:i", strtotime($row['time_end_wish'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . $row['description'] . '<br/> User: ' . $row['username'] . ' (' . $row['firstname'] . ' ' . $row['lastname'] . ')' . '<br/>Status: ' . $row['test_status'], - 'allDay' => false, - ); - if (isset($row['time_start_act'])) - $event['start'] = $row['time_start_act']; - else - $event['start'] = $row['time_start_offset']; - if (isset($row['time_end_act'])) - $event['end'] = $row['time_end_act']; - else - $event['end'] = $row['time_end_offset']; - array_push($events, $event); - - } else { - // The event is not owned by the logged-in user, thus just show one event without details and add the offsets directly to the event: - $event = array( - 'id' => $row['serv_tests_key'], - 'title' => 'Occupied', - 'description' => $mini?'':'Another user is running a test.', - 'allDay' => false, - ); - if (isset($row['time_start_act'])) - $event['start'] = $row['time_start_act']; - else - $event['start'] = $row['time_start_offset']; - if (isset($row['time_end_act'])) - $event['end'] = $row['time_end_act']; - else - $event['end'] = $row['time_end_offset']; - array_push($events, $event); - } - } - - // add reservation slots that affect this user (i.e., blocks time) - $sql = 'SELECT max(`user_fk` = '.$_SESSION['serv_users_key'].') as `reservation_match`, `time_start`, `time_end`, `serv_reservation_key`, `group_id_fk` - FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` - WHERE `time_end` > NOW() AND - (`time_start` BETWEEN "' . $mysqlstart . '" AND "' . $mysqlend . '" OR - `time_end` BETWEEN "' . $mysqlstart . '" AND "' . $mysqlend . 
'") - GROUP BY serv_reservation_key - '. ($_SESSION['is_admin'] == true?'':'HAVING `reservation_match` is NULL OR `reservation_match` <> 1'); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get calendar data from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_array($rs)) { - $event = array( - 'id' => $row['serv_reservation_key'], - 'title' => ($_SESSION['is_admin'] == true?'Reservation for group '.$row['group_id_fk']:'Occupied'), - 'description' => $mini?'':'Another user is running a test.', - 'allDay' => false, - ); - $event['start'] = $row['time_start']; - $event['end'] = $row['time_end']; - array_push($events, $event); - } - mysqli_close($db); + require_once('include/auth.php'); + ob_start("ob_gzhandler"); + //debug(); + // Transform request parameters to MySQL datetime format. + $mysqlstart = date( 'Y-m-d H:i:s T', $_GET['start']); + $mysqlend = date('Y-m-d H:i:s T', $_GET['end']); + $mini = isset($_GET['mini']) && $_GET['mini']==TRUE; + + // Connect to database and get the corresponding events: + $guard_setup_min = $CONFIG['tests']['setuptime']; + $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; + $db = db_connect(); + // planned tests + $sql = "SELECT `a`.serv_tests_key, `a`.title, `a`.description, `a`.time_start_wish, `a`.time_end_wish, `a`.owner_fk, `b`.username, `b`.firstname, `b`.lastname, `a`.time_start_act, `a`.time_end_act, `a`.test_status, + DATE_ADD(`a`.time_start_wish, INTERVAL -".$guard_setup_min." MINUTE) as time_start_offset, + DATE_ADD(`a`.time_end_wish, INTERVAL ".$guard_cleanup_min." MINUTE) as time_end_offset + FROM `tbl_serv_tests` AS `a` + LEFT JOIN `tbl_serv_users` AS `b` ON `a`.owner_fk = `b`.serv_users_key + WHERE + ( + (`a`.test_status NOT IN ('not schedulable', 'todelete', 'deleted') OR (`a`.test_status_preserved IS NOT NULL AND `a`.test_status_preserved IN ('finished','failed'))) + AND + ( + (`a`.time_start_wish BETWEEN '" . $mysqlstart . "' AND '" . $mysqlend . "') + OR (`a`.time_end_wish BETWEEN '" . $mysqlstart . "' AND '" . $mysqlend . "') + ) + ) ORDER BY `a`.time_start_wish + "; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get calendar data from database because: ' . mysqli_error($db)); + + // Build the array of events: + $events = array(); + while ($row = mysqli_fetch_array($rs)) { + // Create the event depending on the owner: if it is an event which belongs to the logged-in user, show more info and color it differently. + if ($row['owner_fk'] == $_SESSION['serv_users_key']) { + if ($row['test_status'] == 'failed' || $row['test_status'] == 'finished' || $row['test_status'] == 'retention expiring' || $row['test_status'] == 'deleted' || $row['test_status'] == 'todelete' ) { + $events[] = array( + 'id' => $row['serv_tests_key'], + 'title' => 'Test ' . $row['serv_tests_key'] . ': ' . $row['title'], + 'description' => $mini?'':'Test-ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_act'])).' - '.date("H:i", strtotime($row['time_end_act'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . 
$row['description'].'<br />Status: '.$row['test_status'], + 'start' => $row['time_start_act'], + 'end' => $row['time_end_act'], + 'allDay' => false, + 'color' => 'chocolate', + ); + } + else { + // Insert offset for test setup as separate event in order not to confuse the user: + $events[] = array( + 'id' => 'service', + 'title' => 'Test setup', + 'description' => $mini?'':'Time needed by FlockLab to setup your test.', + 'start' => $row['time_start_offset'], + 'end' => $row['time_start_wish'], + 'allDay' => false, + 'color' => 'orange', + ); + // Insert actual test: + $events[] = array( + 'id' => $row['serv_tests_key'], + 'title' => 'Test ' . $row['serv_tests_key'] . ': ' . $row['title'], + 'description' => $mini?'':'Test-ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_wish'])).' - '.date("H:i", strtotime($row['time_end_wish'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . $row['description'].'<br />Status: '.$row['test_status'], + 'start' => $row['time_start_wish'], + 'end' => $row['time_end_wish'], + 'allDay' => false, + 'color' => 'chocolate', + ); + // Insert offset for test finish as separate event in order not to confuse the user: + $events[] = array( + 'id' => 'service', + 'title' => 'Test cleanup', + 'description' => $mini?'':'Time needed by FlockLab to cleanup your test.', + 'start' => $row['time_end_wish'], + 'end' => $row['time_end_offset'], + 'allDay' => false, + 'color' => 'orange', + ); + } + } elseif ($_SESSION['is_admin'] == true) { + // The user is admin and can thus see all tests: + $event = array( + 'id' => $row['serv_tests_key'], + 'title' => $row['username'] . ' (' . $row['firstname'] . ' ' . $row['lastname'] . ')', + 'description' => $mini?'':'ID: ' . $row['serv_tests_key'] . '<br/>Duration: '.date("H:i", strtotime($row['time_start_wish'])).' - '.date("H:i", strtotime($row['time_end_wish'])).'<br/>Title: ' . $row['title'] . '<br/> Description: ' . $row['description'] . '<br/> User: ' . $row['username'] . ' (' . $row['firstname'] . ' ' . $row['lastname'] . ')' . '<br/>Status: ' . $row['test_status'], + 'allDay' => false, + ); + if (isset($row['time_start_act'])) + $event['start'] = $row['time_start_act']; + else + $event['start'] = $row['time_start_offset']; + if (isset($row['time_end_act'])) + $event['end'] = $row['time_end_act']; + else + $event['end'] = $row['time_end_offset']; + array_push($events, $event); + + } else { + // The event is not owned by the logged-in user, thus just show one event without details and add the offsets directly to the event: + $event = array( + 'id' => $row['serv_tests_key'], + 'title' => 'Occupied', + 'description' => $mini?'':'Another user is running a test.', + 'allDay' => false, + ); + if (isset($row['time_start_act'])) + $event['start'] = $row['time_start_act']; + else + $event['start'] = $row['time_start_offset']; + if (isset($row['time_end_act'])) + $event['end'] = $row['time_end_act']; + else + $event['end'] = $row['time_end_offset']; + array_push($events, $event); + } + } + + // add reservation slots that affect this user (i.e., blocks time) + $sql = 'SELECT max(`user_fk` = '.$_SESSION['serv_users_key'].') as `reservation_match`, `time_start`, `time_end`, `serv_reservation_key`, `group_id_fk` + FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` + WHERE `time_end` > NOW() AND + (`time_start` BETWEEN "' . $mysqlstart . '" AND "' . $mysqlend . '" OR + `time_end` BETWEEN "' . $mysqlstart . '" AND "' . $mysqlend . 
'") + GROUP BY serv_reservation_key + '. ($_SESSION['is_admin'] == true?'':'HAVING `reservation_match` is NULL OR `reservation_match` <> 1'); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get calendar data from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_array($rs)) { + $event = array( + 'id' => $row['serv_reservation_key'], + 'title' => ($_SESSION['is_admin'] == true?'Reservation for group '.$row['group_id_fk']:'Occupied'), + 'description' => $mini?'':'Another user is running a test.', + 'allDay' => false, + ); + $event['start'] = $row['time_start']; + $event['end'] = $row['time_end']; + array_push($events, $event); + } + mysqli_close($db); - // JSON-encode the array and return it to the calendar: - echo json_encode($events); + // JSON-encode the array and return it to the calendar: + echo json_encode($events); ?> diff --git a/webserver/user/ical.php b/webserver/user/ical.php index aadcf026b85755629349bd3e41e5ddf84ee57353..77ccdd163e436d43b4574b6b1c726a3d1a80dd76 100644 --- a/webserver/user/ical.php +++ b/webserver/user/ical.php @@ -1,67 +1,67 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2012, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2012, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ - require_once('include/libflocklab.php'); - if (!isset($_SERVER['PHP_AUTH_USER'])) { - header('WWW-Authenticate: Basic realm="Flocklab"'); - header('HTTP/1.0 401 Unauthorized'); - echo "wrong username / password.\n"; - exit(); - } else { - if (!do_login($_SERVER['PHP_AUTH_USER'], $_SERVER['PHP_AUTH_PW'])) - exit(); - } - require_once('include/iCalcreator.class.php'); + require_once('include/libflocklab.php'); + if (!isset($_SERVER['PHP_AUTH_USER'])) { + header('WWW-Authenticate: Basic realm="Flocklab"'); + header('HTTP/1.0 401 Unauthorized'); + echo "wrong username / password.\n"; + exit(); + } else { + if (!do_login($_SERVER['PHP_AUTH_USER'], $_SERVER['PHP_AUTH_PW'])) + exit(); + } + require_once('include/iCalcreator.class.php'); - //debug(); + //debug(); - // Set timezone to UTC: - date_default_timezone_set('UTC'); + // Set timezone to UTC: + date_default_timezone_set('UTC'); - // Connect to database and get the corresponding events: - $db = db_connect(); - // Only get data for the last 30 days: - $sql = "SELECT serv_tests_key, title, left(description, 100) as description, ADDTIME(`a`.time_start_wish, '-00:05:00') AS time_start_w_offset, ADDTIME(`a`.time_end_wish, '00:05:00') AS time_end_w_offset, - `b`.username - FROM `tbl_serv_tests` AS `a` - LEFT JOIN `tbl_serv_users` AS `b` ON `a`.owner_fk = `b`.serv_users_key - WHERE `a`.test_status <> 'not schedulable' AND `a`.test_status <> 'deleted' AND `a`.test_status <> 'todelete' AND (`a`.time_end_wish >= ADDTIME(NOW(), '-30 0:0:0.0')) - ORDER by `a`.time_start_wish ASC LIMIT 1000"; + // Connect to database and get the corresponding events: + $db = db_connect(); + // Only get data for the last 30 days: + $sql = "SELECT serv_tests_key, title, left(description, 100) as description, ADDTIME(`a`.time_start_wish, '-00:05:00') AS time_start_w_offset, ADDTIME(`a`.time_end_wish, '00:05:00') AS time_end_w_offset, + `b`.username 
+ FROM `tbl_serv_tests` AS `a` + LEFT JOIN `tbl_serv_users` AS `b` ON `a`.owner_fk = `b`.serv_users_key + WHERE `a`.test_status <> 'not schedulable' AND `a`.test_status <> 'deleted' AND `a`.test_status <> 'todelete' AND (`a`.time_end_wish >= ADDTIME(NOW(), '-30 0:0:0.0')) + ORDER by `a`.time_start_wish ASC LIMIT 1000"; - $rs = mysqli_query($db, $sql) or die("Unknown error occurred."); - mysqli_close($db); - - $config = array( "unique_id" => "flocklab.ethz.ch" ); - $vcalendar = new vcalendar( $config ); - - // Build the events: - while ($row = mysqli_fetch_array($rs)) { - $start = date_parse($row['time_start_w_offset']); - $end = date_parse($row['time_end_w_offset']); - $vevent = & $vcalendar->newComponent( "vevent" ); - $vevent->setProperty( "dtstart", $start['year'], $start['month'], $start['day'], $start['hour'], $start['minute'], $start['second']); - $vevent->setProperty( "dtend", $end['year'], $end['month'], $end['day'], $end['hour'], $end['minute'], $end['second']); - if ($_SESSION['is_admin']) { - $vevent->setProperty( "description", $row['description']); - $vevent->setProperty( "summary", "FlockLab (".$row['serv_tests_key'].") [".$row['username']."]: ".$row['title']); - } - else if (strcmp($row['username'],$_SESSION['username'])==0) { - $vevent->setProperty( "description", $row['description']); - $vevent->setProperty( "summary", "FlockLab (".$row['serv_tests_key']."): ".$row['title']); - } - else { - $vevent->setProperty( "description", "Flocklab is occupied."); - $vevent->setProperty( "summary", "FlockLab is occupied."); - } - } + $rs = mysqli_query($db, $sql) or die("Unknown error occurred."); + mysqli_close($db); + + $config = array( "unique_id" => "flocklab.ethz.ch" ); + $vcalendar = new vcalendar( $config ); + + // Build the events: + while ($row = mysqli_fetch_array($rs)) { + $start = date_parse($row['time_start_w_offset']); + $end = date_parse($row['time_end_w_offset']); + $vevent = & $vcalendar->newComponent( "vevent" ); + $vevent->setProperty( "dtstart", $start['year'], $start['month'], $start['day'], $start['hour'], $start['minute'], $start['second']); + $vevent->setProperty( "dtend", $end['year'], $end['month'], $end['day'], $end['hour'], $end['minute'], $end['second']); + if ($_SESSION['is_admin']) { + $vevent->setProperty( "description", $row['description']); + $vevent->setProperty( "summary", "FlockLab (".$row['serv_tests_key'].") [".$row['username']."]: ".$row['title']); + } + else if (strcmp($row['username'],$_SESSION['username'])==0) { + $vevent->setProperty( "description", $row['description']); + $vevent->setProperty( "summary", "FlockLab (".$row['serv_tests_key']."): ".$row['title']); + } + else { + $vevent->setProperty( "description", "Flocklab is occupied."); + $vevent->setProperty( "summary", "FlockLab is occupied."); + } + } - $vcalendar->returnCalendar(); + $vcalendar->returnCalendar(); ?> diff --git a/webserver/user/image_delete.php b/webserver/user/image_delete.php index 7baa569d98911b05f892ae809ab35bc6a9cbb083..5452b94d94a96f8b2f34b882061dada580661a1d 100644 --- a/webserver/user/image_delete.php +++ b/webserver/user/image_delete.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, 
Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php @@ -16,63 +16,63 @@ if (isset($_POST['removeit']) && isset($_POST['imageid'])) { // remove image $db = db_connect(); - $sql = 'UPDATE `tbl_serv_targetimages` - SET `binary` = NULL, `binary_hash_sha1` = NULL - WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' - AND `serv_targetimages_key` = ' .mysqli_real_escape_string($db, $_POST['imageid']); + $sql = 'UPDATE `tbl_serv_targetimages` + SET `binary` = NULL, `binary_hash_sha1` = NULL + WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' + AND `serv_targetimages_key` = ' .mysqli_real_escape_string($db, $_POST['imageid']); mysqli_query($db, $sql) or flocklab_die('Cannot remove image: ' . mysqli_error($db)); } ?> - <?php - if (isset($_POST['removeit']) && isset($_POST['imageid'])) { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<p>The image has been removed.</p><ul>"; - echo "</div><p></p>"; - include('images.php'); - echo '<meta http-equiv="Refresh" content="10; URL=images.php">'; - exit(); - } - else { - echo ' - <script type="text/javascript"> - $(document).ready(function() { - $(".qtip_show").qtip( { - content: {text: false}, - style : "flocklab", - }); - }); - </script> - - <h1>Manage Images</h1>'; - $db = db_connect(); - $sql = 'SELECT `serv_targetimages_key`, `tbl_serv_targetimages`.`name` as `name`, `tbl_serv_targetimages`.`description` as `description`, `tbl_serv_operatingsystems`.`name` as `os_name`, `tbl_serv_platforms`.`name` as `platform_name`, `tbl_serv_targetimages`.`last_changed` - FROM `tbl_serv_targetimages` - LEFT JOIN (`tbl_serv_platforms`, `tbl_serv_operatingsystems`) ON (`operatingsystems_fk`=`tbl_serv_operatingsystems`.`serv_operatingsystems_key` AND `platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key`) - WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' AND `serv_targetimages_key` = ' .mysqli_real_escape_string($db, $_POST['imageid']); - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch image information: ' . 
mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - echo ' - <form method="post" action="image_delete.php" enctype="multipart/form-data"> - <fieldset> - <legend>Remove image</legend> - <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div> - <p>The following image will be removed:</p> - <p><table> - <tr><td>Image ID</td><td>'.$row['serv_targetimages_key'].'</td></tr> - <tr><td>Name</td><td>'.$row['name'].'</td></tr> - <tr><td>Description</td><td>'.$row['description'].'</td></tr> - <tr><td>Platform</td><td>'.$row['platform_name'].'</td></tr> - <tr><td>Os</td><td>'.$row['os_name'].'</td></tr> - <tr><td>Date</td><td>'.$row['last_changed'].'</td></tr> - </table></p> - </div><p></p> - <input type="hidden" name="imageid" value="'.htmlentities($_POST['imageid']).'"> - <input type="submit" name="removeit" value="Remove image"> - </fieldset> - <p></p> - </form>'; - } - ?> + <?php + if (isset($_POST['removeit']) && isset($_POST['imageid'])) { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<p>The image has been removed.</p><ul>"; + echo "</div><p></p>"; + include('images.php'); + echo '<meta http-equiv="Refresh" content="10; URL=images.php">'; + exit(); + } + else { + echo ' + <script type="text/javascript"> + $(document).ready(function() { + $(".qtip_show").qtip( { + content: {text: false}, + style : "flocklab", + }); + }); + </script> + + <h1>Manage Images</h1>'; + $db = db_connect(); + $sql = 'SELECT `serv_targetimages_key`, `tbl_serv_targetimages`.`name` as `name`, `tbl_serv_targetimages`.`description` as `description`, `tbl_serv_operatingsystems`.`name` as `os_name`, `tbl_serv_platforms`.`name` as `platform_name`, `tbl_serv_targetimages`.`last_changed` + FROM `tbl_serv_targetimages` + LEFT JOIN (`tbl_serv_platforms`, `tbl_serv_operatingsystems`) ON (`operatingsystems_fk`=`tbl_serv_operatingsystems`.`serv_operatingsystems_key` AND `platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key`) + WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' AND `serv_targetimages_key` = ' .mysqli_real_escape_string($db, $_POST['imageid']); + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch image information: ' . 
mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + echo ' + <form method="post" action="image_delete.php" enctype="multipart/form-data"> + <fieldset> + <legend>Remove image</legend> + <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div> + <p>The following image will be removed:</p> + <p><table> + <tr><td>Image ID</td><td>'.$row['serv_targetimages_key'].'</td></tr> + <tr><td>Name</td><td>'.$row['name'].'</td></tr> + <tr><td>Description</td><td>'.$row['description'].'</td></tr> + <tr><td>Platform</td><td>'.$row['platform_name'].'</td></tr> + <tr><td>Os</td><td>'.$row['os_name'].'</td></tr> + <tr><td>Date</td><td>'.$row['last_changed'].'</td></tr> + </table></p> + </div><p></p> + <input type="hidden" name="imageid" value="'.htmlentities($_POST['imageid']).'"> + <input type="submit" name="removeit" value="Remove image"> + </fieldset> + <p></p> + </form>'; + } + ?> <!-- END content --> <?php do_layout('Manage Images','Manage Images'); diff --git a/webserver/user/image_download.php b/webserver/user/image_download.php index 0a60e62e19df8146d56f442336892ff7f9daa2cc..a1936dcfaa4d86d7d396ff8c2b7914b2de18bb7c 100644 --- a/webserver/user/image_download.php +++ b/webserver/user/image_download.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2435 $" - * __date__ = "$Date: 2013-09-27 16:03:15 +0200 (Fri, 27 Sep 2013) $" - * __id__ = "$Id: testconfig_download.php 2435 2013-09-27 14:03:15Z walserc $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/testconfig_download.php $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2435 $" + * __date__ = "$Date: 2013-09-27 16:03:15 +0200 (Fri, 27 Sep 2013) $" + * __id__ = "$Id: testconfig_download.php 2435 2013-09-27 14:03:15Z walserc $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/testconfig_download.php $" + */ ?> <?php include_once('include/presets.php');?> <?php diff --git a/webserver/user/images.php b/webserver/user/images.php index 4c5bbef7a8ff26500163a69a19176eba2e4f10ef..877691aa367abdce1398de984f9eb9a45bd0ce3a 100644 --- a/webserver/user/images.php +++ b/webserver/user/images.php @@ -1,148 +1,148 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - $(document).ready(function() { - var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); - $("#pager_num_rows").attr('value', table_rows); - $("#test_overview") - .tablesorter({widgets: ['zebra']}) - .tablesorterPager({container: $("#pager"), positionFixed: false}); - 
$('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - $("#test_overview").show(); - $.cookie.json = true; - var img_tbl_state; - try { img_tbl_state = $.cookie('flocklab.imgsort'); } - catch (err) { - img_tbl_state = null; - } - if ( img_tbl_state == null) { - img_tbl_state = {s: [[0,1]], p: 0}; - } - $("#test_overview").data('tablesorter').page = img_tbl_state.p; - $("#test_overview").trigger("sorton",[img_tbl_state.s]); - $("#test_overview").bind("applyWidgets",function() { - $.cookie('flocklab.imgsort', {s:$("#test_overview").data('tablesorter').sortList, p:$("#test_overview").data('tablesorter').page}); - }); - }); + $(document).ready(function() { + var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); + $("#pager_num_rows").attr('value', table_rows); + $("#test_overview") + .tablesorter({widgets: ['zebra']}) + .tablesorterPager({container: $("#pager"), positionFixed: false}); + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + $("#test_overview").show(); + $.cookie.json = true; + var img_tbl_state; + try { img_tbl_state = $.cookie('flocklab.imgsort'); } + catch (err) { + img_tbl_state = null; + } + if ( img_tbl_state == null) { + img_tbl_state = {s: [[0,1]], p: 0}; + } + $("#test_overview").data('tablesorter').page = img_tbl_state.p; + $("#test_overview").trigger("sorton",[img_tbl_state.s]); + $("#test_overview").bind("applyWidgets",function() { + $.cookie('flocklab.imgsort', {s:$("#test_overview").data('tablesorter').sortList, p:$("#test_overview").data('tablesorter').page}); + }); + }); </script> - <h1>Manage Images for <?php echo $_SESSION['firstname'] . " " . $_SESSION['lastname'];?></h1> - <?php - /* Platforms with more than one core. */ - $db = db_connect(); - $sql = "select count(core) as corenum, name from tbl_serv_architectures left join tbl_serv_platforms on serv_platforms_key = platforms_fk group by platforms_fk having corenum > 1"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test images from database because: ' . mysqli_error($db)); - $nrows = mysqli_num_rows($rs); - $multicore = Array(); - while ($row = mysqli_fetch_assoc($rs)) - array_push($multicore, $row['name']); - /* Get all test images of the current user from the database and display them in the table. */ - $sql = "SELECT `serv_targetimages_key`, `tbl_serv_targetimages`.`name` as `name`, `tbl_serv_targetimages`.`description` as `description`, `tbl_serv_architectures`.`description` as `core_desc`, `tbl_serv_operatingsystems`.`name` as `os_name`, `tbl_serv_platforms`.`name` as `platform_name`, `tbl_serv_targetimages`.`last_changed`, `test_fk`, `tbl_serv_tests`.`test_status` - FROM `tbl_serv_targetimages` - LEFT JOIN (`tbl_serv_platforms`, `tbl_serv_operatingsystems`) - ON (`operatingsystems_fk`=`tbl_serv_operatingsystems`.`serv_operatingsystems_key` AND `platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key`) - LEFT JOIN `tbl_serv_architectures` - ON (`tbl_serv_architectures`.`platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key` AND `tbl_serv_architectures`.`core` = `tbl_serv_targetimages`.`core`) - LEFT JOIN `tbl_serv_map_test_observer_targetimages` - ON (`serv_targetimages_key` = `tbl_serv_map_test_observer_targetimages`.`targetimage_fk`) - LEFT JOIN `tbl_serv_tests` - ON (`test_fk` = `tbl_serv_tests`.`serv_tests_key`) - WHERE (`tbl_serv_targetimages`.`owner_fk` = " . $_SESSION['serv_users_key'] . 
") - AND (`tbl_serv_targetimages`.`binary_hash_sha1` is not NULL) - GROUP BY `serv_targetimages_key` - ORDER BY `serv_targetimages_key` DESC"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test images from database because: ' . mysqli_error($db)); - $nrows = mysqli_num_rows($rs); - mysqli_close($db); - - // If there are no tests for this user, display a message instead of the table: - if ($nrows == 0) { - echo "<p class='warning'><img alt='' src='pics/icons/att.png'>No images uploaded yet</p>"; - } - // If there are tests for this user, display them (with alternating row coloring): - else { - ?> - <table id="test_overview" class="tablesorter" style="display:none"> - <thead> - <tr> - <th width="50px">ID</th> - <th>Name</th> - <th>Description</th> - <th width="100px">Platform</th> - <th width="100px">OS</th> - <th width="200px">Date</th> - <th width="39px" class='qtip_show' title='Actions'>Actions</th> - </tr> - </thead> - <tbody> - <?php - $i = 0; - $max_len = 16; // maximum length of text before beeing cut - while ($row = mysqli_fetch_assoc($rs)) { - $i++; - if ($i%2 == 1) { - echo "<tr class='even'>"; - } else { - echo "<tr class='odd'>"; - } - echo "<td>" . $row['serv_targetimages_key'] . "</td>"; - // Name. If longer than $max_len characters, display as tooltip: - echo "<td class='qtip_show' title='" . $row['name'] . "'>" . $row['name'] . "</td>"; - // Description. If longer than $max_len characters, display as tooltip: - if (strlen($row['description']) <= $max_len) - echo "<td>" . $row['description'] . "</td>"; - else - echo "<td class='qtip_show' title='" . $row['description'] . "'>" . substr($row['description'],0,$max_len) . "...</td>"; - // Platform. If longer than $max_len characters, display as tooltip: - $corenum = in_array($row['platform_name'], $multicore)?': '.$row['core_desc']:''; - echo "<td class='qtip_show' title='" . $row['platform_name'] .$corenum. "'>" . $row['platform_name'] .$corenum. "</td>"; - // OS. If longer than $max_len characters, display as tooltip: - echo "<td class='qtip_show' title='" . $row['os_name'] . "'>" . substr($row['os_name'],0,$max_len) . "</td>"; - // Date - echo "<td title='Date' class='qtip_show'>".date_to_tzdate($row['last_changed'])."</td>"; - // Actions - echo "<td>"; - if (is_null($row['test_fk']) || ($row['test_status'] == "deleted")) { - echo "<span style='display:none'>delete</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test image' class='qtip_show' onClick='document.tstimgdel.imageid.value = " . $row['serv_targetimages_key'] . ";document.tstimgdel.submit();'>"; - } - else { - echo "<span style='display:none'>delete not possible</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/cancel.png' height='16px' alt='Not Delete' title='This image is used in a test' class='qtip_show'>"; - } - echo "<img src='pics/icons/download.png' height='16px' alt='Download' title='Download test image' class='qtip_show' onClick='document.tstimgdownload.imageid.value = " . $row['serv_targetimages_key'] . 
";document.tstimgdownload.submit();'>"; - echo "</td>"; - echo "</tr>"; - } - ?> - </tbody> - </table> - <span id="pager" class="pager"> - <img src="pics/icons/first.gif" alt="first" class="first"> - <img src="pics/icons/prev.gif" alt="prev" class="prev"> - <span class="pagedisplay"></span> - <img src="pics/icons/next.gif" alt="next" class="next"> - <img src="pics/icons/last.gif" alt="last" class="last"> - <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> - </span> <br> - <?php }?> - <!-- Forms for processing actions --> - <form name="tstimgdel" method="post" action="image_delete.php"><input type="hidden" name="imageid" value=""></form> - <form name="tstimgdownload" method="post" action="image_download.php"><input type="hidden" name="imageid" value=""></form> - <p><img style="margin-top:2px;margin-right:10px;float:left" src="pics/icons/add.png" height="16px" alt="new test"><a style="color:#666666;text-decoration:none;" href="newimage.php"> add new test image</a></p> + <h1>Manage Images for <?php echo $_SESSION['firstname'] . " " . $_SESSION['lastname'];?></h1> + <?php + /* Platforms with more than one core. */ + $db = db_connect(); + $sql = "select count(core) as corenum, name from tbl_serv_architectures left join tbl_serv_platforms on serv_platforms_key = platforms_fk group by platforms_fk having corenum > 1"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test images from database because: ' . mysqli_error($db)); + $nrows = mysqli_num_rows($rs); + $multicore = Array(); + while ($row = mysqli_fetch_assoc($rs)) + array_push($multicore, $row['name']); + /* Get all test images of the current user from the database and display them in the table. */ + $sql = "SELECT `serv_targetimages_key`, `tbl_serv_targetimages`.`name` as `name`, `tbl_serv_targetimages`.`description` as `description`, `tbl_serv_architectures`.`description` as `core_desc`, `tbl_serv_operatingsystems`.`name` as `os_name`, `tbl_serv_platforms`.`name` as `platform_name`, `tbl_serv_targetimages`.`last_changed`, `test_fk`, `tbl_serv_tests`.`test_status` + FROM `tbl_serv_targetimages` + LEFT JOIN (`tbl_serv_platforms`, `tbl_serv_operatingsystems`) + ON (`operatingsystems_fk`=`tbl_serv_operatingsystems`.`serv_operatingsystems_key` AND `platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key`) + LEFT JOIN `tbl_serv_architectures` + ON (`tbl_serv_architectures`.`platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key` AND `tbl_serv_architectures`.`core` = `tbl_serv_targetimages`.`core`) + LEFT JOIN `tbl_serv_map_test_observer_targetimages` + ON (`serv_targetimages_key` = `tbl_serv_map_test_observer_targetimages`.`targetimage_fk`) + LEFT JOIN `tbl_serv_tests` + ON (`test_fk` = `tbl_serv_tests`.`serv_tests_key`) + WHERE (`tbl_serv_targetimages`.`owner_fk` = " . $_SESSION['serv_users_key'] . ") + AND (`tbl_serv_targetimages`.`binary_hash_sha1` is not NULL) + GROUP BY `serv_targetimages_key` + ORDER BY `serv_targetimages_key` DESC"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test images from database because: ' . 
mysqli_error($db));
+    $nrows = mysqli_num_rows($rs);
+    mysqli_close($db);
+
+    // If there are no tests for this user, display a message instead of the table:
+    if ($nrows == 0) {
+        echo "<p class='warning'><img alt='' src='pics/icons/att.png'>No images uploaded yet</p>";
+    }
+    // If there are tests for this user, display them (with alternating row coloring):
+    else {
+    ?>
+        <table id="test_overview" class="tablesorter" style="display:none">
+            <thead>
+                <tr>
+                    <th width="50px">ID</th>
+                    <th>Name</th>
+                    <th>Description</th>
+                    <th width="100px">Platform</th>
+                    <th width="100px">OS</th>
+                    <th width="200px">Date</th>
+                    <th width="39px" class='qtip_show' title='Actions'>Actions</th>
+                </tr>
+            </thead>
+            <tbody>
+            <?php
+                $i = 0;
+                $max_len = 16; // maximum length of text before being cut
+                while ($row = mysqli_fetch_assoc($rs)) {
+                    $i++;
+                    if ($i%2 == 1) {
+                        echo "<tr class='even'>";
+                    } else {
+                        echo "<tr class='odd'>";
+                    }
+                    echo "<td>" . $row['serv_targetimages_key'] . "</td>";
+                    // Name. If longer than $max_len characters, display as tooltip:
+                    echo "<td class='qtip_show' title='" . $row['name'] . "'>" . $row['name'] . "</td>";
+                    // Description. If longer than $max_len characters, display as tooltip:
+                    if (strlen($row['description']) <= $max_len)
+                        echo "<td>" . $row['description'] . "</td>";
+                    else
+                        echo "<td class='qtip_show' title='" . $row['description'] . "'>" . substr($row['description'],0,$max_len) . "...</td>";
+                    // Platform. If longer than $max_len characters, display as tooltip:
+                    $corenum = in_array($row['platform_name'], $multicore)?': '.$row['core_desc']:'';
+                    echo "<td class='qtip_show' title='" . $row['platform_name'] .$corenum. "'>" . $row['platform_name'] .$corenum. "</td>";
+                    // OS. If longer than $max_len characters, display as tooltip:
+                    echo "<td class='qtip_show' title='" . $row['os_name'] . "'>" . substr($row['os_name'],0,$max_len) . "</td>";
+                    // Date
+                    echo "<td title='Date' class='qtip_show'>".date_to_tzdate($row['last_changed'])."</td>";
+                    // Actions
+                    echo "<td>";
+                    if (is_null($row['test_fk']) || ($row['test_status'] == "deleted")) {
+                        echo "<span style='display:none'>delete</span>"; // needed to make cell sortable by JQuery
+                        echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test image' class='qtip_show' onClick='document.tstimgdel.imageid.value = " . $row['serv_targetimages_key'] . ";document.tstimgdel.submit();'>";
+                    }
+                    else {
+                        echo "<span style='display:none'>delete not possible</span>"; // needed to make cell sortable by JQuery
+                        echo "<img src='pics/icons/cancel.png' height='16px' alt='Not Delete' title='This image is used in a test' class='qtip_show'>";
+                    }
+                    echo "<img src='pics/icons/download.png' height='16px' alt='Download' title='Download test image' class='qtip_show' onClick='document.tstimgdownload.imageid.value = " . $row['serv_targetimages_key'] . 
";document.tstimgdownload.submit();'>"; + echo "</td>"; + echo "</tr>"; + } + ?> + </tbody> + </table> + <span id="pager" class="pager"> + <img src="pics/icons/first.gif" alt="first" class="first"> + <img src="pics/icons/prev.gif" alt="prev" class="prev"> + <span class="pagedisplay"></span> + <img src="pics/icons/next.gif" alt="next" class="next"> + <img src="pics/icons/last.gif" alt="last" class="last"> + <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> + </span> <br> + <?php }?> + <!-- Forms for processing actions --> + <form name="tstimgdel" method="post" action="image_delete.php"><input type="hidden" name="imageid" value=""></form> + <form name="tstimgdownload" method="post" action="image_download.php"><input type="hidden" name="imageid" value=""></form> + <p><img style="margin-top:2px;margin-right:10px;float:left" src="pics/icons/add.png" height="16px" alt="new test"><a style="color:#666666;text-decoration:none;" href="newimage.php"> add new test image</a></p> <!-- END content --> <?php do_layout('Manage Images','Manage Images'); diff --git a/webserver/user/include/auth.php b/webserver/user/include/auth.php index 3d2e153d7b77d0263fac5cdd82d874c0364837fb..f3147a57daa07461d8557aa9593179b5eb888a1b 100644 --- a/webserver/user/include/auth.php +++ b/webserver/user/include/auth.php @@ -1,42 +1,42 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - require_once('include/libflocklab.php'); - - session_start(); - - // Check if session expired and restart a new one if it did: - if(isset($_SESSION['expires']) && $_SESSION['expires'] < $_SERVER['REQUEST_TIME'] ) { - destroy_session(); - session_start(); - session_regenerate_id(); - } - - // Set session timeout: - $_SESSION['expires'] = $_SERVER['REQUEST_TIME'] + $CONFIG['session']['expiretime']; + require_once('include/libflocklab.php'); + + session_start(); + + // Check if session expired and restart a new one if it did: + if(isset($_SESSION['expires']) && $_SESSION['expires'] < $_SERVER['REQUEST_TIME'] ) { + destroy_session(); + session_start(); + session_regenerate_id(); + } + + // Set session timeout: + $_SESSION['expires'] = $_SERVER['REQUEST_TIME'] + $CONFIG['session']['expiretime']; - $hostname = $_SERVER['HTTP_HOST']; - $path = dirname($_SERVER['PHP_SELF']); + $hostname = $_SERVER['HTTP_HOST']; + $path = dirname($_SERVER['PHP_SELF']); - // Redirect to login page if user not logged in yet: - if (!isset($_SESSION['logged_in']) || !$_SESSION['logged_in']) { - // check for login parameters - if (!(isset($_POST['username']) && isset($_POST['password']) && do_login($_POST['username'], $_POST['password']))) { - if (count($_POST)==0) - $_SESSION['request_path']=$_SERVER['SCRIPT_NAME']; - else - unset($_SESSION['request_path']); - header('Location: https://'.$hostname.($path == '/' ? 
'' : $path).'/login.php'); - exit; - } - } + // Redirect to login page if user not logged in yet: + if (!isset($_SESSION['logged_in']) || !$_SESSION['logged_in']) { + // check for login parameters + if (!(isset($_POST['username']) && isset($_POST['password']) && do_login($_POST['username'], $_POST['password']))) { + if (count($_POST)==0) + $_SESSION['request_path']=$_SERVER['SCRIPT_NAME']; + else + unset($_SESSION['request_path']); + header('Location: https://'.$hostname.($path == '/' ? '' : $path).'/login.php'); + exit; + } + } ?> diff --git a/webserver/user/include/config.php b/webserver/user/include/config.php index a8cb3d45615bae217e53e2e385b9137bb30b8c22..e519fbde6baaf6d4ab550d6bec6daab1f148fa3f 100644 --- a/webserver/user/include/config.php +++ b/webserver/user/include/config.php @@ -1,25 +1,25 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - $configfile = "/home/flocklab/testmanagement/user.ini"; - if (!file_exists($configfile)) { - die("File '$configfile' not found!"); - } - $CONFIG = parse_ini_file($configfile, true); - if ($CONFIG === FALSE) { - die("Failed to parse config file!"); - } - if (!file_exists ($CONFIG['session']['dir'])) { - mkdir ($CONFIG['session']['dir']); - } - session_save_path($CONFIG['session']['dir']); + $configfile = "/home/flocklab/testmanagement/user.ini"; + if (!file_exists($configfile)) { + die("File '$configfile' not found!"); + } + $CONFIG = parse_ini_file($configfile, true); + if ($CONFIG === FALSE) { + die("Failed to parse config file!"); + } + if (!file_exists ($CONFIG['session']['dir'])) { + mkdir ($CONFIG['session']['dir']); + } + session_save_path($CONFIG['session']['dir']); ?> diff --git a/webserver/user/include/layout.php b/webserver/user/include/layout.php index 247b40e515d7b243e00d79fb2cf4bbc36fb2d4d5..2b7aa9c6f5a30c94dac8129bb81e8f2bea359003 100644 --- a/webserver/user/include/layout.php +++ b/webserver/user/include/layout.php @@ -1,44 +1,44 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php ob_start(); $LAYOUT=TRUE; function layout_die($status) { - //ob_clean(); - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>Fatal Error:</p>"; - echo "<p>".$status."</p>"; - echo "</div><p></p>"; - do_layout('Error', ''); - die(); + //ob_clean(); + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>Fatal Error:</p>"; + echo "<p>".$status."</p>"; + echo "</div><p></p>"; + do_layout('Error', ''); + die(); } 
function do_layout($title, $current_menu_context, $javascript = "") { - $content = file_get_contents('template/index.html'); + $content = file_get_contents('template/index.html'); - // do menu - if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) - $content = preg_replace('/<!-- ADMIN_START -->.*<!-- ADMIN_END -->/', '', $content); - $content = preg_replace('#(right_arrow)(\.png\'[^>]*> '.$current_menu_context.'</a>)#', '$1_red$2', $content); - - // fill in content - $content = str_replace('<!-- TEMPLATE javascript -->', $javascript, $content); - $content = str_replace('<!-- TEMPLATE content -->',ob_get_contents (),$content); - $content = str_replace('<!-- TEMPLATE title -->', $title, $content); - ob_end_clean(); - ob_start("ob_gzhandler"); + // do menu + if (!isset($_SESSION['is_admin']) || !$_SESSION['is_admin']) + $content = preg_replace('/<!-- ADMIN_START -->.*<!-- ADMIN_END -->/', '', $content); + $content = preg_replace('#(right_arrow)(\.png\'[^>]*> '.$current_menu_context.'</a>)#', '$1_red$2', $content); + + // fill in content + $content = str_replace('<!-- TEMPLATE javascript -->', $javascript, $content); + $content = str_replace('<!-- TEMPLATE content -->',ob_get_contents (),$content); + $content = str_replace('<!-- TEMPLATE title -->', $title, $content); + ob_end_clean(); + ob_start("ob_gzhandler"); - // print - echo $content; + // print + echo $content; } ?> \ No newline at end of file diff --git a/webserver/user/include/libflocklab.php b/webserver/user/include/libflocklab.php index 5d4e8cc32e2cc8b5b21b46b8dee0890c9a4b0ad6..46d6c00722a7da1764696cb03171e5038501623a 100644 --- a/webserver/user/include/libflocklab.php +++ b/webserver/user/include/libflocklab.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/config.php'); @@ -22,14 +22,14 @@ require_once('include/logging.php'); */ function db_connect() { - global $CONFIG; + global $CONFIG; - $dbh = mysqli_connect($CONFIG['database']['host'], $CONFIG['database']['user'], $CONFIG['database']['password'], $CONFIG['database']['database']) or flocklab_die ('Cannot connect to the database because: ' . mysqli_error($dbh)); - $sql='SET time_zone="+0:00"'; - mysqli_query($dbh, $sql) or flocklab_die('Cannot init timezone for database connection because: ' . mysqli_error($dbh)); - $sql='SET sql_mode=""'; - mysqli_query($dbh, $sql) or flocklab_die('Cannot set sql mode for database connection because: ' . mysqli_error($dbh)); - return($dbh); + $dbh = mysqli_connect($CONFIG['database']['host'], $CONFIG['database']['user'], $CONFIG['database']['password'], $CONFIG['database']['database']) or flocklab_die ('Cannot connect to the database because: ' . mysqli_error($dbh)); + $sql='SET time_zone="+0:00"'; + mysqli_query($dbh, $sql) or flocklab_die('Cannot init timezone for database connection because: ' . mysqli_error($dbh)); + $sql='SET sql_mode=""'; + mysqli_query($dbh, $sql) or flocklab_die('Cannot set sql mode for database connection because: ' . 
mysqli_error($dbh)); + return($dbh); } @@ -46,10 +46,10 @@ function db_connect() */ function debug() { - if (!ini_get('display_errors')) { - ini_set('display_errors', 1); - } - error_reporting(E_ALL); + if (!ini_get('display_errors')) { + ini_set('display_errors', 1); + } + error_reporting(E_ALL); } /* @@ -62,16 +62,16 @@ function debug() ############################################################################## */ function rrm($dir) { - if (!file_exists($dir)) return true; - if (!is_dir($dir) || is_link($dir)) return unlink($dir); - foreach (scandir($dir) as $item) { - if ($item == '.' || $item == '..') continue; - if (!rrm($dir . "/" . $item)) { - chmod($dir . "/" . $item, 0777); - if (!rrm($dir . "/" . $item)) return false; - }; - } - return rmdir($dir); + if (!file_exists($dir)) return true; + if (!is_dir($dir) || is_link($dir)) return unlink($dir); + foreach (scandir($dir) as $item) { + if ($item == '.' || $item == '..') continue; + if (!rrm($dir . "/" . $item)) { + chmod($dir . "/" . $item, 0777); + if (!rrm($dir . "/" . $item)) return false; + }; + } + return rmdir($dir); } /* ############################################################################## @@ -87,50 +87,50 @@ function rrm($dir) { ############################################################################## */ function do_login($username, $password) { - global $CONFIG; - - // Check username and password: - if (strlen($username)>0 && strlen($password) > 0) { - $db = db_connect(); - $sql = "SELECT serv_users_key, username, firstname, lastname, email, role - FROM tbl_serv_users - WHERE username = '" . mysqli_real_escape_string($db, $username) . "' AND password = '" . mysqli_real_escape_string($db, sha1($password)) . "' AND is_active=1"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); - $rows = mysqli_fetch_array($rs); - if ($rows) { - if ($rows['role'] != 'admin') { - // check for global UI lock - $sql = "SELECT message, time_start, time_end - FROM tbl_serv_web_status - WHERE time_start < UTC_TIMESTAMP() and time_end > UTC_TIMESTAMP() AND ui_lock='true'"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); - if (mysqli_num_rows($rs) > 0) { - $rows = mysqli_fetch_array($rs); - $d = new DateTime($row['time_end']); - return $rows['message'].'<br>Access should again be possible after <span class="time">'.$d->format("U").'</span>'; - } - } - // update user stats - $sql = "UPDATE tbl_serv_users set last_login=NOW(), login_count=login_count+1 - WHERE serv_users_key = " . $rows['serv_users_key']; - mysqli_query($db, $sql); - mysqli_close($db); - // Set session variables for this user: - $_SESSION['logged_in'] = true; - $_SESSION['serv_users_key'] = $rows['serv_users_key']; - $_SESSION['username'] = $rows['username']; - $_SESSION['firstname'] = $rows['firstname']; - $_SESSION['lastname'] = $rows['lastname']; - $_SESSION['email'] = $rows['email']; - $_SESSION['is_admin'] = ($rows['role'] == 'admin') ? true : false; - $_SESSION['expires'] = $_SERVER['REQUEST_TIME'] + $CONFIG['session']['expiretime']; - return true; - } - else { - mysqli_close($db); - } - } - return false; + global $CONFIG; + + // Check username and password: + if (strlen($username)>0 && strlen($password) > 0) { + $db = db_connect(); + $sql = "SELECT serv_users_key, username, firstname, lastname, email, role + FROM tbl_serv_users + WHERE username = '" . mysqli_real_escape_string($db, $username) . "' AND password = '" . 
mysqli_real_escape_string($db, sha1($password)) . "' AND is_active=1"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); + $rows = mysqli_fetch_array($rs); + if ($rows) { + if ($rows['role'] != 'admin') { + // check for global UI lock + $sql = "SELECT message, time_start, time_end + FROM tbl_serv_web_status + WHERE time_start < UTC_TIMESTAMP() and time_end > UTC_TIMESTAMP() AND ui_lock='true'"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); + if (mysqli_num_rows($rs) > 0) { + $rows = mysqli_fetch_array($rs); + $d = new DateTime($row['time_end']); + return $rows['message'].'<br>Access should again be possible after <span class="time">'.$d->format("U").'</span>'; + } + } + // update user stats + $sql = "UPDATE tbl_serv_users set last_login=NOW(), login_count=login_count+1 + WHERE serv_users_key = " . $rows['serv_users_key']; + mysqli_query($db, $sql); + mysqli_close($db); + // Set session variables for this user: + $_SESSION['logged_in'] = true; + $_SESSION['serv_users_key'] = $rows['serv_users_key']; + $_SESSION['username'] = $rows['username']; + $_SESSION['firstname'] = $rows['firstname']; + $_SESSION['lastname'] = $rows['lastname']; + $_SESSION['email'] = $rows['email']; + $_SESSION['is_admin'] = ($rows['role'] == 'admin') ? true : false; + $_SESSION['expires'] = $_SERVER['REQUEST_TIME'] + $CONFIG['session']['expiretime']; + return true; + } + else { + mysqli_close($db); + } + } + return false; } /* ############################################################################## @@ -142,15 +142,15 @@ function do_login($username, $password) { ############################################################################## */ function destroy_session() { - // Remove the sessions temp directory: - // rrm($_SESSION['tempdir']); + // Remove the sessions temp directory: + // rrm($_SESSION['tempdir']); - // Destroy the session itself: - session_destroy(); + // Destroy the session itself: + session_destroy(); - // Destroy the sesssion cookie if it exists: - if( isset($_COOKIE[session_name()])) - setcookie(session_name(), null, 0); + // Destroy the sesssion cookie if it exists: + if( isset($_COOKIE[session_name()])) + setcookie(session_name(), null, 0); } /* @@ -163,17 +163,17 @@ function destroy_session() { ############################################################################## */ function check_testid($testid, $userid) { - $db = db_connect(); - $sql = "SELECT owner_fk - FROM tbl_serv_tests - WHERE serv_tests_key = " . $testid; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test owner from database because: ' . mysqli_error($db)); - $owner = mysqli_fetch_array($rs); - mysqli_close($db); - if ($owner['owner_fk'] == $userid) - return true; - else - return false; + $db = db_connect(); + $sql = "SELECT owner_fk + FROM tbl_serv_tests + WHERE serv_tests_key = " . $testid; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test owner from database because: ' . mysqli_error($db)); + $owner = mysqli_fetch_array($rs); + mysqli_close($db); + if ($owner['owner_fk'] == $userid) + return true; + else + return false; } /* @@ -186,17 +186,17 @@ function check_testid($testid, $userid) { ############################################################################## */ function check_imageid($imageid, $userid) { - $db = db_connect(); - $sql = "SELECT owner_fk - FROM tbl_serv_targetimages - WHERE serv_targetimages_key = " . 
$imageid; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test owner from database because: ' . mysqli_error($db)); - $owner = mysqli_fetch_array($rs); - mysqli_close($db); - if ($owner['owner_fk'] == $userid) - return true; - else - return false; + $db = db_connect(); + $sql = "SELECT owner_fk + FROM tbl_serv_targetimages + WHERE serv_targetimages_key = " . $imageid; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test owner from database because: ' . mysqli_error($db)); + $owner = mysqli_fetch_array($rs); + mysqli_close($db); + if ($owner['owner_fk'] == $userid) + return true; + else + return false; } /* @@ -209,18 +209,18 @@ function check_imageid($imageid, $userid) { ############################################################################## */ function get_admin_emails() { - $db = db_connect(); - $sql = "SELECT `email` - FROM tbl_serv_users - WHERE `role` = 'admin'"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get admin emails from database because: ' . mysqli_error($db)); - $admins = Array(); - while ($row=mysqli_fetch_array($rs)) { - array_push($admins, $row['email']); - } - mysqli_close($db); - - return $admins; + $db = db_connect(); + $sql = "SELECT `email` + FROM tbl_serv_users + WHERE `role` = 'admin'"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get admin emails from database because: ' . mysqli_error($db)); + $admins = Array(); + while ($row=mysqli_fetch_array($rs)) { + array_push($admins, $row['email']); + } + mysqli_close($db); + + return $admins; } /* @@ -233,14 +233,14 @@ function get_admin_emails() { ############################################################################## */ function get_user_role($username) { - $db = db_connect(); - $sql = "SELECT role FROM tbl_serv_users WHERE username = '" . mysqli_real_escape_string($db, $username) . "'"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); - $rows = mysqli_fetch_array($rs); - if ($rows) { - return $rows['role']; - } - return 'user'; /* default */ + $db = db_connect(); + $sql = "SELECT role FROM tbl_serv_users WHERE username = '" . mysqli_real_escape_string($db, $username) . "'"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot authenticate because: ' . mysqli_error($db)); + $rows = mysqli_fetch_array($rs); + if ($rows) { + return $rows['role']; + } + return 'user'; /* default */ } /* @@ -253,26 +253,26 @@ function get_user_role($username) { ############################################################################## */ function get_available_platforms() { - $db = db_connect(); - $sql = 'SELECT `serv_platforms_key`, `name`, `core`, `tbl_serv_architectures`.`description` `core_desc` - FROM `tbl_serv_platforms` - LEFT JOIN `tbl_serv_architectures` - ON `tbl_serv_architectures`.`platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key` - ORDER BY `name`, `core` ASC'; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch available platforms because: ' . 
mysqli_error($db)); - $num = mysqli_num_rows($res); - $available_platforms = Array(); - $pkey = -1; - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - if ($pkey != $row['serv_platforms_key']) { - $pkey = $row['serv_platforms_key']; - $available_platforms[$row['serv_platforms_key']] = Array(); - } - $available_platforms[$row['serv_platforms_key']][]=Array('name'=>$row['name'], 'core'=>$row['core'], 'core_desc'=>$row['core_desc']); - } - mysqli_close($db); - return $available_platforms; + $db = db_connect(); + $sql = 'SELECT `serv_platforms_key`, `name`, `core`, `tbl_serv_architectures`.`description` `core_desc` + FROM `tbl_serv_platforms` + LEFT JOIN `tbl_serv_architectures` + ON `tbl_serv_architectures`.`platforms_fk` = `tbl_serv_platforms`.`serv_platforms_key` + ORDER BY `name`, `core` ASC'; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch available platforms because: ' . mysqli_error($db)); + $num = mysqli_num_rows($res); + $available_platforms = Array(); + $pkey = -1; + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + if ($pkey != $row['serv_platforms_key']) { + $pkey = $row['serv_platforms_key']; + $available_platforms[$row['serv_platforms_key']] = Array(); + } + $available_platforms[$row['serv_platforms_key']][]=Array('name'=>$row['name'], 'core'=>$row['core'], 'core_desc'=>$row['core_desc']); + } + mysqli_close($db); + return $available_platforms; } /* @@ -285,45 +285,45 @@ function get_available_platforms() { ############################################################################## */ function get_available_os() { - $db = db_connect(); - $sql = 'SELECT `serv_operatingsystems_key`, `name` - FROM `tbl_serv_operatingsystems` - ORDER BY `name` ASC'; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch available os because: ' . mysqli_error($db)); - $num = mysqli_num_rows($res); - $available_os = Array(); - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - $available_os[$row['serv_operatingsystems_key']]=$row['name']; - } - mysqli_close($db); - return $available_os; + $db = db_connect(); + $sql = 'SELECT `serv_operatingsystems_key`, `name` + FROM `tbl_serv_operatingsystems` + ORDER BY `name` ASC'; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch available os because: ' . mysqli_error($db)); + $num = mysqli_num_rows($res); + $available_os = Array(); + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + $available_os[$row['serv_operatingsystems_key']]=$row['name']; + } + mysqli_close($db); + return $available_os; } function get_testconfig($testid) { - $db = db_connect(); - $sql = "SELECT `testconfig_xml` - FROM tbl_serv_tests - WHERE ".($_SESSION['is_admin']?"":("owner_fk = " . $_SESSION['serv_users_key'] . " AND "))."`serv_tests_key`=".mysqli_real_escape_string($db, $testid); - $res = mysqli_query($db, $sql); - if ($res !== false) { - $row = mysqli_fetch_assoc($res); - return $row['testconfig_xml']; - } - return false; + $db = db_connect(); + $sql = "SELECT `testconfig_xml` + FROM tbl_serv_tests + WHERE ".($_SESSION['is_admin']?"":("owner_fk = " . $_SESSION['serv_users_key'] . " AND "))."`serv_tests_key`=".mysqli_real_escape_string($db, $testid); + $res = mysqli_query($db, $sql); + if ($res !== false) { + $row = mysqli_fetch_assoc($res); + return $row['testconfig_xml']; + } + return false; } function get_teststatus($testid) { - $db = db_connect(); - $sql = "SELECT `test_status` - FROM tbl_serv_tests - WHERE owner_fk = " . $_SESSION['serv_users_key'] . 
" AND `serv_tests_key`=".mysqli_real_escape_string($db, $testid); - $res = mysqli_query($db, $sql); - if ($res !== false) { - $row = mysqli_fetch_assoc($res); - return $row['test_status']; - } - return false; + $db = db_connect(); + $sql = "SELECT `test_status` + FROM tbl_serv_tests + WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND `serv_tests_key`=".mysqli_real_escape_string($db, $testid); + $res = mysqli_query($db, $sql); + if ($res !== false) { + $row = mysqli_fetch_assoc($res); + return $row['test_status']; + } + return false; } /* @@ -338,23 +338,23 @@ function get_teststatus($testid) { ############################################################################## */ function validate_image($image, &$errors) { - global $CONFIG; - $validate_image_errors = array(); - foreach(Array('name','platform') as $field) - if (!isset($image[$field]) || strlen($image[$field])==0) - array_push($validate_image_errors, "Missing mandatory field <i>".$field."</i>"); - // Get the file and check if it is a valid image - $imagefile = tempnam(sys_get_temp_dir(), 'flocklab'); - file_put_contents($imagefile, $image['data']); - $platform_list = get_available_platforms(); - $cmd = "cd ".$CONFIG['tests']['testmanagementfolder']."; python ".$CONFIG['targetimage']['imagevalidator']." --image=".$imagefile." --platform=". $platform_list[$image['platform']][0]['name']." --core=".$image['core']; - exec($cmd , $output, $ret); - unlink($imagefile); - if ($ret != 0) { - array_push($validate_image_errors, "The supplied file is not a valid image for this platform."); - } - $errors = array_merge($errors, $validate_image_errors); - return count($validate_image_errors)==0; + global $CONFIG; + $validate_image_errors = array(); + foreach(Array('name','platform') as $field) + if (!isset($image[$field]) || strlen($image[$field])==0) + array_push($validate_image_errors, "Missing mandatory field <i>".$field."</i>"); + // Get the file and check if it is a valid image + $imagefile = tempnam(sys_get_temp_dir(), 'flocklab'); + file_put_contents($imagefile, $image['data']); + $platform_list = get_available_platforms(); + $cmd = "cd ".$CONFIG['tests']['testmanagementfolder']."; python ".$CONFIG['targetimage']['imagevalidator']." --image=".$imagefile." --platform=". $platform_list[$image['platform']][0]['name']." --core=".$image['core']; + exec($cmd , $output, $ret); + unlink($imagefile); + if ($ret != 0) { + array_push($validate_image_errors, "The supplied file is not a valid image for this platform."); + } + $errors = array_merge($errors, $validate_image_errors); + return count($validate_image_errors)==0; } /* @@ -369,28 +369,28 @@ function validate_image($image, &$errors) { ############################################################################## */ function check_image_duplicate($image) { - // check hash - $duplicate = false; - $db = db_connect(); - $hash = hash('sha1', $image['data']); - $sql = 'SELECT `serv_targetimages_key`, `binary` - FROM `tbl_serv_targetimages` - WHERE `owner_fk`='.$_SESSION['serv_users_key'].' - AND `binary` IS NOT NULL - AND `binary_hash_sha1`="'.$hash.'" - AND `platforms_fk`='.mysqli_real_escape_string($db, $image['platform']).' - AND `core`='.mysqli_real_escape_string($db, $image['core']); - $res = mysqli_query($db, $sql) or flocklab_die('Cannot compare to other images because: ' . 
mysqli_error($db)); - $num = mysqli_num_rows($res); - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - if (strcmp($row['binary'], $image['data'])==0) { - $duplicate = $row['serv_targetimages_key']; - break; - } - } - mysqli_close($db); - return $duplicate; + // check hash + $duplicate = false; + $db = db_connect(); + $hash = hash('sha1', $image['data']); + $sql = 'SELECT `serv_targetimages_key`, `binary` + FROM `tbl_serv_targetimages` + WHERE `owner_fk`='.$_SESSION['serv_users_key'].' + AND `binary` IS NOT NULL + AND `binary_hash_sha1`="'.$hash.'" + AND `platforms_fk`='.mysqli_real_escape_string($db, $image['platform']).' + AND `core`='.mysqli_real_escape_string($db, $image['core']); + $res = mysqli_query($db, $sql) or flocklab_die('Cannot compare to other images because: ' . mysqli_error($db)); + $num = mysqli_num_rows($res); + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + if (strcmp($row['binary'], $image['data'])==0) { + $duplicate = $row['serv_targetimages_key']; + break; + } + } + mysqli_close($db); + return $duplicate; } /* @@ -405,36 +405,36 @@ function check_image_duplicate($image) { ############################################################################## */ function store_image($image) { - $id = null; - $hash = hash('sha1', $image['data']); - $db = db_connect(); - $sql = 'INSERT INTO `tbl_serv_targetimages` (`name`,`description`,`owner_fk`,`operatingsystems_fk`,`platforms_fk`,`core`,`binary`,`binary_hash_sha1`) - VALUES ( - "'.mysqli_real_escape_string($db, trim($image['name'])).'", - "'.mysqli_real_escape_string($db, trim($image['description'])).'", - '.$_SESSION['serv_users_key'].', - '.mysqli_real_escape_string($db, $image['os']).', - '.mysqli_real_escape_string($db, $image['platform']).', - '.mysqli_real_escape_string($db, $image['core']).', - "'.mysqli_real_escape_string($db, $image['data']).'", - "'.$hash.'")'; - mysqli_query($db, $sql) or flocklab_die('Cannot save uploaded images because: ' . mysqli_error($db)); - $id = mysqli_insert_id($db); - mysqli_close($db); - return $id; + $id = null; + $hash = hash('sha1', $image['data']); + $db = db_connect(); + $sql = 'INSERT INTO `tbl_serv_targetimages` (`name`,`description`,`owner_fk`,`operatingsystems_fk`,`platforms_fk`,`core`,`binary`,`binary_hash_sha1`) + VALUES ( + "'.mysqli_real_escape_string($db, trim($image['name'])).'", + "'.mysqli_real_escape_string($db, trim($image['description'])).'", + '.$_SESSION['serv_users_key'].', + '.mysqli_real_escape_string($db, $image['os']).', + '.mysqli_real_escape_string($db, $image['platform']).', + '.mysqli_real_escape_string($db, $image['core']).', + "'.mysqli_real_escape_string($db, $image['data']).'", + "'.$hash.'")'; + mysqli_query($db, $sql) or flocklab_die('Cannot save uploaded images because: ' . mysqli_error($db)); + $id = mysqli_insert_id($db); + mysqli_close($db); + return $id; } // validate test function validate_test($test_config_file, &$errors) { - global $CONFIG; - $validate_test_errors = array(); - $cmd = "python ".$CONFIG['tests']['testvalidator']." -x " . $test_config_file . " -s ".$CONFIG['xml']['schemapath']." -u " . $_SESSION['serv_users_key']; - exec($cmd , $output, $ret); - foreach ($output as $error) { - array_push($validate_test_errors, $error); - } - $errors = array_merge($errors, $validate_test_errors); - return count($validate_test_errors)==0; + global $CONFIG; + $validate_test_errors = array(); + $cmd = "python ".$CONFIG['tests']['testvalidator']." -x " . $test_config_file . " -s ".$CONFIG['xml']['schemapath']." -u " . 
$_SESSION['serv_users_key']; + exec($cmd , $output, $ret); + foreach ($output as $error) { + array_push($validate_test_errors, $error); + } + $errors = array_merge($errors, $validate_test_errors); + return count($validate_test_errors)==0; } @@ -448,214 +448,214 @@ function validate_test($test_config_file, &$errors) { ############################################################################## */ function trigger_scheduler() { - global $CONFIG; - // use SSH as a way to run the script under the user 'flocklab' - $cmd = "ssh ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." '".$CONFIG['testmanagementserver']['scheduler']." --debug > /dev/null 2>&1 &'"; // ignore output and background the command - exec($cmd); + global $CONFIG; + // use SSH as a way to run the script under the user 'flocklab' + $cmd = "ssh ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." '".$CONFIG['testmanagementserver']['scheduler']." --debug > /dev/null 2>&1 &'"; // ignore output and background the command + exec($cmd); } // check quota function check_quota($testconfig, $exclude_test = NULL, &$quota = NULL) { - if (isset($testconfig->generalConf->scheduleAbsolute)) { - $test_start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $test_end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $this_runtime = ($test_end->format("U") -$test_start->format("U")) / 60; - } - else { - $this_runtime = $testconfig->generalConf->scheduleAsap->durationSecs / 60; - } - $db = db_connect(); - // check quota - // 1. get scheduled tests / time for this user - $db = db_connect(); - $sql = 'SELECT SUM(TIME_TO_SEC(TIMEDIFF(`time_end_wish`,`time_start_wish`)))/60 as runtime, COUNT(*) as test_num - FROM `tbl_serv_tests` - WHERE `owner_fk` = ' . $_SESSION['serv_users_key']. ' AND (`test_status` IN("planned", "preparing", "running", "cleaning up", "aborting"))' .(is_null($exclude_test)?'':' AND `serv_tests_key`!='.$exclude_test); - $res = mysqli_query($db, $sql) or flocklab_die('Cannot check user quota because: ' . mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - $test_num = $row['test_num']; - if ($test_num==0) - $runtime = 0; - else - $runtime = $row['runtime']; - // 2. compare to quota - $sql = 'SELECT `quota_runtime`, `quota_tests` - FROM `tbl_serv_users` - WHERE `serv_users_key` = ' . $_SESSION['serv_users_key']; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot check user quota because: ' . mysqli_error($db)); - if (mysqli_num_rows($res)==1) { - $row = mysqli_fetch_assoc($res); - if ($quota != NULL) { - $quota['available']=array('runtime'=>$row['quota_runtime'], 'num'=>$row['quota_tests']); - $quota['needed']=array('runtime'=>round($this_runtime + $runtime,2), 'num'=>$test_num+1); - } - mysqli_close($db); - return (($test_num < $row['quota_tests']) && $this_runtime + $runtime <= $row['quota_runtime']); - } - mysqli_close($db); - return false; + if (isset($testconfig->generalConf->scheduleAbsolute)) { + $test_start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $test_end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $this_runtime = ($test_end->format("U") -$test_start->format("U")) / 60; + } + else { + $this_runtime = $testconfig->generalConf->scheduleAsap->durationSecs / 60; + } + $db = db_connect(); + // check quota + // 1. 
get scheduled tests / time for this user + $db = db_connect(); + $sql = 'SELECT SUM(TIME_TO_SEC(TIMEDIFF(`time_end_wish`,`time_start_wish`)))/60 as runtime, COUNT(*) as test_num + FROM `tbl_serv_tests` + WHERE `owner_fk` = ' . $_SESSION['serv_users_key']. ' AND (`test_status` IN("planned", "preparing", "running", "cleaning up", "aborting"))' .(is_null($exclude_test)?'':' AND `serv_tests_key`!='.$exclude_test); + $res = mysqli_query($db, $sql) or flocklab_die('Cannot check user quota because: ' . mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + $test_num = $row['test_num']; + if ($test_num==0) + $runtime = 0; + else + $runtime = $row['runtime']; + // 2. compare to quota + $sql = 'SELECT `quota_runtime`, `quota_tests` + FROM `tbl_serv_users` + WHERE `serv_users_key` = ' . $_SESSION['serv_users_key']; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot check user quota because: ' . mysqli_error($db)); + if (mysqli_num_rows($res)==1) { + $row = mysqli_fetch_assoc($res); + if ($quota != NULL) { + $quota['available']=array('runtime'=>$row['quota_runtime'], 'num'=>$row['quota_tests']); + $quota['needed']=array('runtime'=>round($this_runtime + $runtime,2), 'num'=>$test_num+1); + } + mysqli_close($db); + return (($test_num < $row['quota_tests']) && $this_runtime + $runtime <= $row['quota_runtime']); + } + mysqli_close($db); + return false; } // check schedulability function check_schedulability($testconfig, $exclude_test = NULL) { - global $CONFIG; - $db = db_connect(); - $guard_min = $CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']; - $guard_setup_min = $CONFIG['tests']['setuptime']; - $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; - $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $start->setTimeZone(new DateTimeZone("UTC")); - $end->setTimeZone(new DateTimeZone("UTC")); - $now = new DateTime (); - $now->setTimeZone(new DateTimeZone("UTC")); - // check setup time - if ($start->format("U") - ($guard_setup_min * 60) < $now->format("U")) { - $sched=false; - } - else { - // 1) check for other tests - $sql = 'SELECT COUNT(*) as test_num - FROM `tbl_serv_tests` - WHERE ' .(is_null($exclude_test)?'':' `serv_tests_key`!='.$exclude_test.' AND ').' ('. - // planned tests - '`test_status` NOT IN("finished","failed", "deleted", "todelete", "syncing", "synced", "retention expiring") AND ( - (`time_start_wish` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE) AND `time_end_wish` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE)) - OR (`time_start_wish` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE) AND `time_end_wish` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE))))'; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot check schedulability: ' . mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - $sched = $row['test_num']==0; - // 2) check for reservations - if ($sched) { - $sql = 'SELECT max(`user_fk` = '.$_SESSION['serv_users_key'].') as `reservation_match` - FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` - WHERE (`time_start` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE) AND `time_end` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE))'; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot check schedulability: ' . 
mysqli_error($db)); - if (mysqli_num_rows($res) > 0) { - $row = mysqli_fetch_assoc($res); - $sched = ($row['reservation_match']==1 || is_null($row['reservation_match'])); - } - // else no reservation - } - mysqli_close($db); - } - return $sched; + global $CONFIG; + $db = db_connect(); + $guard_min = $CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']; + $guard_setup_min = $CONFIG['tests']['setuptime']; + $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; + $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $start->setTimeZone(new DateTimeZone("UTC")); + $end->setTimeZone(new DateTimeZone("UTC")); + $now = new DateTime (); + $now->setTimeZone(new DateTimeZone("UTC")); + // check setup time + if ($start->format("U") - ($guard_setup_min * 60) < $now->format("U")) { + $sched=false; + } + else { + // 1) check for other tests + $sql = 'SELECT COUNT(*) as test_num + FROM `tbl_serv_tests` + WHERE ' .(is_null($exclude_test)?'':' `serv_tests_key`!='.$exclude_test.' AND ').' ('. + // planned tests + '`test_status` NOT IN("finished","failed", "deleted", "todelete", "syncing", "synced", "retention expiring") AND ( + (`time_start_wish` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE) AND `time_end_wish` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE)) + OR (`time_start_wish` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE) AND `time_end_wish` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE))))'; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot check schedulability: ' . mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + $sched = $row['test_num']==0; + // 2) check for reservations + if ($sched) { + $sql = 'SELECT max(`user_fk` = '.$_SESSION['serv_users_key'].') as `reservation_match` + FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` + WHERE (`time_start` < DATE_ADD("'.$end->format(DATE_ISO8601).'", INTERVAL '.($guard_min).' MINUTE) AND `time_end` > DATE_ADD("'.$start->format(DATE_ISO8601).'", INTERVAL '.(- $guard_min).' MINUTE))'; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot check schedulability: ' . mysqli_error($db)); + if (mysqli_num_rows($res) > 0) { + $row = mysqli_fetch_assoc($res); + $sched = ($row['reservation_match']==1 || is_null($row['reservation_match'])); + } + // else no reservation + } + mysqli_close($db); + } + return $sched; } // remove mappings function remove_test_mappings($testid) { - $db = db_connect(); - $sql = 'DELETE FROM `tbl_serv_map_test_observer_targetimages` - USING `tbl_serv_map_test_observer_targetimages` INNER JOIN `tbl_serv_tests` ON (`tbl_serv_map_test_observer_targetimages`.`test_fk` = `tbl_serv_tests`.`serv_tests_key` ) - WHERE `serv_tests_key` = ' .mysqli_real_escape_string($db, $testid); - mysqli_query($db, $sql) or flocklab_die('Cannot remove test mappings: ' . mysqli_error($db)); - mysqli_close($db); + $db = db_connect(); + $sql = 'DELETE FROM `tbl_serv_map_test_observer_targetimages` + USING `tbl_serv_map_test_observer_targetimages` INNER JOIN `tbl_serv_tests` ON (`tbl_serv_map_test_observer_targetimages`.`test_fk` = `tbl_serv_tests`.`serv_tests_key` ) + WHERE `serv_tests_key` = ' .mysqli_real_escape_string($db, $testid); + mysqli_query($db, $sql) or flocklab_die('Cannot remove test mappings: ' . 
mysqli_error($db)); + mysqli_close($db); } // add mappings function add_test_mappings($testId, $testconfig) { - $db = db_connect(); - // create mapping entries for every target that participates in the test - foreach($testconfig->targetConf as $tc) { - $observerIds = preg_split("/[\s]+/", trim($tc->obsIds)); - if (isset($tc->targetIds)) - $targedIds = preg_split("/[\s]+/", trim($tc->targetIds)); - else - $targedIds = $observerIds; - if(isset($tc->dbImageId)) { - $dbImageId = iterator_to_array($tc->dbImageId, false); - } - else - $dbImageId = Array('null'); - for($i = 0; $i<count($observerIds);$i++) { - $sql = "SELECT `serv_observer_key` - FROM `tbl_serv_observer` - WHERE `observer_id` = ".$observerIds[$i]; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - $obsKey = $row['serv_observer_key']; - foreach ($dbImageId as $img) { - $sql = "INSERT INTO `tbl_serv_map_test_observer_targetimages` (`observer_fk`, `test_fk`, `targetimage_fk`, `node_id`) - VALUES ( - " . $obsKey. ", - " . $testId . ", - " . $img . ", - " . mysqli_real_escape_string($db, trim($targedIds[$i])).")"; - mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - } - } - } - mysqli_close($db); + $db = db_connect(); + // create mapping entries for every target that participates in the test + foreach($testconfig->targetConf as $tc) { + $observerIds = preg_split("/[\s]+/", trim($tc->obsIds)); + if (isset($tc->targetIds)) + $targedIds = preg_split("/[\s]+/", trim($tc->targetIds)); + else + $targedIds = $observerIds; + if(isset($tc->dbImageId)) { + $dbImageId = iterator_to_array($tc->dbImageId, false); + } + else + $dbImageId = Array('null'); + for($i = 0; $i<count($observerIds);$i++) { + $sql = "SELECT `serv_observer_key` + FROM `tbl_serv_observer` + WHERE `observer_id` = ".$observerIds[$i]; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + $obsKey = $row['serv_observer_key']; + foreach ($dbImageId as $img) { + $sql = "INSERT INTO `tbl_serv_map_test_observer_targetimages` (`observer_fk`, `test_fk`, `targetimage_fk`, `node_id`) + VALUES ( + " . $obsKey. ", + " . $testId . ", + " . $img . ", + " . mysqli_real_escape_string($db, trim($targedIds[$i])).")"; + mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); + } + } + } + mysqli_close($db); } set_exception_handler('flocklab_die'); //flocklab_die function flocklab_die($status) { - global $LAYOUT; - if (isset($LAYOUT)) - layout_die($status); - else - die($status); + global $LAYOUT; + if (isset($LAYOUT)) + layout_die($status); + else + die($status); } $states = array( - // image, short desc., long desc. 
- 'planned'=>array('clock.gif','Planned','Test is planned'), - 'preparing'=>array('wait_small.gif','Preparing','Test is being prepared to run'), - 'running'=>array('wait_small.gif','Running','Test is running'), - 'cleaning up'=>array('wait_small.gif','Cleaning up','Test is being cleaned up'), - 'finished'=>array('finish.png','Finished','Test has finished'), - 'not schedulable'=>array('cancel.png','Not schedulable','Test is not schedulable for some reason, it can only be deleted'), - 'failed'=>array('cancel.png','Failed','Test failed to run'), - 'aborting'=>array('cancel.png','Aborting','Test is being aborted'), - 'syncing'=>array('wait_small.gif','Syncing data','Measurement data for this test is being synchronized'), - 'synced'=>array('wait_small.gif','Synced data','Measurement data for this test has been synchronized'), - 'retention expiring'=>array('att.png','Retention expiring','Retention time almost expired. Test configuration and measurement data will be deleted automatically soon.'), + // image, short desc., long desc. + 'planned'=>array('clock.gif','Planned','Test is planned'), + 'preparing'=>array('wait_small.gif','Preparing','Test is being prepared to run'), + 'running'=>array('wait_small.gif','Running','Test is running'), + 'cleaning up'=>array('wait_small.gif','Cleaning up','Test is being cleaned up'), + 'finished'=>array('finish.png','Finished','Test has finished'), + 'not schedulable'=>array('cancel.png','Not schedulable','Test is not schedulable for some reason, it can only be deleted'), + 'failed'=>array('cancel.png','Failed','Test failed to run'), + 'aborting'=>array('cancel.png','Aborting','Test is being aborted'), + 'syncing'=>array('wait_small.gif','Syncing data','Measurement data for this test is being synchronized'), + 'synced'=>array('wait_small.gif','Synced data','Measurement data for this test has been synchronized'), + 'retention expiring'=>array('att.png','Retention expiring','Retention time almost expired. 
Test configuration and measurement data will be deleted automatically soon.'), ); function state_icon($state) { - global $states; - $path = 'pics/icons/'; - if (array_key_exists ( $state, $states )) - return $path.$states[$state][0]; - else - return $path.'cancel.png'; + global $states; + $path = 'pics/icons/'; + if (array_key_exists ( $state, $states )) + return $path.$states[$state][0]; + else + return $path.'cancel.png'; } function state_short_description($state) { - global $states; - if (array_key_exists ( $state, $states )) - return $states[$state][1]; - else - return 'Unknown status.'; + global $states; + if (array_key_exists ( $state, $states )) + return $states[$state][1]; + else + return 'Unknown status.'; } function state_long_description($state) { - global $states; - if (array_key_exists ( $state, $states )) - return $states[$state][2]; - else - return 'Test is in unknown status.'; + global $states; + if (array_key_exists ( $state, $states )) + return $states[$state][2]; + else + return 'Test is in unknown status.'; } function date_to_tzdate($date) { - if (is_null($date)) - return ""; - $d = new DateTime($date); - return "<span class='time' style='display:none'>".$d->format("U")."</span>"; + if (is_null($date)) + return ""; + $d = new DateTime($date); + return "<span class='time' style='display:none'>".$d->format("U")."</span>"; } function countries() { - $c = file('include/countries.txt'); - return array_map( - function($line) { return trim(preg_replace('/[A-Z]*\|/','',$line)); }, - $c - ); + $c = file('include/countries.txt'); + return array_map( + function($line) { return trim(preg_replace('/[A-Z]*\|/','',$line)); }, + $c + ); } function explodeobsids($obsids) { - return explode(' ', trim(preg_replace('/\s\s+/',' ',$obsids))); + return explode(' ', trim(preg_replace('/\s\s+/',' ',$obsids))); } ############################################################################## @@ -666,29 +666,29 @@ function explodeobsids($obsids) { # ############################################################################## function resource_slots($duration, $targetnodes) { - global $CONFIG; - $resources = Array(); - $db = db_connect(); - foreach($targetnodes as $tn) { - $sql = " - SELECT ifnull(1*(`b`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(2*(`c`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(3*(`d`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(4*(`e`.`tg_adapt_types_fk`=".$tn['platform']."),0) as slot - FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` - WHERE `a`.`observer_id` = " .$tn['obsid']; - $res = mysqli_query($db, $sql); - if (mysqli_num_rows($res) == 1) { - $row = mysqli_fetch_assoc($res); - array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']) * 60, 'obsid'=>$tn['obsid'], 'restype'=>'slot_'.$row['slot'])); - } - } - mysqli_close($db); - return $resources; + global $CONFIG; + $resources = Array(); + $db = db_connect(); + foreach($targetnodes as $tn) { + $sql = " + SELECT ifnull(1*(`b`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + 
ifnull(2*(`c`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + ifnull(3*(`d`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + ifnull(4*(`e`.`tg_adapt_types_fk`=".$tn['platform']."),0) as slot + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` + WHERE `a`.`observer_id` = " .$tn['obsid']; + $res = mysqli_query($db, $sql); + if (mysqli_num_rows($res) == 1) { + $row = mysqli_fetch_assoc($res); + array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']) * 60, 'obsid'=>$tn['obsid'], 'restype'=>'slot_'.$row['slot'])); + } + } + mysqli_close($db); + return $resources; } ############################################################################## @@ -699,22 +699,22 @@ function resource_slots($duration, $targetnodes) { # ############################################################################## function resource_freq($duration, $targetnodes) { - global $CONFIG; - $db = db_connect(); - $sql = "SELECT serv_platforms_key, freq_2400, freq_868, freq_433 FROM `tbl_serv_platforms`"; - $res = mysqli_query($db, $sql); - $freqs = Array(); - while ($row = mysqli_fetch_assoc($res)) { - $freqs[$row['serv_platforms_key']] = $row; - } - $resources = Array(); - foreach($targetnodes as $tn) { - foreach(Array('freq_2400', 'freq_868', 'freq_433') as $restype) { - if ($freqs[$tn['platform']][$restype] == 1) - array_push($resources, Array('time_start'=>$CONFIG['tests']['setuptime'] * 60, 'time_end'=>$duration + $CONFIG['tests']['setuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>$restype)); - } - } - return $resources; + global $CONFIG; + $db = db_connect(); + $sql = "SELECT serv_platforms_key, freq_2400, freq_868, freq_433 FROM `tbl_serv_platforms`"; + $res = mysqli_query($db, $sql); + $freqs = Array(); + while ($row = mysqli_fetch_assoc($res)) { + $freqs[$row['serv_platforms_key']] = $row; + } + $resources = Array(); + foreach($targetnodes as $tn) { + foreach(Array('freq_2400', 'freq_868', 'freq_433') as $restype) { + if ($freqs[$tn['platform']][$restype] == 1) + array_push($resources, Array('time_start'=>$CONFIG['tests']['setuptime'] * 60, 'time_end'=>$duration + $CONFIG['tests']['setuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>$restype)); + } + } + return $resources; } ############################################################################## @@ -725,38 +725,38 @@ function resource_freq($duration, $targetnodes) { # ############################################################################## function resource_multiplexer($duration, $targetnodes, $xmlconfig) { - global $CONFIG; - $resources = Array(); - $ignoreObs = Array(); - # get the services which use the mux for the whole test - $serviceConfNames = Array('serialConf','gpioTracingConf','gpioActuationConf','powerProfilingConf'); - foreach($xmlconfig->children() as $c) { - if (in_array($c->getName(), $serviceConfNames)) { - if ($c->getName() == 'serialConf' && (!$c->port || $c->port == 'usb')) - continue; - foreach(explodeobsids($c->obsIds) as $obsid) { - if (! 
in_array($obsid, $ignoreObs)) { - array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']) * 60, 'obsid'=>(int)$obsid, 'restype'=>'mux')); - array_push($ignoreObs, $obsid); - } - } - } - } - -// TODO: if DAQ is used - foreach($targetnodes as $tn) { - if (! in_array($tn['obsid'], $ignoreObs)) { + global $CONFIG; + $resources = Array(); + $ignoreObs = Array(); + # get the services which use the mux for the whole test + $serviceConfNames = Array('serialConf','gpioTracingConf','gpioActuationConf','powerProfilingConf'); + foreach($xmlconfig->children() as $c) { + if (in_array($c->getName(), $serviceConfNames)) { + if ($c->getName() == 'serialConf' && (!$c->port || $c->port == 'usb')) + continue; + foreach(explodeobsids($c->obsIds) as $obsid) { + if (! in_array($obsid, $ignoreObs)) { + array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime']) * 60, 'obsid'=>(int)$obsid, 'restype'=>'mux')); + array_push($ignoreObs, $obsid); + } + } + } + } + +// TODO: if DAQ is used + foreach($targetnodes as $tn) { + if (! in_array($tn['obsid'], $ignoreObs)) { // NOTE: for now, just reserve mux for whole duration of test, until a single observer can properly handle parallel tests - if ($duration > ($CONFIG['tests']['guard_starttime'] + $CONFIG['tests']['guard_stoptime']) * 60) { - array_push($resources, Array('time_start'=>0, 'time_end'=>($CONFIG['tests']['setuptime'] + $CONFIG['tests']['guard_starttime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); - array_push($resources, Array('time_start'=>($CONFIG['tests']['setuptime'] - $CONFIG['tests']['guard_stoptime']) * 60 + $duration, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); - } - else { - array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); - } - } - } - return $resources; + if ($duration > ($CONFIG['tests']['guard_starttime'] + $CONFIG['tests']['guard_stoptime']) * 60) { + array_push($resources, Array('time_start'=>0, 'time_end'=>($CONFIG['tests']['setuptime'] + $CONFIG['tests']['guard_starttime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); + array_push($resources, Array('time_start'=>($CONFIG['tests']['setuptime'] - $CONFIG['tests']['guard_stoptime']) * 60 + $duration, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); + } + else { + array_push($resources, Array('time_start'=>0, 'time_end'=>$duration + ($CONFIG['tests']['setuptime'] + $CONFIG['tests']['cleanuptime'])*60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); + } + } + } + return $resources; } ############################################################################## @@ -766,30 +766,30 @@ function resource_multiplexer($duration, $targetnodes, $xmlconfig) { # ############################################################################## function resource_cleanup($targetnodes) { - global $CONFIG; - $resources = Array(); - $db = db_connect(); - foreach($targetnodes as $tn) { - $sql = " - SELECT ifnull(1*(`b`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(2*(`c`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(3*(`d`.`tg_adapt_types_fk`=".$tn['platform']."),0) - + ifnull(4*(`e`.`tg_adapt_types_fk`=".$tn['platform']."),0) as slot - 
FROM `tbl_serv_observer` AS `a` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` - LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` - WHERE `a`.`observer_id` = " .$tn['obsid']; - $res = mysqli_query($db, $sql); - if (mysqli_num_rows($res) == 1) { - $row = mysqli_fetch_assoc($res); - array_push($resources, Array('time_start'=>0, 'time_end'=>$CONFIG['tests']['cleanuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>'slot_'.$row['slot'])); - array_push($resources, Array('time_start'=>0, 'time_end'=>$CONFIG['tests']['cleanuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); - } - } - mysqli_close($db); - return $resources; + global $CONFIG; + $resources = Array(); + $db = db_connect(); + foreach($targetnodes as $tn) { + $sql = " + SELECT ifnull(1*(`b`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + ifnull(2*(`c`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + ifnull(3*(`d`.`tg_adapt_types_fk`=".$tn['platform']."),0) + + ifnull(4*(`e`.`tg_adapt_types_fk`=".$tn['platform']."),0) as slot + FROM `tbl_serv_observer` AS `a` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `b` ON `a`.`slot_1_tg_adapt_list_fk` = `b`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `c` ON `a`.`slot_2_tg_adapt_list_fk` = `c`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `d` ON `a`.`slot_3_tg_adapt_list_fk` = `d`.`serv_tg_adapt_list_key` + LEFT JOIN `tbl_serv_tg_adapt_list` AS `e` ON `a`.`slot_4_tg_adapt_list_fk` = `e`.`serv_tg_adapt_list_key` + WHERE `a`.`observer_id` = " .$tn['obsid']; + $res = mysqli_query($db, $sql); + if (mysqli_num_rows($res) == 1) { + $row = mysqli_fetch_assoc($res); + array_push($resources, Array('time_start'=>0, 'time_end'=>$CONFIG['tests']['cleanuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>'slot_'.$row['slot'])); + array_push($resources, Array('time_start'=>0, 'time_end'=>$CONFIG['tests']['cleanuptime'] * 60, 'obsid'=>$tn['obsid'], 'restype'=>'mux')); + } + } + mysqli_close($db); + return $resources; } ############################################################################## @@ -797,162 +797,162 @@ function resource_cleanup($targetnodes) { # schedule_test # # returns associative array: -# 'feasible' => True / False -# 'start_time' => DateTime time start wish -# 'end_time' => DateTime time end wish +# 'feasible' => True / False +# 'start_time' => DateTime time start wish +# 'end_time' => DateTime time end wish # # if test is ASAP, the next possible start time is reported ############################################################################## function schedule_test($testconfig, $resources, $exclude_test = NULL) { - global $CONFIG; - $db = db_connect(); - $guard_setup_min = $CONFIG['tests']['setuptime']; - $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; - $allow_parallel_tests = $CONFIG['tests']['allowparalleltests']; - $isAsap = isset($testconfig->generalConf->scheduleAsap); - $now = new DateTime (); - $now->setTimeZone(new DateTimeZone("UTC")); - // start is time start wish - setup time - // end is time end wish + cleanup time - if (!$isAsap) { - $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $start->setTimeZone(new 
DateTimeZone("UTC")); - $end->setTimeZone(new DateTimeZone("UTC")); - $start->modify('-'.$guard_setup_min.' minutes'); - $end->modify('+'.$guard_cleanup_min.' minutes'); - if ($start->format("U") < $now->format("U")) { - return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); - } - } - else { - $start = new DateTime(); // now - $end = clone $start; - $start->setTimeZone(new DateTimeZone("UTC")); - $end->setTimeZone(new DateTimeZone("UTC")); - $end->modify('+'.$testconfig->generalConf->scheduleAsap->durationSecs.' seconds'); - $end->modify('+'.($guard_setup_min + $guard_cleanup_min).' minutes'); - } - $resourcesdict = Array(); - foreach($resources as $r) { - if (!isset($resourcesdict[$r['obskey']][$r['restype']])) - $resourcesdict[$r['obskey']][$r['restype']] = Array(); - array_push($resourcesdict[$r['obskey']][$r['restype']], $r); - } - - $sql = "SELECT UNIX_TIMESTAMP(`time_start`) as `utime_start`, UNIX_TIMESTAMP(`time_end`) as `utime_end`, `observer_fk`, `resource_type` FROM `tbl_serv_resource_allocation` a left join tbl_serv_tests b on (b.serv_tests_key = a.test_fk) WHERE (`time_end` >= '".$start->format(DATE_ISO8601)."' and test_status in ('planned','preparing','running','cleaning up','syncing','synced','aborting')".(isset($exclude_test)?" and `test_fk`!=".$exclude_test:"").")"; - $res_usedresources = mysqli_query($db, $sql); - $sql = "SELECT UNIX_TIMESTAMP(`time_start`) as `utime_start`, UNIX_TIMESTAMP(`time_end`) as `utime_end`, max(ifnull(user_fk,-1) = ".$_SESSION['serv_users_key'].") as `reservation_match` FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` WHERE `time_end` >= '".$start->format(DATE_ISO8601)."' GROUP BY `serv_reservation_key` HAVING `reservation_match` = 0"; - $res_reservations = mysqli_query($db, $sql); - - # Now check for all resource usage intervals if they overlap in time with an already scheduled test or reservations - $shiftOffset = $start->format("U"); - $testShift = $start->format("U"); - while(True) { - $maxShift = 0; # keep track of largest shift needed to resolve dependencies - if (mysqli_num_rows($res_reservations)>0) { - mysqli_data_seek($res_reservations, 0); - $ustart = $testShift; - $uend = $end->format("U") + $testShift - $shiftOffset; - while($row = mysqli_fetch_assoc($res_reservations)) { - # for every ret, check for collisions - if($row['utime_start'] <= $uend and $row['utime_end'] >= $ustart) { - if (!$isAsap) - return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); - else { - $shift = $row['utime_end'] - $ustart; - if($shift > $maxShift) - $maxShift = $shift; - } - } - } - } - if (mysqli_num_rows($res_usedresources)>0) { - mysqli_data_seek($res_usedresources, 0); - while($row = mysqli_fetch_assoc($res_usedresources)) { - if (!$allow_parallel_tests) { - # if observer is used, then treat it as a collision (parallel tests on same observer cause problems) - if (isset($resourcesdict[$row['observer_fk']])) { - # observer is used by the new test, check the start and end times - $ustart = $testShift; - $uend = $end->format("U") + $testShift - $shiftOffset; - if($row['utime_start'] <= $uend and $row['utime_end'] >= $ustart) { - if (!$isAsap) - return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); - else { - $shift = $row['utime_end'] - $ustart; - if($shift > $maxShift) - $maxShift = $shift; - } - } - } - } else { - # for every ret, check for collisions - if (isset($resourcesdict[$row['observer_fk']]) && 
isset($resourcesdict[$row['observer_fk']][$row['resource_type']])) { - foreach($resourcesdict[$row['observer_fk']][$row['resource_type']] as $r) { -// echo "<!--";print_r($row);echo "-->"; - if($row['utime_start'] <= $r['time_end'] + $testShift and $row['utime_end'] >= $r['time_start'] + $testShift) { - if (!$isAsap) - return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); - else { - $shift = $row['utime_end'] - ($r['time_start'] + $testShift); - if($shift > $maxShift) - $maxShift = $shift; - } - } - } - } - } - } - } - if ($maxShift > 0) - $testShift = $testShift + $maxShift + 1; # shift by maxShift and repeat - else - break; # we found a valid schedule - } - mysqli_close($db); - $start->modify('+'.($testShift - $shiftOffset).' seconds'); - $end->modify('+'.($testShift - $shiftOffset).' seconds'); - $start->modify('+'.$guard_setup_min.' minutes'); - $end->modify('-'.$guard_cleanup_min.' minutes'); - return Array('feasible'=>True,'start_time'=>$start, 'end_time'=>$end); + global $CONFIG; + $db = db_connect(); + $guard_setup_min = $CONFIG['tests']['setuptime']; + $guard_cleanup_min = $CONFIG['tests']['cleanuptime']; + $allow_parallel_tests = $CONFIG['tests']['allowparalleltests']; + $isAsap = isset($testconfig->generalConf->scheduleAsap); + $now = new DateTime (); + $now->setTimeZone(new DateTimeZone("UTC")); + // start is time start wish - setup time + // end is time end wish + cleanup time + if (!$isAsap) { + $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $start->setTimeZone(new DateTimeZone("UTC")); + $end->setTimeZone(new DateTimeZone("UTC")); + $start->modify('-'.$guard_setup_min.' minutes'); + $end->modify('+'.$guard_cleanup_min.' minutes'); + if ($start->format("U") < $now->format("U")) { + return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); + } + } + else { + $start = new DateTime(); // now + $end = clone $start; + $start->setTimeZone(new DateTimeZone("UTC")); + $end->setTimeZone(new DateTimeZone("UTC")); + $end->modify('+'.$testconfig->generalConf->scheduleAsap->durationSecs.' seconds'); + $end->modify('+'.($guard_setup_min + $guard_cleanup_min).' minutes'); + } + $resourcesdict = Array(); + foreach($resources as $r) { + if (!isset($resourcesdict[$r['obskey']][$r['restype']])) + $resourcesdict[$r['obskey']][$r['restype']] = Array(); + array_push($resourcesdict[$r['obskey']][$r['restype']], $r); + } + + $sql = "SELECT UNIX_TIMESTAMP(`time_start`) as `utime_start`, UNIX_TIMESTAMP(`time_end`) as `utime_end`, `observer_fk`, `resource_type` FROM `tbl_serv_resource_allocation` a left join tbl_serv_tests b on (b.serv_tests_key = a.test_fk) WHERE (`time_end` >= '".$start->format(DATE_ISO8601)."' and test_status in ('planned','preparing','running','cleaning up','syncing','synced','aborting')".(isset($exclude_test)?" 
and `test_fk`!=".$exclude_test:"").")"; + $res_usedresources = mysqli_query($db, $sql); + $sql = "SELECT UNIX_TIMESTAMP(`time_start`) as `utime_start`, UNIX_TIMESTAMP(`time_end`) as `utime_end`, max(ifnull(user_fk,-1) = ".$_SESSION['serv_users_key'].") as `reservation_match` FROM `tbl_serv_reservations` LEFT JOIN `tbl_serv_user_groups` ON `group_fk`=`group_id_fk` WHERE `time_end` >= '".$start->format(DATE_ISO8601)."' GROUP BY `serv_reservation_key` HAVING `reservation_match` = 0"; + $res_reservations = mysqli_query($db, $sql); + + # Now check for all resource usage intervals if they overlap in time with an already scheduled test or reservations + $shiftOffset = $start->format("U"); + $testShift = $start->format("U"); + while(True) { + $maxShift = 0; # keep track of largest shift needed to resolve dependencies + if (mysqli_num_rows($res_reservations)>0) { + mysqli_data_seek($res_reservations, 0); + $ustart = $testShift; + $uend = $end->format("U") + $testShift - $shiftOffset; + while($row = mysqli_fetch_assoc($res_reservations)) { + # for every ret, check for collisions + if($row['utime_start'] <= $uend and $row['utime_end'] >= $ustart) { + if (!$isAsap) + return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); + else { + $shift = $row['utime_end'] - $ustart; + if($shift > $maxShift) + $maxShift = $shift; + } + } + } + } + if (mysqli_num_rows($res_usedresources)>0) { + mysqli_data_seek($res_usedresources, 0); + while($row = mysqli_fetch_assoc($res_usedresources)) { + if (!$allow_parallel_tests) { + # if observer is used, then treat it as a collision (parallel tests on same observer cause problems) + if (isset($resourcesdict[$row['observer_fk']])) { + # observer is used by the new test, check the start and end times + $ustart = $testShift; + $uend = $end->format("U") + $testShift - $shiftOffset; + if($row['utime_start'] <= $uend and $row['utime_end'] >= $ustart) { + if (!$isAsap) + return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); + else { + $shift = $row['utime_end'] - $ustart; + if($shift > $maxShift) + $maxShift = $shift; + } + } + } + } else { + # for every ret, check for collisions + if (isset($resourcesdict[$row['observer_fk']]) && isset($resourcesdict[$row['observer_fk']][$row['resource_type']])) { + foreach($resourcesdict[$row['observer_fk']][$row['resource_type']] as $r) { +// echo "<!--";print_r($row);echo "-->"; + if($row['utime_start'] <= $r['time_end'] + $testShift and $row['utime_end'] >= $r['time_start'] + $testShift) { + if (!$isAsap) + return Array('feasible'=>False, 'start_time'=>$start, 'end_time'=>$end); + else { + $shift = $row['utime_end'] - ($r['time_start'] + $testShift); + if($shift > $maxShift) + $maxShift = $shift; + } + } + } + } + } + } + } + if ($maxShift > 0) + $testShift = $testShift + $maxShift + 1; # shift by maxShift and repeat + else + break; # we found a valid schedule + } + mysqli_close($db); + $start->modify('+'.($testShift - $shiftOffset).' seconds'); + $end->modify('+'.($testShift - $shiftOffset).' seconds'); + $start->modify('+'.$guard_setup_min.' minutes'); + $end->modify('-'.$guard_cleanup_min.' 
minutes'); + return Array('feasible'=>True,'start_time'=>$start, 'end_time'=>$end); } // convert from asap to absolute function asap_to_absolute(&$testconfig, $starttime, $endtime) { - // modify test description - global $CONFIG; - $email = clone $testconfig->generalConf->emailResults; - unset($testconfig->generalConf->scheduleAsap); - unset($testconfig->generalConf->emailResults); - $testconfig->generalConf->addChild('scheduleAbsolute'); - $testconfig->generalConf->scheduleAbsolute->addChild('start', $starttime->format(DATE_W3C)); - $testconfig->generalConf->scheduleAbsolute->addChild('end', $endtime->format(DATE_W3C)); - if ($email != '') { - $testconfig->generalConf->addChild('emailResults', $email); - } + // modify test description + global $CONFIG; + $email = clone $testconfig->generalConf->emailResults; + unset($testconfig->generalConf->scheduleAsap); + unset($testconfig->generalConf->emailResults); + $testconfig->generalConf->addChild('scheduleAbsolute'); + $testconfig->generalConf->scheduleAbsolute->addChild('start', $starttime->format(DATE_W3C)); + $testconfig->generalConf->scheduleAbsolute->addChild('end', $endtime->format(DATE_W3C)); + if ($email != '') { + $testconfig->generalConf->addChild('emailResults', $email); + } } // add resource allocations to db function add_resource_allocation($testId, $resources, $starttime) { - global $CONFIG; - $db = db_connect(); - $starttime->modify('-'.$CONFIG['tests']['setuptime'].' minutes'); - foreach($resources as $r) { - $start = clone $starttime; - $end = clone $starttime; - $start->modify('+'.$r['time_start'].' seconds'); - $end->modify('+'.$r['time_end'].' seconds'); - $sql = "INSERT INTO `tbl_serv_resource_allocation` (`time_start`, `time_end`, `test_fk`, `observer_fk`, `resource_type`) - VALUES ( - '" . $start->format(DATE_ISO8601). "', - '" . $end->format(DATE_ISO8601) . "', - " . $testId . ", - " . $r['obskey'] . ", - '" .$r['restype']."')"; - mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - } - mysqli_close($db); + global $CONFIG; + $db = db_connect(); + $starttime->modify('-'.$CONFIG['tests']['setuptime'].' minutes'); + foreach($resources as $r) { + $start = clone $starttime; + $end = clone $starttime; + $start->modify('+'.$r['time_start'].' seconds'); + $end->modify('+'.$r['time_end'].' seconds'); + $sql = "INSERT INTO `tbl_serv_resource_allocation` (`time_start`, `time_end`, `test_fk`, `observer_fk`, `resource_type`) + VALUES ( + '" . $start->format(DATE_ISO8601). "', + '" . $end->format(DATE_ISO8601) . "', + " . $testId . ", + " . $r['obskey'] . ", + '" .$r['restype']."')"; + mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . 
mysqli_error($db)); + } + mysqli_close($db); } ############################################################################## @@ -966,279 +966,279 @@ function add_resource_allocation($testId, $resources, $starttime) { # ############################################################################## function update_add_test($xml_config, &$errors, $existing_test_id = NULL, $abort=False) { - global $CONFIG; - if (!$abort) { - $tmp_xmlfile = tempnam(sys_get_temp_dir(), 'flocklab'); - file_put_contents($tmp_xmlfile, $xml_config); - $valid = validate_test($tmp_xmlfile, $errors); - unlink($tmp_xmlfile); - } - else { - $valid = True; - } - if ($valid) { - $testconfig = new SimpleXMLElement($xml_config); - // If no IP address is given for serial service, use the one from which the test was uploaded: - foreach($testconfig->serialConf as $sc) { - if (!isset($sc->remoteIp)) { - if (preg_match ('/((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])/' , $_SERVER['REMOTE_ADDR'])) // we do not support ipv6 on our backend server - $sc->addChild('remoteIp', $_SERVER['REMOTE_ADDR']); - else { - array_push($errors, 'remoteIp: FlockLab does not support IPv6 addresses ('.$_SERVER['REMOTE_ADDR'].'). To use the <a href="https://www.flocklab.ethz.ch/wiki/wiki/Public/Man/Tutorials/Tutorial2#notes">Serial socket feature</a>, please provide an IPv4 address.'); - } - } - } - // extract embedded images - $used_embeddedImages = Array(); - $used_dbImages = Array(); - $embeddedImages = Array(); - $dbImages = Array(); - $available_os = get_available_os(); - $available_platforms = get_available_platforms(); - $targetnodes = Array(); - foreach($testconfig->targetConf as $tc) { - foreach($tc->embeddedImageId as $eId) { - $eId = trim($eId); - if(!in_array($eId, $used_embeddedImages)) - array_push($used_embeddedImages, $eId); - } - foreach($tc->dbImageId as $dbId) { - $dbId = (int)trim($dbId); - if(!in_array($dbId, $used_dbImages)) - array_push($used_dbImages, $dbId); - } - } - if (count($used_dbImages) > 0) { - $db = db_connect(); - $sql = "select `serv_targetimages_key`, `platforms_fk` from tbl_serv_targetimages where `serv_targetimages_key` in (".join(',',$used_dbImages).")"; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch platform information from database because: ' . mysqli_error($db)); - while ($row = mysqli_fetch_assoc($res)) - $dbImages[$row['serv_targetimages_key']] = Array('platform' => $row['platforms_fk']); - mysqli_close($db); - } - foreach($testconfig->imageConf as $im) { - $eId = trim($im->embeddedImageId); - if(array_key_exists($eId, $embeddedImages)) { - array_push($errors, "Provided embedded images do not have unique IDs."); - } - else { - $im_cpy = Array(); - $im_cpy['data'] = base64_decode($im->data); - $im_cpy['embeddedImageId'] = $eId; - $im_cpy['name'] = $im->name; - $im_cpy['description'] = $im->description; - $im_cpy['used'] = in_array($eId, $used_embeddedImages); - foreach($available_platforms as $key => $platform) - if (strcasecmp($platform[0]['name'], trim($im->platform)) == 0) - $im_cpy['platform'] = $key; - if(!isset($im->os) || !in_array($im->os, $available_os)) { - $im_os = 'other'; - } - foreach($available_os as $key => $os) { - if (strcasecmp($os, $im_os) == 0) { - $im_cpy['os'] = $key; - break; - } - } - $im_cpy['core'] = isset($im->core) ? 
$im->core : 0; - $embeddedImages[$eId] = $im_cpy; - } - } - // check if there are images without a data block: - foreach(array_keys($embeddedImages) as $imID) { - if(strlen($embeddedImages[$imID]['data']) == 0) { - // find the first entry which matches the platform (compare only first 3 characters) - foreach($embeddedImages as $eIm) { - if(strncmp($eIm['platform'], $embeddedImages[$imID]['platform'], 3) && strlen($eIm['data']) > 0) { - $embeddedImages[$imID]['data'] = $eIm['data']; // use the image data of this entry - break; - } - } - if(strlen($embeddedImages[$imID]['data']) == 0) { - // no image data found -> abort - array_push($errors, "No data provided for embedded image ID ".$imID."."); - break; - } - } - } - foreach($used_embeddedImages as $imId) - if (!array_key_exists($imId, $embeddedImages)) - array_push($errors, "Missing embedded image (ID ".$imId.")"); - if (empty($errors)) { - // check quota - if (!check_quota($testconfig, $existing_test_id)) - array_push($errors, "Not enough quota left to run this test."); - else { - # parallel stuff - # 1. calculate required resources: - foreach($testconfig->targetConf as $tc) { - if (count($tc->embeddedImageId)>0) { - $eId = trim($tc->embeddedImageId[0]); - foreach (explodeobsids($tc->obsIds) as $obsid) - array_push($targetnodes, Array('obsid'=>(int)$obsid,'platform'=>$embeddedImages[$eId]['platform'])); - } - else if (count($tc->dbImageId)>0) { - $dbId = (int)trim($tc->dbImageId[0]); - foreach (explodeobsids($tc->obsIds) as $obsid) - array_push($targetnodes, Array('obsid'=>(int)$obsid,'platform'=>$dbImages[$dbId]['platform'])); - } - } - if (isset($testconfig->generalConf->scheduleAsap->durationSecs)) { - $duration = $testconfig->generalConf->scheduleAsap->durationSecs; - } - else { - $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $start->setTimeZone(new DateTimeZone("UTC")); - $end->setTimeZone(new DateTimeZone("UTC")); - $duration = $end->format("U") - $start->format("U"); - } - # 1a. slots - $resources = Array(); - if ($abort===True) { - $resources = array_merge($resources, resource_cleanup($targetnodes)); - if (! isset($testconfig->generalConf->scheduleAsap)) { - unset($testconfig->generalConf->scheduleAbsolute); - $testconfig->generalConf->addChild('scheduleAsap'); - $testconfig->generalConf->scheduleAsap->addChild('durationSecs', -60 * $CONFIG['tests']['setuptime']); // no setup time, only cleanup - } - } - else { - $resources = array_merge($resources, resource_slots($duration,$targetnodes)); - # 1b. freq - $resources = array_merge($resources, resource_freq($duration,$targetnodes)); - # 1c. multiplexer - $resources = array_merge($resources, resource_multiplexer($duration,$targetnodes, $testconfig)); - } - #flocklab_log('Try to schedule test. Needed resources are: '. print_r($resources, $return = True)); - # fetch observer keys - $db = db_connect(); - $obskeys=Array(); - $sql = "select `serv_observer_key`, `observer_id` from tbl_serv_observer"; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch observer information from database because: ' . 
mysqli_error($db)); - while ($row = mysqli_fetch_assoc($res)) - $obskeys[$row['observer_id']] = $row['serv_observer_key']; - foreach ($resources as $i => $r) - $resources[$i]['obskey'] = $obskeys[$r['obsid']]; - $locktime = microtime(true); - acquire_db_lock('resource_allocation'); - $r = schedule_test($testconfig, $resources, $existing_test_id); - if ($abort) { // update test to abort - $db = db_connect(); - // only schedule abort procedure if test has been started and not yet finished - $sql = "SELECT `test_status` FROM tbl_serv_tests WHERE `serv_tests_key`=".$existing_test_id." AND `test_status` IN ('running', 'preparing')"; - $res = mysqli_query($db, $sql); - if (mysqli_num_rows($res)) { - // remove resource allocations - $sql = 'DELETE from tbl_serv_resource_allocation WHERE `test_fk` = ' .$existing_test_id; - mysqli_query($db, $sql) or flocklab_die('Cannot abort test: ' . mysqli_error($db)); - // update test entry - $end = $r['end_time']; - $sql = 'UPDATE `tbl_serv_tests` SET `time_end_wish`="'.mysqli_real_escape_string($db, $end->format(DATE_ISO8601)).'", `test_status`="aborting" WHERE `serv_tests_key`='.$existing_test_id; - mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - $testId = $existing_test_id; - mysqli_close($db); - add_resource_allocation($testId, $resources, $r['start_time']); - } - } - else { - if (!$r['feasible']) - array_push($errors, "Time slot is already used, pick another slot."); - else { - if (isset($testconfig->generalConf->scheduleAsap)) { - // convert from ASAP to absoulte - asap_to_absolute($testconfig, $r['start_time'], $r['end_time']); - } - // strip all embedded images from xml config - // add embedded images to db - $comment = ''; - while(count($testconfig->imageConf) > 0) { - $imgEId = trim($testconfig->imageConf[0]->embeddedImageId); - if ($embeddedImages[$imgEId]['used']) { - $imgId = check_image_duplicate($embeddedImages[$imgEId]); - if ($imgId === false) { - $imgId = store_image($embeddedImages[$imgEId]); - $comment.="<!-- saved embedded image '".$imgEId."' to database (ID: ".$imgId.")-->\n"; - } - else { - $comment.="<!-- reusing existing image from database (ID: ".$imgId.") for '".$imgEId."' -->\n"; - } - $embeddedImages[$imgEId]['dbimgid'] = $imgId; - } - else { - // just strip it - unset($embeddedImages[$imgEId]); - $comment.="<!-- stripped embedded image '".$imgEId."', image is not used in this test -->\n"; - } - unset($testconfig->imageConf[0]); - } - $xml_config = $testconfig->asXML(); - $xml_config = preg_replace('#</testConf>#s',$comment.'</testConf>', $xml_config); - foreach ($embeddedImages as $im) { - // replace embedded image id with db id - $xml_config = preg_replace('#(<)embedded(ImageId\s*[^>]*>)\s*'.$im['embeddedImageId'].'\s*(</)embedded(ImageId\s*>)#s','${1}db${2}'.$im['dbimgid'].'${3}db${4}',$xml_config); - } - // add test to db - $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $start->setTimeZone(new DateTimeZone("UTC")); - $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $end->setTimeZone(new DateTimeZone("UTC")); - if (isset($existing_test_id)) { // update test - // remove mappings and resource allocations - remove_test_mappings($existing_test_id); - $db = db_connect(); - $sql = 'DELETE from tbl_serv_resource_allocation WHERE `test_fk` = ' .$existing_test_id; - mysqli_query($db, $sql) or flocklab_die('Cannot modify test: ' . 
mysqli_error($db)); - // update test entry - $sql = 'UPDATE `tbl_serv_tests` SET - `title`="'.mysqli_real_escape_string($db, trim($testconfig->generalConf->name)).'", - `description`="'.mysqli_real_escape_string($db, trim($testconfig->generalConf->description)).'", - `testconfig_xml`="'.mysqli_real_escape_string($db, trim($xml_config)).'", - `time_start_wish`="'.mysqli_real_escape_string($db, $start->format(DATE_ISO8601)) .'", - `time_end_wish`="'.mysqli_real_escape_string($db, $end->format(DATE_ISO8601)).'", - `test_status`="planned" - WHERE `serv_tests_key`='.$existing_test_id; - mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - $testId = $existing_test_id; - mysqli_close($db); - } - else { - // add test entry - $db = db_connect(); - $sql = "INSERT INTO `tbl_serv_tests` (`title`, `description`, `owner_fk`, `testconfig_xml`, `time_start_wish`, `time_end_wish`, `test_status`) - VALUES ( - '" . mysqli_real_escape_string($db, trim($testconfig->generalConf->name)) . "', - '" . mysqli_real_escape_string($db, trim($testconfig->generalConf->description)) . "', - '" . mysqli_real_escape_string($db, $_SESSION['serv_users_key']) . "', - '" . mysqli_real_escape_string($db, trim($xml_config)) . "', - '" . mysqli_real_escape_string($db, $start->format(DATE_ISO8601)) . "', - '" . mysqli_real_escape_string($db, $end->format(DATE_ISO8601)) . "', - 'planned')"; - mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); - $testId = mysqli_insert_id($db); - mysqli_close($db); - } - // create mapping entries for every target that participates in the test - add_resource_allocation($testId, $resources, $r['start_time']); - $testconfig = new SimpleXMLElement($xml_config); - add_test_mappings($testId, $testconfig); - } - } - release_db_lock('resource_allocation'); - flocklab_log('Schedule for test id '.$testId.' is: '. print_r($r, $return = True)); - $time_elapsed_secs = microtime(true) - $locktime; - echo "<!-- db lock time was ".$time_elapsed_secs." s -->"; - // Ask the FlockLab scheduler to check for work: - trigger_scheduler(); - } - } - } - else - unlink($tmp_xmlfile); - if (!empty($errors)) - return Array('testId'=>Null, 'start'=>Null); - else - return Array('testId'=>$testId, 'start'=>$start); + global $CONFIG; + if (!$abort) { + $tmp_xmlfile = tempnam(sys_get_temp_dir(), 'flocklab'); + file_put_contents($tmp_xmlfile, $xml_config); + $valid = validate_test($tmp_xmlfile, $errors); + unlink($tmp_xmlfile); + } + else { + $valid = True; + } + if ($valid) { + $testconfig = new SimpleXMLElement($xml_config); + // If no IP address is given for serial service, use the one from which the test was uploaded: + foreach($testconfig->serialConf as $sc) { + if (!isset($sc->remoteIp)) { + if (preg_match ('/((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])/' , $_SERVER['REMOTE_ADDR'])) // we do not support ipv6 on our backend server + $sc->addChild('remoteIp', $_SERVER['REMOTE_ADDR']); + else { + array_push($errors, 'remoteIp: FlockLab does not support IPv6 addresses ('.$_SERVER['REMOTE_ADDR'].'). 
To use the <a href="https://www.flocklab.ethz.ch/wiki/wiki/Public/Man/Tutorials/Tutorial2#notes">Serial socket feature</a>, please provide an IPv4 address.'); + } + } + } + // extract embedded images + $used_embeddedImages = Array(); + $used_dbImages = Array(); + $embeddedImages = Array(); + $dbImages = Array(); + $available_os = get_available_os(); + $available_platforms = get_available_platforms(); + $targetnodes = Array(); + foreach($testconfig->targetConf as $tc) { + foreach($tc->embeddedImageId as $eId) { + $eId = trim($eId); + if(!in_array($eId, $used_embeddedImages)) + array_push($used_embeddedImages, $eId); + } + foreach($tc->dbImageId as $dbId) { + $dbId = (int)trim($dbId); + if(!in_array($dbId, $used_dbImages)) + array_push($used_dbImages, $dbId); + } + } + if (count($used_dbImages) > 0) { + $db = db_connect(); + $sql = "select `serv_targetimages_key`, `platforms_fk` from tbl_serv_targetimages where `serv_targetimages_key` in (".join(',',$used_dbImages).")"; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch platform information from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_assoc($res)) + $dbImages[$row['serv_targetimages_key']] = Array('platform' => $row['platforms_fk']); + mysqli_close($db); + } + foreach($testconfig->imageConf as $im) { + $eId = trim($im->embeddedImageId); + if(array_key_exists($eId, $embeddedImages)) { + array_push($errors, "Provided embedded images do not have unique IDs."); + } + else { + $im_cpy = Array(); + $im_cpy['data'] = base64_decode($im->data); + $im_cpy['embeddedImageId'] = $eId; + $im_cpy['name'] = $im->name; + $im_cpy['description'] = $im->description; + $im_cpy['used'] = in_array($eId, $used_embeddedImages); + foreach($available_platforms as $key => $platform) + if (strcasecmp($platform[0]['name'], trim($im->platform)) == 0) + $im_cpy['platform'] = $key; + if(!isset($im->os) || !in_array($im->os, $available_os)) { + $im_os = 'other'; + } + foreach($available_os as $key => $os) { + if (strcasecmp($os, $im_os) == 0) { + $im_cpy['os'] = $key; + break; + } + } + $im_cpy['core'] = isset($im->core) ? $im->core : 0; + $embeddedImages[$eId] = $im_cpy; + } + } + // check if there are images without a data block: + foreach(array_keys($embeddedImages) as $imID) { + if(strlen($embeddedImages[$imID]['data']) == 0) { + // find the first entry which matches the platform (compare only first 3 characters) + foreach($embeddedImages as $eIm) { + if(strncmp($eIm['platform'], $embeddedImages[$imID]['platform'], 3) && strlen($eIm['data']) > 0) { + $embeddedImages[$imID]['data'] = $eIm['data']; // use the image data of this entry + break; + } + } + if(strlen($embeddedImages[$imID]['data']) == 0) { + // no image data found -> abort + array_push($errors, "No data provided for embedded image ID ".$imID."."); + break; + } + } + } + foreach($used_embeddedImages as $imId) + if (!array_key_exists($imId, $embeddedImages)) + array_push($errors, "Missing embedded image (ID ".$imId.")"); + if (empty($errors)) { + // check quota + if (!check_quota($testconfig, $existing_test_id)) + array_push($errors, "Not enough quota left to run this test."); + else { + # parallel stuff + # 1. 
calculate required resources: + foreach($testconfig->targetConf as $tc) { + if (count($tc->embeddedImageId)>0) { + $eId = trim($tc->embeddedImageId[0]); + foreach (explodeobsids($tc->obsIds) as $obsid) + array_push($targetnodes, Array('obsid'=>(int)$obsid,'platform'=>$embeddedImages[$eId]['platform'])); + } + else if (count($tc->dbImageId)>0) { + $dbId = (int)trim($tc->dbImageId[0]); + foreach (explodeobsids($tc->obsIds) as $obsid) + array_push($targetnodes, Array('obsid'=>(int)$obsid,'platform'=>$dbImages[$dbId]['platform'])); + } + } + if (isset($testconfig->generalConf->scheduleAsap->durationSecs)) { + $duration = $testconfig->generalConf->scheduleAsap->durationSecs; + } + else { + $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $start->setTimeZone(new DateTimeZone("UTC")); + $end->setTimeZone(new DateTimeZone("UTC")); + $duration = $end->format("U") - $start->format("U"); + } + # 1a. slots + $resources = Array(); + if ($abort===True) { + $resources = array_merge($resources, resource_cleanup($targetnodes)); + if (! isset($testconfig->generalConf->scheduleAsap)) { + unset($testconfig->generalConf->scheduleAbsolute); + $testconfig->generalConf->addChild('scheduleAsap'); + $testconfig->generalConf->scheduleAsap->addChild('durationSecs', -60 * $CONFIG['tests']['setuptime']); // no setup time, only cleanup + } + } + else { + $resources = array_merge($resources, resource_slots($duration,$targetnodes)); + # 1b. freq + $resources = array_merge($resources, resource_freq($duration,$targetnodes)); + # 1c. multiplexer + $resources = array_merge($resources, resource_multiplexer($duration,$targetnodes, $testconfig)); + } + #flocklab_log('Try to schedule test. Needed resources are: '. print_r($resources, $return = True)); + # fetch observer keys + $db = db_connect(); + $obskeys=Array(); + $sql = "select `serv_observer_key`, `observer_id` from tbl_serv_observer"; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch observer information from database because: ' . mysqli_error($db)); + while ($row = mysqli_fetch_assoc($res)) + $obskeys[$row['observer_id']] = $row['serv_observer_key']; + foreach ($resources as $i => $r) + $resources[$i]['obskey'] = $obskeys[$r['obsid']]; + $locktime = microtime(true); + acquire_db_lock('resource_allocation'); + $r = schedule_test($testconfig, $resources, $existing_test_id); + if ($abort) { // update test to abort + $db = db_connect(); + // only schedule abort procedure if test has been started and not yet finished + $sql = "SELECT `test_status` FROM tbl_serv_tests WHERE `serv_tests_key`=".$existing_test_id." AND `test_status` IN ('running', 'preparing')"; + $res = mysqli_query($db, $sql); + if (mysqli_num_rows($res)) { + // remove resource allocations + $sql = 'DELETE from tbl_serv_resource_allocation WHERE `test_fk` = ' .$existing_test_id; + mysqli_query($db, $sql) or flocklab_die('Cannot abort test: ' . mysqli_error($db)); + // update test entry + $end = $r['end_time']; + $sql = 'UPDATE `tbl_serv_tests` SET `time_end_wish`="'.mysqli_real_escape_string($db, $end->format(DATE_ISO8601)).'", `test_status`="aborting" WHERE `serv_tests_key`='.$existing_test_id; + mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . 
mysqli_error($db)); + $testId = $existing_test_id; + mysqli_close($db); + add_resource_allocation($testId, $resources, $r['start_time']); + } + } + else { + if (!$r['feasible']) + array_push($errors, "Time slot is already used, pick another slot."); + else { + if (isset($testconfig->generalConf->scheduleAsap)) { + // convert from ASAP to absoulte + asap_to_absolute($testconfig, $r['start_time'], $r['end_time']); + } + // strip all embedded images from xml config + // add embedded images to db + $comment = ''; + while(count($testconfig->imageConf) > 0) { + $imgEId = trim($testconfig->imageConf[0]->embeddedImageId); + if ($embeddedImages[$imgEId]['used']) { + $imgId = check_image_duplicate($embeddedImages[$imgEId]); + if ($imgId === false) { + $imgId = store_image($embeddedImages[$imgEId]); + $comment.="<!-- saved embedded image '".$imgEId."' to database (ID: ".$imgId.")-->\n"; + } + else { + $comment.="<!-- reusing existing image from database (ID: ".$imgId.") for '".$imgEId."' -->\n"; + } + $embeddedImages[$imgEId]['dbimgid'] = $imgId; + } + else { + // just strip it + unset($embeddedImages[$imgEId]); + $comment.="<!-- stripped embedded image '".$imgEId."', image is not used in this test -->\n"; + } + unset($testconfig->imageConf[0]); + } + $xml_config = $testconfig->asXML(); + $xml_config = preg_replace('#</testConf>#s',$comment.'</testConf>', $xml_config); + foreach ($embeddedImages as $im) { + // replace embedded image id with db id + $xml_config = preg_replace('#(<)embedded(ImageId\s*[^>]*>)\s*'.$im['embeddedImageId'].'\s*(</)embedded(ImageId\s*>)#s','${1}db${2}'.$im['dbimgid'].'${3}db${4}',$xml_config); + } + // add test to db + $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $start->setTimeZone(new DateTimeZone("UTC")); + $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $end->setTimeZone(new DateTimeZone("UTC")); + if (isset($existing_test_id)) { // update test + // remove mappings and resource allocations + remove_test_mappings($existing_test_id); + $db = db_connect(); + $sql = 'DELETE from tbl_serv_resource_allocation WHERE `test_fk` = ' .$existing_test_id; + mysqli_query($db, $sql) or flocklab_die('Cannot modify test: ' . mysqli_error($db)); + // update test entry + $sql = 'UPDATE `tbl_serv_tests` SET + `title`="'.mysqli_real_escape_string($db, trim($testconfig->generalConf->name)).'", + `description`="'.mysqli_real_escape_string($db, trim($testconfig->generalConf->description)).'", + `testconfig_xml`="'.mysqli_real_escape_string($db, trim($xml_config)).'", + `time_start_wish`="'.mysqli_real_escape_string($db, $start->format(DATE_ISO8601)) .'", + `time_end_wish`="'.mysqli_real_escape_string($db, $end->format(DATE_ISO8601)).'", + `test_status`="planned" + WHERE `serv_tests_key`='.$existing_test_id; + mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); + $testId = $existing_test_id; + mysqli_close($db); + } + else { + // add test entry + $db = db_connect(); + $sql = "INSERT INTO `tbl_serv_tests` (`title`, `description`, `owner_fk`, `testconfig_xml`, `time_start_wish`, `time_end_wish`, `test_status`) + VALUES ( + '" . mysqli_real_escape_string($db, trim($testconfig->generalConf->name)) . "', + '" . mysqli_real_escape_string($db, trim($testconfig->generalConf->description)) . "', + '" . mysqli_real_escape_string($db, $_SESSION['serv_users_key']) . "', + '" . mysqli_real_escape_string($db, trim($xml_config)) . "', + '" . 
mysqli_real_escape_string($db, $start->format(DATE_ISO8601)) . "', + '" . mysqli_real_escape_string($db, $end->format(DATE_ISO8601)) . "', + 'planned')"; + mysqli_query($db, $sql) or flocklab_die('Cannot store test configuration in database because: ' . mysqli_error($db)); + $testId = mysqli_insert_id($db); + mysqli_close($db); + } + // create mapping entries for every target that participates in the test + add_resource_allocation($testId, $resources, $r['start_time']); + $testconfig = new SimpleXMLElement($xml_config); + add_test_mappings($testId, $testconfig); + } + } + release_db_lock('resource_allocation'); + flocklab_log('Schedule for test id '.$testId.' is: '. print_r($r, $return = True)); + $time_elapsed_secs = microtime(true) - $locktime; + echo "<!-- db lock time was ".$time_elapsed_secs." s -->"; + // Ask the FlockLab scheduler to check for work: + trigger_scheduler(); + } + } + } + else + unlink($tmp_xmlfile); + if (!empty($errors)) + return Array('testId'=>Null, 'start'=>Null); + else + return Array('testId'=>$testId, 'start'=>$start); } /* @@ -1252,26 +1252,26 @@ function update_add_test($xml_config, &$errors, $existing_test_id = NULL, $abort ############################################################################## */ function acquire_db_lock($key) { - $db = db_connect(); - $done = False; - while(!$done) { - $sql = "lock tables tbl_serv_locks write"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - $sql = "delete from tbl_serv_locks where expiry_time < now()"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - $sql = "select * from tbl_serv_locks where name='".$key."' limit 1"; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - if (mysqli_num_rows($res) == 0) { - $sql = "insert into tbl_serv_locks (name, expiry_time) values ('".$key."', ADDTIME(now(),1))"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - $done = True; - } - $sql = "unlock tables"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - if (!$done) - usleep(250000); - } - mysqli_close($db); + $db = db_connect(); + $done = False; + while(!$done) { + $sql = "lock tables tbl_serv_locks write"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + $sql = "delete from tbl_serv_locks where expiry_time < now()"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + $sql = "select * from tbl_serv_locks where name='".$key."' limit 1"; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + if (mysqli_num_rows($res) == 0) { + $sql = "insert into tbl_serv_locks (name, expiry_time) values ('".$key."', ADDTIME(now(),1))"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + $done = True; + } + $sql = "unlock tables"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . 
mysqli_error($db)); + if (!$done) + usleep(250000); + } + mysqli_close($db); } /* @@ -1284,14 +1284,14 @@ function acquire_db_lock($key) { ############################################################################## */ function release_db_lock($key) { - $db = db_connect(); - $sql = "lock tables tbl_serv_locks write"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - $sql = "delete from tbl_serv_locks where name = '".$key."'"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - $sql = "unlock tables"; - mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); - mysqli_close($db); + $db = db_connect(); + $sql = "lock tables tbl_serv_locks write"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + $sql = "delete from tbl_serv_locks where name = '".$key."'"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + $sql = "unlock tables"; + mysqli_query($db, $sql) or flocklab_die('Cannot acquire database lock because: ' . mysqli_error($db)); + mysqli_close($db); } diff --git a/webserver/user/include/logging.php b/webserver/user/include/logging.php index a40f2f4105a2ed709ee8cbafa11da162bbdb6d02..da2a1361798f67664694f7335eae9a1e5b302f88 100644 --- a/webserver/user/include/logging.php +++ b/webserver/user/include/logging.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2016, ETH Zurich, Switzerland" - * __license__ = "GPL" - * __version__ = "$Revision: 2435 $" - * __date__ = "$Date: 2013-09-27 16:03:15 +0200 (Fri, 27 Sep 2013) $" - * __id__ = "$Id: config.php 2435 2013-09-27 14:03:15Z walserc $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/include/config.php $" - */ + /* + * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2016, ETH Zurich, Switzerland" + * __license__ = "GPL" + * __version__ = "$Revision: 2435 $" + * __date__ = "$Date: 2013-09-27 16:03:15 +0200 (Fri, 27 Sep 2013) $" + * __id__ = "$Id: config.php 2435 2013-09-27 14:03:15Z walserc $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/include/config.php $" + */ ?> <?php require_once('include/config.php'); @@ -21,11 +21,11 @@ require_once('include/config.php'); */ function flocklab_log($msg) { - global $CONFIG; - $logfile = $CONFIG['testmanagementserver']['logdir']. '/webserver_user.log'; - $date = date('d/m/Y H:i:s T'); - $msg = trim($msg) ."\n"; - file_put_contents ( $logfile , $date.' - '.$_SERVER['SCRIPT_NAME'].' - '. (isset($_SESSION['username'])?$_SESSION['username']:'no user').' - '.$msg , $flags = FILE_APPEND ); + global $CONFIG; + $logfile = $CONFIG['testmanagementserver']['logdir']. '/webserver_user.log'; + $date = date('d/m/Y H:i:s T'); + $msg = trim($msg) ."\n"; + file_put_contents ( $logfile , $date.' - '.$_SERVER['SCRIPT_NAME'].' - '. (isset($_SESSION['username'])?$_SESSION['username']:'no user').' 
- '.$msg , $flags = FILE_APPEND ); } ?> diff --git a/webserver/user/include/presets.php b/webserver/user/include/presets.php index 95697edf94081cf2966631b549dc252af0f086fa..aacd36885b2e8953ec07190e922898b68271aebe 100644 --- a/webserver/user/include/presets.php +++ b/webserver/user/include/presets.php @@ -1,17 +1,17 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - require_once('include/libflocklab.php'); include_once('include/auth.php'); - //debug(); - // Set timezone to UTC: - date_default_timezone_set('UTC'); + require_once('include/libflocklab.php'); include_once('include/auth.php'); + //debug(); + // Set timezone to UTC: + date_default_timezone_set('UTC'); ?> diff --git a/webserver/user/include/recaptcha.php b/webserver/user/include/recaptcha.php index 8a2a3b6b5cce804692f613e540fed77add46e7f2..6c8cee059275764426b035b6e25ec804d66582a9 100755 --- a/webserver/user/include/recaptcha.php +++ b/webserver/user/include/recaptcha.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2018, ETH Zurich, Switzerland, Reto Da Forno" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2018, ETH Zurich, Switzerland, Reto Da Forno" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('config.php'); ?> <?php diff --git a/webserver/user/index.php b/webserver/user/index.php index 24bf4d693021dd1e614dc9ff0482a105f86a66e6..b8c2431d8d5761ac6f7429706d20d0900625eb05 100644 --- a/webserver/user/index.php +++ b/webserver/user/index.php @@ -1,357 +1,357 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2888 $" - * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" - * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2888 $" + * __date__ = "$Date: 2014-08-22 10:01:11 +0200 (Fri, 22 Aug 2014) $" + * __id__ = "$Id: index.php.normal 2888 2014-08-22 08:01:11Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/index.php.normal $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - function getResult(testid, timeout) { - // hide ui - if ($(".dlpane").length == 0) { - $("body").first().prepend('<div 
class="dlpane" style="position:absolute;margin:0;z-index:10000;width:100%;height:100%;background-color:#000;opacity:0.4;filter:alpha(opacity=40);"><\/div>'+ - '<div class="dlpane" style="position:absolute;font-family: Verdana, Arial, Helvetica, sans-serif;width:100%;z-index:10001;background-color:gray"><div class="info" style="width:100%"><div style="float:left;"><img height="50" width="50" alt="" src="pics/icons/wait.gif"><\/div>'+ - '<p>Please wait while test results are being fetched (Id '+testid+'). Depending on the amount of data this could take several minutes.. <\/p><\/div><\/div>' - ); - } - // make ajax query - $.ajax({ - type: "POST", - url: "result_download_archive.php", - data: { testid : testid }, - success: function(data) { - switch (data.status) { - case "fetching": - // poll again - setTimeout("getResult("+data.testid+", "+Math.min(1000,timeout + 100)+")",timeout); - break; - case "success": - // redirect - // new iframe - var frame_id = 'download_'+(new Date()).getTime(); - $('body').append('<iframe style="display:none" name="'+frame_id+'">'); - // set target - $("#downloadform").attr('target', frame_id); - $("[name=testid]","#downloadform").first().val(data.testid); - $("[name=query]","#downloadform").first().val("get"); - $("#downloadform").submit(); - // unhide ui - $(".dlpane").remove(); - break; - case "error": - // unhide ui - $(".dlpane").remove(); - alert("An error occurred: "+data.output); - break; - } - }, - dataType: "json" - }); - } - - function trackTest(testid, status) { - // make ajax query - $.ajax({ - type: "GET", - url: "test_feed.php", - data: { testid : testid }, - success: function(data) { - if (data.length==0 || data[0].test_status != status) { - document.location.href="index.php"; - } - else { - setTimeout("trackTest("+testid+",\""+status+"\")", 5000); - } - }, - fail: function() { - setTimeout("trackTest("+testid+",\""+status+"\")", 30000); - }, - dataType: "json" - }); - } - - var addedTests=Array(); - - function trackNewTests() { - // make ajax query - var now = new Date(); - now=Math.round(now.getTime() / 1000 - 3600 + now.getTimezoneOffset()*60); - var x = now; - $.ajax({ - type: "GET", - url: "test_feed.php", - data: { updatesince : now }, - success: function(data) { - $(data).each(function() { - var testid = parseInt(this.testid); - if ($.inArray(testid,addedTests)<0) { - // reload - document.location.href="index.php"; - } - }); - setTimeout("trackNewTests()", 5000); - }, - fail: function() { - setTimeout("trackNewTests()", 30000); - }, - dataType: "json" - }); - } - - function reschedule(el) { - $("i.starttime>form").each(function() { - var otime = $('input[name=starttime]', this).val(); - $(this).parent().bind("click", function() {reschedule(this)}); - $(this).parent().empty().append(otime); - }); - var otime = $(el).text(); - var testid = $("td:first-child", $(el).parents("tr")).text(); - $(el).empty().append('<form name="reschedule" method="post" action="test_edit.php"><input type="hidden" name="testid" value="'+testid+'"><input style="width:100%" name="starttime" type="text" value="'+otime+'"></form>') - $(el).unbind("click"); - } - - $(document).ready(function() { - var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); - $("#pager_num_rows").attr('value', table_rows); - $("#test_overview") - .tablesorter({widgets: ["zebra"] }) - .tablesorterPager({container: $("#pager"), positionFixed: false}); - $(".qtip_show").qtip( { - content: {text: false}, - style : "flocklab", - }); - $("#test_overview").show(); - 
$.cookie.json = true; - var test_tbl_state; - try { test_tbl_state = $.cookie('flocklab.testsort'); } - catch (err) { - test_tbl_state = null; - } - if ( test_tbl_state == null) { - test_tbl_state = {s: [[0,1]], p: 0}; - } - $("#test_overview").data('tablesorter').page = test_tbl_state.p; - $("#test_overview").trigger("sorton",[test_tbl_state.s]); - $("#test_overview").bind("applyWidgets",function() { - $.cookie('flocklab.testsort', {s:$("#test_overview").data('tablesorter').sortList, p:$("#test_overview").data('tablesorter').page}); - }); - // time change for not yet running tests - $("i.starttime").bind('click', function() {reschedule(this)}); - trackNewTests(); - }); + function getResult(testid, timeout) { + // hide ui + if ($(".dlpane").length == 0) { + $("body").first().prepend('<div class="dlpane" style="position:absolute;margin:0;z-index:10000;width:100%;height:100%;background-color:#000;opacity:0.4;filter:alpha(opacity=40);"><\/div>'+ + '<div class="dlpane" style="position:absolute;font-family: Verdana, Arial, Helvetica, sans-serif;width:100%;z-index:10001;background-color:gray"><div class="info" style="width:100%"><div style="float:left;"><img height="50" width="50" alt="" src="pics/icons/wait.gif"><\/div>'+ + '<p>Please wait while test results are being fetched (Id '+testid+'). Depending on the amount of data this could take several minutes.. <\/p><\/div><\/div>' + ); + } + // make ajax query + $.ajax({ + type: "POST", + url: "result_download_archive.php", + data: { testid : testid }, + success: function(data) { + switch (data.status) { + case "fetching": + // poll again + setTimeout("getResult("+data.testid+", "+Math.min(1000,timeout + 100)+")",timeout); + break; + case "success": + // redirect + // new iframe + var frame_id = 'download_'+(new Date()).getTime(); + $('body').append('<iframe style="display:none" name="'+frame_id+'">'); + // set target + $("#downloadform").attr('target', frame_id); + $("[name=testid]","#downloadform").first().val(data.testid); + $("[name=query]","#downloadform").first().val("get"); + $("#downloadform").submit(); + // unhide ui + $(".dlpane").remove(); + break; + case "error": + // unhide ui + $(".dlpane").remove(); + alert("An error occurred: "+data.output); + break; + } + }, + dataType: "json" + }); + } + + function trackTest(testid, status) { + // make ajax query + $.ajax({ + type: "GET", + url: "test_feed.php", + data: { testid : testid }, + success: function(data) { + if (data.length==0 || data[0].test_status != status) { + document.location.href="index.php"; + } + else { + setTimeout("trackTest("+testid+",\""+status+"\")", 5000); + } + }, + fail: function() { + setTimeout("trackTest("+testid+",\""+status+"\")", 30000); + }, + dataType: "json" + }); + } + + var addedTests=Array(); + + function trackNewTests() { + // make ajax query + var now = new Date(); + now=Math.round(now.getTime() / 1000 - 3600 + now.getTimezoneOffset()*60); + var x = now; + $.ajax({ + type: "GET", + url: "test_feed.php", + data: { updatesince : now }, + success: function(data) { + $(data).each(function() { + var testid = parseInt(this.testid); + if ($.inArray(testid,addedTests)<0) { + // reload + document.location.href="index.php"; + } + }); + setTimeout("trackNewTests()", 5000); + }, + fail: function() { + setTimeout("trackNewTests()", 30000); + }, + dataType: "json" + }); + } + + function reschedule(el) { + $("i.starttime>form").each(function() { + var otime = $('input[name=starttime]', this).val(); + $(this).parent().bind("click", function() {reschedule(this)}); + 
$(this).parent().empty().append(otime); + }); + var otime = $(el).text(); + var testid = $("td:first-child", $(el).parents("tr")).text(); + $(el).empty().append('<form name="reschedule" method="post" action="test_edit.php"><input type="hidden" name="testid" value="'+testid+'"><input style="width:100%" name="starttime" type="text" value="'+otime+'"></form>') + $(el).unbind("click"); + } + + $(document).ready(function() { + var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); + $("#pager_num_rows").attr('value', table_rows); + $("#test_overview") + .tablesorter({widgets: ["zebra"] }) + .tablesorterPager({container: $("#pager"), positionFixed: false}); + $(".qtip_show").qtip( { + content: {text: false}, + style : "flocklab", + }); + $("#test_overview").show(); + $.cookie.json = true; + var test_tbl_state; + try { test_tbl_state = $.cookie('flocklab.testsort'); } + catch (err) { + test_tbl_state = null; + } + if ( test_tbl_state == null) { + test_tbl_state = {s: [[0,1]], p: 0}; + } + $("#test_overview").data('tablesorter').page = test_tbl_state.p; + $("#test_overview").trigger("sorton",[test_tbl_state.s]); + $("#test_overview").bind("applyWidgets",function() { + $.cookie('flocklab.testsort', {s:$("#test_overview").data('tablesorter').sortList, p:$("#test_overview").data('tablesorter').page}); + }); + // time change for not yet running tests + $("i.starttime").bind('click', function() {reschedule(this)}); + trackNewTests(); + }); </script> <?php //echo '<br />Notice: Due to maintenance work in our building, some of the observers will be sporadically unavailable from ... to ... .<br /><br />'; echo '<h1>Manage Tests for '.$_SESSION['firstname'] . ' ' . $_SESSION['lastname']. '</h1>'; - /* Get all test of the current user from the database and display them in the table. */ - $db = db_connect(); - $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status - FROM tbl_serv_tests - WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND test_status <> 'deleted' AND test_status <> 'todelete' - ORDER BY serv_tests_key DESC"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get tests from database because: ' . mysqli_error($db)); - $nrows = mysqli_num_rows($rs); - mysqli_close($db); - - // If there are no tests for this user, display a message instead of the table: - if ($nrows == 0) { - echo "<p class='warning'><img alt='' src='pics/icons/att.png'>No test defined yet. 
Register your first test <a href='newtest.php'>here</a>.</p>"; - } - // If there are tests for this user, display them (with alternating row coloring): - else { - ?> - <table id="test_overview" class="tablesorter" style="display:none"> - <thead> - <tr> - <th width="50px">ID</th> - <th width="113px">Title</th> - <th width="135px">Description</th> - <th width="20px" class='qtip_show' title='State'>State</th> - <th width="190px">Start</th> - <th width="190px">End</th> - <th width="70px" class='qtip_show' title='Actions'>Actions</th> - </tr> - </thead> - <tbody> - <?php - $i = 0; - $max_len = 50; // maximum length of text before being cut - $js = '';$all = array(); - $now = new DateTime(); - $now = $now->format('U'); - while ($row = mysqli_fetch_array($rs)) { - // Find out the state of the test: - $schedulable = true; - $planned = false; - $running = false; - $finished = false; - $preparing = false; - $cleaningup = false; - $failed = false; - $aborting = false; - $syncing = false; - $synced = false; - $retentionexp = false; - switch($row['test_status']) { - case "planned": - $planned = true; - break; - case "preparing": - $preparing = true; - break; - case "running": - $running = true; - break; - case "cleaning up": - $cleaningup = true; - break; - case "finished": - $finished = true; - break; - case "not schedulable": - $schedulable = false; - break; - case "failed": - $failed = true; - break; - case "aborting": - $aborting = true; - break; - case "syncing": - $syncing = true; - break; - case "synced": - $synced = true; - break; - case "retention expiring": - $retentionexp = true; - break; - } - if ($row['test_status'] != "failed" && $row['test_status'] !="finished" && $row['test_status'] !="not schedulable" && $row['test_status'] !="retention expiring") { - $js .='setTimeout("trackTest('.$row['serv_tests_key'].',\"'.$row['test_status'].'\")", 3000);'."\n"; - } - $all[]=$row['serv_tests_key']; - $i++; - if ($i%2 == 1) { - echo "<tr class='even'>"; - } else { - echo "<tr class='odd'>"; - } - echo "<td>" . $row['serv_tests_key'] . "</td>"; - // Title. If longer than $max_len characters, display as tooltip: - echo "<td class='qtip_show' title='" . htmlentities($row['title']) . "'>" . htmlentities($row['title']) . "</td>"; - // Description. If longer than $max_len characters, display as tooltip: - if (strlen($row['description']) <= $max_len) - echo "<td>" . htmlentities($row['description']) . "</td>"; - else - echo "<td class='qtip_show' title='" . htmlentities($row['description']) . "'>" . htmlentities(substr($row['description'],0,$max_len)) . "...</td>"; - // Status: dependent of state of test - // check viz status - $viz = ""; - if (!is_null($row['time_start_act'])) { - $d = new DateTime($row['time_start_act']); - if ($now - $d->format('U') < 21 * 24 * 3600) { - if (file_exists($CONFIG['viz']['dir'].'/'.$row['serv_tests_key'].'_'. $_SESSION['serv_users_key'])) - $viz="<img src='pics/icons/chart.png' style='margin-left:5px' height='16px' alt='Results' title='Preview results' class='qtip_show' onClick='document.resprev.testid.value = " . $row['serv_tests_key'] . 
";document.resprev.submit();'>"; - } - } - echo "<td>"; - echo "<span style='display:none'>".$row['test_status']."</span>"; // needed to make cell sortable by JQuery - echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' >"; - echo "</td>"; - // Start time: dependent of state of test - if ( $running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { - echo "<td title='Actual start time' class='qtip_show'>" . date_to_tzdate($row['time_start_act']). "</td>"; - } - elseif ($preparing || $planned) { - echo "<td title='Planned start time' class='qtip_show'><i class='starttime'>" . date_to_tzdate($row['time_start_wish']). "</i></td>"; - } - else - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - // End time: dependent of state of test - if ($planned || $preparing || $running) { - echo "<td title='Planned end time' class='qtip_show'><i class='endtime'>" . date_to_tzdate($row['time_end_wish']). "</i></td>"; - } - elseif ($cleaningup || $finished || $failed || $syncing || $synced || $retentionexp) { - echo "<td title='Actual end time' class='qtip_show'>" . date_to_tzdate($row['time_end_act']). "</td>"; - } - elseif ($aborting) - echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; - elseif (!$schedulable) - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - else - echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; - // Actions: dependent of state of test - echo "<td>"; - if ($planned) { - echo "<span style='display:none'>planned</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; - echo "<img src='pics/icons/edit.png' style='margin-left:3px' height='16px' alt='Edit' title='Edit test' class='qtip_show' onClick='document.tstedt.testid.value = " . $row['serv_tests_key'] . ";document.tstedt.submit();'>"; - } elseif ($running) { - echo "<span style='display:none'>running</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/cancel.png' height='16px' alt='Abort' title='Abort test' class='qtip_show' onClick='document.tstabrt.testid.value = " . $row['serv_tests_key'] . ";document.tstabrt.submit();'>"; - echo "<img src='pics/icons/chart.png' style='margin-left:5px' height='16px' alt='Results' title='Preview results' class='qtip_show' onClick='document.resprev.testid.value = " . $row['serv_tests_key'] . ";document.resprev.submit();'>"; - } elseif ($preparing || $cleaningup || $syncing || $synced) { - echo "<span style='display:none'>preparing</span>"; // needed to make cell sortable by JQuery - //echo "<img src='pics/icons/cancel.png' height='16px' alt='Abort' title='Abort test' class='qtip_show' onClick='document.tstabrt.testid.value = " . $row['serv_tests_key'] . ";document.tstabrt.submit();'>"; - echo $viz; - } elseif ($finished) { - echo "<span style='display:none'>finished</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . 
";document.tstdel.submit();'>"; - echo "<img src='pics/icons/download.png' style='margin-left:5px' height='16px' alt='Download' title='Download results' class='qtip_show' onClick='getResult(".$row['serv_tests_key'].", 100);'>"; - echo $viz; - } elseif ($retentionexp) { - echo "<span style='display:none'>retentionexpiring</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; - echo "<img src='pics/icons/download.png' style='margin-left:5px' height='16px' alt='Download' title='Download results' class='qtip_show' onClick='getResult(".$row['serv_tests_key'].", 100);'>"; - echo $viz; - } elseif ($failed) { - echo "<span style='display:none'>notschedulable</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test.' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; - } elseif (!$schedulable) { - echo "<span style='display:none'>notschedulable</span>"; // needed to make cell sortable by JQuery - echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test.' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; - } elseif ($aborting) { - } else { - echo "<span style='display:none'>unknown</span>"; // needed to make cell sortable by JQuery - } - echo "<img src='pics/icons/preview.png' style='margin-left:5px' height='16px' alt='Download config' title='Download test configuration.' class='qtip_show' onClick='document.tstcdnl.testid.value = " . $row['serv_tests_key'] . 
";document.tstcdnl.submit();'>"; - echo "</td>"; - echo "</tr>"; - } - ?> - </tbody> - </table> - <span id="pager" class="pager"> - <img src="pics/icons/first.gif" alt="first" class="first"> - <img src="pics/icons/prev.gif" alt="prev" class="prev"> - <span class="pagedisplay"></span> - <img src="pics/icons/next.gif" alt="next" class="next"> - <img src="pics/icons/last.gif" alt="last" class="last"> - <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> - </span> <br > - <?php - echo "<script type=\"text/javascript\"> - $(document).ready(function() { - ".$js; - if (count($all)>0) - echo 'addedTests.push('.implode(',',$all).');'."\n"; - echo "});\n</script>"; - }?> - <!-- Forms for processing actions --> - <form name="tstdel" method="post" action="test_delete.php"><input type="hidden" name="testid" value=""></form> - <form name="tstedt" method="post" action="test_edit.php"><input type="hidden" name="testid" value=""></form> - <form name="tstabrt" method="post" action="test_abort.php"><input type="hidden" name="testid" value=""></form> - <form name="resprev" method="post" action="result_preview_viz.php"><input type="hidden" name="testid" value=""></form> - <form name="resdwn" method="post" id="downloadform" action="result_download_archive.php"><input type="hidden" name="testid" value=""><input type="hidden" name="query" value=""></form> - <form name="tstcdnl" method="post" action="testconfig_download.php"><input type="hidden" name="testid" value=""></form> - <p><img style="margin-top:2px;margin-right:10px;float:left" src="pics/icons/add.png" height="16px" alt="new test"><a style="color:#666666;text-decoration:none;" href="newtest.php"> add new test</a></p> + /* Get all test of the current user from the database and display them in the table. */ + $db = db_connect(); + $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status + FROM tbl_serv_tests + WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND test_status <> 'deleted' AND test_status <> 'todelete' + ORDER BY serv_tests_key DESC"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get tests from database because: ' . mysqli_error($db)); + $nrows = mysqli_num_rows($rs); + mysqli_close($db); + + // If there are no tests for this user, display a message instead of the table: + if ($nrows == 0) { + echo "<p class='warning'><img alt='' src='pics/icons/att.png'>No test defined yet. 
Register your first test <a href='newtest.php'>here</a>.</p>"; + } + // If there are tests for this user, display them (with alternating row coloring): + else { + ?> + <table id="test_overview" class="tablesorter" style="display:none"> + <thead> + <tr> + <th width="50px">ID</th> + <th width="113px">Title</th> + <th width="135px">Description</th> + <th width="20px" class='qtip_show' title='State'>State</th> + <th width="190px">Start</th> + <th width="190px">End</th> + <th width="70px" class='qtip_show' title='Actions'>Actions</th> + </tr> + </thead> + <tbody> + <?php + $i = 0; + $max_len = 50; // maximum length of text before being cut + $js = '';$all = array(); + $now = new DateTime(); + $now = $now->format('U'); + while ($row = mysqli_fetch_array($rs)) { + // Find out the state of the test: + $schedulable = true; + $planned = false; + $running = false; + $finished = false; + $preparing = false; + $cleaningup = false; + $failed = false; + $aborting = false; + $syncing = false; + $synced = false; + $retentionexp = false; + switch($row['test_status']) { + case "planned": + $planned = true; + break; + case "preparing": + $preparing = true; + break; + case "running": + $running = true; + break; + case "cleaning up": + $cleaningup = true; + break; + case "finished": + $finished = true; + break; + case "not schedulable": + $schedulable = false; + break; + case "failed": + $failed = true; + break; + case "aborting": + $aborting = true; + break; + case "syncing": + $syncing = true; + break; + case "synced": + $synced = true; + break; + case "retention expiring": + $retentionexp = true; + break; + } + if ($row['test_status'] != "failed" && $row['test_status'] !="finished" && $row['test_status'] !="not schedulable" && $row['test_status'] !="retention expiring") { + $js .='setTimeout("trackTest('.$row['serv_tests_key'].',\"'.$row['test_status'].'\")", 3000);'."\n"; + } + $all[]=$row['serv_tests_key']; + $i++; + if ($i%2 == 1) { + echo "<tr class='even'>"; + } else { + echo "<tr class='odd'>"; + } + echo "<td>" . $row['serv_tests_key'] . "</td>"; + // Title. If longer than $max_len characters, display as tooltip: + echo "<td class='qtip_show' title='" . htmlentities($row['title']) . "'>" . htmlentities($row['title']) . "</td>"; + // Description. If longer than $max_len characters, display as tooltip: + if (strlen($row['description']) <= $max_len) + echo "<td>" . htmlentities($row['description']) . "</td>"; + else + echo "<td class='qtip_show' title='" . htmlentities($row['description']) . "'>" . htmlentities(substr($row['description'],0,$max_len)) . "...</td>"; + // Status: dependent of state of test + // check viz status + $viz = ""; + if (!is_null($row['time_start_act'])) { + $d = new DateTime($row['time_start_act']); + if ($now - $d->format('U') < 21 * 24 * 3600) { + if (file_exists($CONFIG['viz']['dir'].'/'.$row['serv_tests_key'].'_'. $_SESSION['serv_users_key'])) + $viz="<img src='pics/icons/chart.png' style='margin-left:5px' height='16px' alt='Results' title='Preview results' class='qtip_show' onClick='document.resprev.testid.value = " . $row['serv_tests_key'] . 
";document.resprev.submit();'>"; + } + } + echo "<td>"; + echo "<span style='display:none'>".$row['test_status']."</span>"; // needed to make cell sortable by JQuery + echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' >"; + echo "</td>"; + // Start time: dependent of state of test + if ( $running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { + echo "<td title='Actual start time' class='qtip_show'>" . date_to_tzdate($row['time_start_act']). "</td>"; + } + elseif ($preparing || $planned) { + echo "<td title='Planned start time' class='qtip_show'><i class='starttime'>" . date_to_tzdate($row['time_start_wish']). "</i></td>"; + } + else + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + // End time: dependent of state of test + if ($planned || $preparing || $running) { + echo "<td title='Planned end time' class='qtip_show'><i class='endtime'>" . date_to_tzdate($row['time_end_wish']). "</i></td>"; + } + elseif ($cleaningup || $finished || $failed || $syncing || $synced || $retentionexp) { + echo "<td title='Actual end time' class='qtip_show'>" . date_to_tzdate($row['time_end_act']). "</td>"; + } + elseif ($aborting) + echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; + elseif (!$schedulable) + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + else + echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; + // Actions: dependent of state of test + echo "<td>"; + if ($planned) { + echo "<span style='display:none'>planned</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; + echo "<img src='pics/icons/edit.png' style='margin-left:3px' height='16px' alt='Edit' title='Edit test' class='qtip_show' onClick='document.tstedt.testid.value = " . $row['serv_tests_key'] . ";document.tstedt.submit();'>"; + } elseif ($running) { + echo "<span style='display:none'>running</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/cancel.png' height='16px' alt='Abort' title='Abort test' class='qtip_show' onClick='document.tstabrt.testid.value = " . $row['serv_tests_key'] . ";document.tstabrt.submit();'>"; + echo "<img src='pics/icons/chart.png' style='margin-left:5px' height='16px' alt='Results' title='Preview results' class='qtip_show' onClick='document.resprev.testid.value = " . $row['serv_tests_key'] . ";document.resprev.submit();'>"; + } elseif ($preparing || $cleaningup || $syncing || $synced) { + echo "<span style='display:none'>preparing</span>"; // needed to make cell sortable by JQuery + //echo "<img src='pics/icons/cancel.png' height='16px' alt='Abort' title='Abort test' class='qtip_show' onClick='document.tstabrt.testid.value = " . $row['serv_tests_key'] . ";document.tstabrt.submit();'>"; + echo $viz; + } elseif ($finished) { + echo "<span style='display:none'>finished</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . 
";document.tstdel.submit();'>"; + echo "<img src='pics/icons/download.png' style='margin-left:5px' height='16px' alt='Download' title='Download results' class='qtip_show' onClick='getResult(".$row['serv_tests_key'].", 100);'>"; + echo $viz; + } elseif ($retentionexp) { + echo "<span style='display:none'>retentionexpiring</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; + echo "<img src='pics/icons/download.png' style='margin-left:5px' height='16px' alt='Download' title='Download results' class='qtip_show' onClick='getResult(".$row['serv_tests_key'].", 100);'>"; + echo $viz; + } elseif ($failed) { + echo "<span style='display:none'>notschedulable</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test.' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; + } elseif (!$schedulable) { + echo "<span style='display:none'>notschedulable</span>"; // needed to make cell sortable by JQuery + echo "<img src='pics/icons/trash.png' height='16px' alt='Delete' title='Delete test.' class='qtip_show' onClick='document.tstdel.testid.value = " . $row['serv_tests_key'] . ";document.tstdel.submit();'>"; + } elseif ($aborting) { + } else { + echo "<span style='display:none'>unknown</span>"; // needed to make cell sortable by JQuery + } + echo "<img src='pics/icons/preview.png' style='margin-left:5px' height='16px' alt='Download config' title='Download test configuration.' class='qtip_show' onClick='document.tstcdnl.testid.value = " . $row['serv_tests_key'] . 
";document.tstcdnl.submit();'>"; + echo "</td>"; + echo "</tr>"; + } + ?> + </tbody> + </table> + <span id="pager" class="pager"> + <img src="pics/icons/first.gif" alt="first" class="first"> + <img src="pics/icons/prev.gif" alt="prev" class="prev"> + <span class="pagedisplay"></span> + <img src="pics/icons/next.gif" alt="next" class="next"> + <img src="pics/icons/last.gif" alt="last" class="last"> + <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> + </span> <br > + <?php + echo "<script type=\"text/javascript\"> + $(document).ready(function() { + ".$js; + if (count($all)>0) + echo 'addedTests.push('.implode(',',$all).');'."\n"; + echo "});\n</script>"; + }?> + <!-- Forms for processing actions --> + <form name="tstdel" method="post" action="test_delete.php"><input type="hidden" name="testid" value=""></form> + <form name="tstedt" method="post" action="test_edit.php"><input type="hidden" name="testid" value=""></form> + <form name="tstabrt" method="post" action="test_abort.php"><input type="hidden" name="testid" value=""></form> + <form name="resprev" method="post" action="result_preview_viz.php"><input type="hidden" name="testid" value=""></form> + <form name="resdwn" method="post" id="downloadform" action="result_download_archive.php"><input type="hidden" name="testid" value=""><input type="hidden" name="query" value=""></form> + <form name="tstcdnl" method="post" action="testconfig_download.php"><input type="hidden" name="testid" value=""></form> + <p><img style="margin-top:2px;margin-right:10px;float:left" src="pics/icons/add.png" height="16px" alt="new test"><a style="color:#666666;text-decoration:none;" href="newtest.php"> add new test</a></p> <?php do_layout('Manage Tests','Manage Tests'); ?> diff --git a/webserver/user/link_feed.php b/webserver/user/link_feed.php index c9063ad83c61d970246029cc52ac4c80de42879c..b4c311813e9d4f5acfe464f7e1ba9b2965fb775e 100644 --- a/webserver/user/link_feed.php +++ b/webserver/user/link_feed.php @@ -1,69 +1,69 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - require_once('include/auth.php'); - + require_once('include/auth.php'); + if (isset($_GET['p'])) { - //debug(); - $platform = explode( '_' , $_GET['p']); - $radio = count($platform) > 1?"LOWER(radio)='".strtolower($platform[1])."'":'(radio="" OR radio is NULL)'; - $platform = strtolower($platform[0]); - // Connect to database and get available measurements: - $db = db_connect(); - $sql = "SELECT serv_link_measurements_key, begin - FROM `flocklab`.tbl_serv_web_link_measurements - LEFT JOIN tbl_serv_platforms ON (serv_platforms_key = platform_fk) - WHERE LOWER(name)='".mysqli_real_escape_string($db, $platform)."' AND ".$radio." AND links is not NULL - ORDER BY begin ASC - "; - $rs = mysqli_query($db, $sql) or flocklab_die('Error: ' . mysqli_error($db)); - mysqli_close($db); - - // Build the array of tests. 
If possible, append start and/or end time to the message: - $tests = array( - 'dateTimeFormat'=>'iso8601', - 'events'=> array (), - ); - while ($row = mysqli_fetch_array($rs)) { - - $d = new DateTime($row['begin']); - $tests['events'][]=array( - 'start'=>$d->format(DATE_ISO8601), - //'end'=>$d->format(DATE_ISO8601), - 'durationEvent'=>FALSE, - 'title'=>'', - 'description'=>$row['serv_link_measurements_key'], - ); - } + //debug(); + $platform = explode( '_' , $_GET['p']); + $radio = count($platform) > 1?"LOWER(radio)='".strtolower($platform[1])."'":'(radio="" OR radio is NULL)'; + $platform = strtolower($platform[0]); + // Connect to database and get available measurements: + $db = db_connect(); + $sql = "SELECT serv_link_measurements_key, begin + FROM `flocklab`.tbl_serv_web_link_measurements + LEFT JOIN tbl_serv_platforms ON (serv_platforms_key = platform_fk) + WHERE LOWER(name)='".mysqli_real_escape_string($db, $platform)."' AND ".$radio." AND links is not NULL + ORDER BY begin ASC + "; + $rs = mysqli_query($db, $sql) or flocklab_die('Error: ' . mysqli_error($db)); + mysqli_close($db); + + // Build the array of tests. If possible, append start and/or end time to the message: + $tests = array( + 'dateTimeFormat'=>'iso8601', + 'events'=> array (), + ); + while ($row = mysqli_fetch_array($rs)) { + + $d = new DateTime($row['begin']); + $tests['events'][]=array( + 'start'=>$d->format(DATE_ISO8601), + //'end'=>$d->format(DATE_ISO8601), + 'durationEvent'=>FALSE, + 'title'=>'', + 'description'=>$row['serv_link_measurements_key'], + ); + } - // JSON-encode the array and return it to the calendar: - header('Content-Type: application/json; charset=utf-8'); - echo json_encode($tests); + // JSON-encode the array and return it to the calendar: + header('Content-Type: application/json; charset=utf-8'); + echo json_encode($tests); } else if (isset($_GET['q']) and is_numeric($_GET['q'])) { - // Connect to database and get link measurements: - $db = db_connect(); - $sql = "SELECT CONVERT(links USING utf8) as links - FROM `flocklab`.tbl_serv_web_link_measurements - WHERE serv_link_measurements_key=".$_GET['q']; - $rs = mysqli_query($db, $sql) or flocklab_die('Error: ' . mysqli_error($db)); - mysqli_close($db); - - if (mysqli_num_rows($rs) == 1) { - header('Content-Type: application/xml; charset=utf-8'); - $row = mysqli_fetch_array($rs); - echo $row['links']; - } + // Connect to database and get link measurements: + $db = db_connect(); + $sql = "SELECT CONVERT(links USING utf8) as links + FROM `flocklab`.tbl_serv_web_link_measurements + WHERE serv_link_measurements_key=".$_GET['q']; + $rs = mysqli_query($db, $sql) or flocklab_die('Error: ' . 
mysqli_error($db)); + mysqli_close($db); + + if (mysqli_num_rows($rs) == 1) { + header('Content-Type: application/xml; charset=utf-8'); + $row = mysqli_fetch_array($rs); + echo $row['links']; + } } ?> diff --git a/webserver/user/login.php b/webserver/user/login.php index adff7cdb265537d0b81486c5c17e13d00ef7fce3..e7217f29cbabc132b766b5d27e9370ed6814038e 100644 --- a/webserver/user/login.php +++ b/webserver/user/login.php @@ -1,105 +1,105 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - require_once('include/libflocklab.php'); - session_start(); + require_once('include/libflocklab.php'); + session_start(); - $first = ((isset($_REQUEST['first'])) && ($_REQUEST['first'] == "no")) ? false : true; - $login_msg = "Login failed."; - - # if already logged in, then redirect to index.php - if (isset($_SESSION['logged_in']) && $_SESSION['logged_in']) { - header('Location: https://'.$_SERVER[HTTP_HOST].substr($_SERVER[REQUEST_URI], 0, strrpos($_SERVER[REQUEST_URI], "/") + 1)."index.php"); - } - - if ($_SERVER['REQUEST_METHOD'] == 'POST') { - $hostname = $_SERVER['HTTP_HOST']; - $path = dirname($_SERVER['PHP_SELF']); - - // Forward to next page: - if ($_SERVER['SERVER_PROTOCOL'] == 'HTTP/1.1') { - if (substr(php_sapi_name(), 0, 3) == 'cgi') { - header('Status: 303 See Other'); - } - else { - header('HTTP/1.1 303 See Other'); - } - } - - $dologin = do_login($_POST['username'], $_POST['password']); - if ($dologin === true) { - if (isset($_SESSION['request_path'])) { - header('Location: https://'.$hostname.$_SESSION['request_path']); - unset($_SESSION['request_path']); - } - else { - header('Location: https://'.$hostname.($path == '/' ? '' : $path).'/index.php'); - } - exit; - } - else if ($dologin === false){ - header('Location: https://'.$hostname.($path == '/' ? '' : $path).'/login.php?first=no'); - exit; - } - else { - $login_msg = $dologin; - } - } + $first = ((isset($_REQUEST['first'])) && ($_REQUEST['first'] == "no")) ? false : true; + $login_msg = "Login failed."; + + # if already logged in, then redirect to index.php + if (isset($_SESSION['logged_in']) && $_SESSION['logged_in']) { + header('Location: https://'.$_SERVER[HTTP_HOST].substr($_SERVER[REQUEST_URI], 0, strrpos($_SERVER[REQUEST_URI], "/") + 1)."index.php"); + } + + if ($_SERVER['REQUEST_METHOD'] == 'POST') { + $hostname = $_SERVER['HTTP_HOST']; + $path = dirname($_SERVER['PHP_SELF']); + + // Forward to next page: + if ($_SERVER['SERVER_PROTOCOL'] == 'HTTP/1.1') { + if (substr(php_sapi_name(), 0, 3) == 'cgi') { + header('Status: 303 See Other'); + } + else { + header('HTTP/1.1 303 See Other'); + } + } + + $dologin = do_login($_POST['username'], $_POST['password']); + if ($dologin === true) { + if (isset($_SESSION['request_path'])) { + header('Location: https://'.$hostname.$_SESSION['request_path']); + unset($_SESSION['request_path']); + } + else { + header('Location: https://'.$hostname.($path == '/' ? '' : $path).'/index.php'); + } + exit; + } + else if ($dologin === false){ + header('Location: https://'.$hostname.($path == '/' ? 
'' : $path).'/login.php?first=no'); + exit; + } + else { + $login_msg = $dologin; + } + } ?> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" - "http://www.w3.org/TR/html4/loose.dtd"> + "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> - <link rel="stylesheet" type="text/css" href="css/flocklab.css"> - <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <title>FlockLab - Login</title> - <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> - <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> - <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> - <meta name="LANGUAGE" content="English"> - <meta name="ROBOTS" content="noindex, nofollow"> - <meta name="DATE" content="2011"> - <script type="text/javascript" src="scripts/jquery-latest.js"></script> - <script type="text/javascript" src="scripts/timezone-flocklab.js"></script> + <link rel="stylesheet" type="text/css" href="css/flocklab.css"> + <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <title>FlockLab - Login</title> + <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> + <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> + <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> + <meta name="LANGUAGE" content="English"> + <meta name="ROBOTS" content="noindex, nofollow"> + <meta name="DATE" content="2011"> + <script type="text/javascript" src="scripts/jquery-latest.js"></script> + <script type="text/javascript" src="scripts/timezone-flocklab.js"></script> </head> <body> - <div id="container" class="container"> - <div id="header" class="header"> - <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> - </div> <!-- END header --> - <div id="content" class="content"> - <div id="login" class="login"> - <?php - if (!$first) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>".$login_msg."</p>"; - echo "</div><p></p>"; - } - ?> - <div id="loginspan"> - <p class="warning"> - <img alt="" src="pics/icons/att.png"> - Javascript seems to be turned off in your browser. Turn it on to be able to use FlockLab. - </p> - </div> - </div> <!-- END login --> - </div> <!-- END content --> - <div style="clear:both"></div> - </div> <!-- END container --> - - <script type="text/javascript"> - document.getElementById('loginspan').innerHTML = '<form action="login.php" method="post"><table><tr><td colspan="2"><b>User Login for FlockLab<\/b><\/td><\/tr><tr><td>Username:<\/td><td><input name="username" id="username" type="text"><\/td><\/tr><tr><td>Password:<\/td><td><input name="password" type="password"><input name="first" type="hidden" value="no"><\/td><\/tr><tr><\/tr><tr><td><\/td><td>No login yet? Register here <a href="user_register.php"><img src="pics/icons/right_arrow.png"><\/a><br>Forgot password? 
Recover it <a href="user_passwordrecovery.php"><img src="pics/icons/right_arrow.png"><\/a><\/td><\/tr><tr><td><\/td><td><input type="submit" value="Login"><\/td><\/tr><\/table><\/form>'; - document.getElementById('username').focus(); - </script> + <div id="container" class="container"> + <div id="header" class="header"> + <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> + </div> <!-- END header --> + <div id="content" class="content"> + <div id="login" class="login"> + <?php + if (!$first) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>".$login_msg."</p>"; + echo "</div><p></p>"; + } + ?> + <div id="loginspan"> + <p class="warning"> + <img alt="" src="pics/icons/att.png"> + Javascript seems to be turned off in your browser. Turn it on to be able to use FlockLab. + </p> + </div> + </div> <!-- END login --> + </div> <!-- END content --> + <div style="clear:both"></div> + </div> <!-- END container --> + + <script type="text/javascript"> + document.getElementById('loginspan').innerHTML = '<form action="login.php" method="post"><table><tr><td colspan="2"><b>User Login for FlockLab<\/b><\/td><\/tr><tr><td>Username:<\/td><td><input name="username" id="username" type="text"><\/td><\/tr><tr><td>Password:<\/td><td><input name="password" type="password"><input name="first" type="hidden" value="no"><\/td><\/tr><tr><\/tr><tr><td><\/td><td>No login yet? Register here <a href="user_register.php"><img src="pics/icons/right_arrow.png"><\/a><br>Forgot password? Recover it <a href="user_passwordrecovery.php"><img src="pics/icons/right_arrow.png"><\/a><\/td><\/tr><tr><td><\/td><td><input type="submit" value="Login"><\/td><\/tr><\/table><\/form>'; + document.getElementById('username').focus(); + </script> </body> </html> diff --git a/webserver/user/logout.php b/webserver/user/logout.php index 806fe0ce8e8d186f4e533fb9e08a1778ba20db34..bb7c76e289ceda781b6e54d0000f4bc4684ec3d0 100644 --- a/webserver/user/logout.php +++ b/webserver/user/logout.php @@ -1,18 +1,18 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php - require_once('include/libflocklab.php'); + require_once('include/libflocklab.php'); - session_start(); + session_start(); // Destroy the session (tghis will also remove the temp directory). 
destroy_session(); diff --git a/webserver/user/newimage.php b/webserver/user/newimage.php index bc27711c0a8bb0d3ca75ae87d8e6f82082e9b30a..e6a5473d485b97e2acc0f8d868a5be93b994fd07 100755 --- a/webserver/user/newimage.php +++ b/webserver/user/newimage.php @@ -1,113 +1,113 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ - - // ini_set('display_errors', 1); - // error_reporting(E_ALL); + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ + + // ini_set('display_errors', 1); + // error_reporting(E_ALL); ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - $errors = array(); - $process = false; - if (isset($_POST['submit'])) { - $process = true; - $image = Array(); - // Get the file and check if it is a valid image - $imagefile = $_FILES['imagefile']; - if ($imagefile["error"] != 0) { - // There was an error during file upload: - array_push($errors, "There was an error when uploading the file."); - } - else { - $image['data']=file_get_contents($imagefile['tmp_name']); - } - foreach(Array('name','description','os') as $field) - $image[$field] = isset($_POST[$field])?$_POST[$field]:null; - if (isset($_POST['platform'])) { - $image['core'] = preg_replace('/.*_/','',$_POST['platform']); - $image['platform'] = preg_replace('/_.*/','',$_POST['platform']); - } - if (validate_image($image, $errors)) { - $dup = check_image_duplicate($image); - if ($dup!==false) - array_push($errors, "Image already exists in database (Id ".$dup.")"); - else - $image_id = store_image($image); - } - } + $errors = array(); + $process = false; + if (isset($_POST['submit'])) { + $process = true; + $image = Array(); + // Get the file and check if it is a valid image + $imagefile = $_FILES['imagefile']; + if ($imagefile["error"] != 0) { + // There was an error during file upload: + array_push($errors, "There was an error when uploading the file."); + } + else { + $image['data']=file_get_contents($imagefile['tmp_name']); + } + foreach(Array('name','description','os') as $field) + $image[$field] = isset($_POST[$field])?$_POST[$field]:null; + if (isset($_POST['platform'])) { + $image['core'] = preg_replace('/.*_/','',$_POST['platform']); + $image['platform'] = preg_replace('/_.*/','',$_POST['platform']); + } + if (validate_image($image, $errors)) { + $dup = check_image_duplicate($image); + if ($dup!==false) + array_push($errors, "Image already exists in database (Id ".$dup.")"); + else + $image_id = store_image($image); + } + } ?> - <?php - /* If the page is called with a file associated, validate it and show the results */ - if ($process && empty($errors)) { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<p>The image (Id ".$image_id.") has been successfully added.</p><ul>"; - echo "</div><p></p>"; - include('images.php'); - echo '<meta http-equiv="Refresh" content="10; URL=images.php">'; - exit(); - } - ?> - <script type="text/javascript"> - $(document).ready(function() { - $('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - 
$("#uploadform").validate({ - rules: { - imagefile: "required", - }, - errorPlacement: function(error, element) { - error.insertAfter(element); - } - }); - }); - </script> + <?php + /* If the page is called with a file associated, validate it and show the results */ + if ($process && empty($errors)) { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<p>The image (Id ".$image_id.") has been successfully added.</p><ul>"; + echo "</div><p></p>"; + include('images.php'); + echo '<meta http-equiv="Refresh" content="10; URL=images.php">'; + exit(); + } + ?> + <script type="text/javascript"> + $(document).ready(function() { + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + $("#uploadform").validate({ + rules: { + imagefile: "required", + }, + errorPlacement: function(error, element) { + error.insertAfter(element); + } + }); + }); + </script> - <h1>Upload Test Image</h1> -<?php - // Show validation errors: - if (!empty($errors)) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>Please correct the following errors:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . "</li>"; - echo "</div><p></p>"; - } - echo ' - <form id="uploadform" name="uploadform" method="post" action="newimage.php" enctype="multipart/form-data"> - <fieldset> - <legend>Upload new test image</legend> - <span class="formfield qtip_show" title="Provide file in ELF (Executable and Linkable Format) such as exe, srec or sba.">Image File (ELF):*</span><input type="file" name="imagefile" id="imagefile" size="27" class="required"><br /> - <span class="formfield">Name:*</span><input type="text" name="name" size="27" class="required" value="'.(isset($_POST['name'])?htmlentities($_POST['name']):'').'"><br /> - <span class="formfield">Description:</span><textarea name="description" size="27">'.(isset($_POST['description'])?htmlentities($_POST['description']):'').'</textarea><br /> - <span class="formfield">OS:*</span><select name="os" class="required"><option />'; - foreach(get_available_os() as $key => $os) { - echo '<option value="'.$key.'"'.(isset($_POST['os']) && $_POST['os']==$key?' selected="true"':'').'>'.$os.'</option>'; - } - echo '</select><br /> - <span class="formfield">Platform:*</span><select name="platform" class="required"><option />'; - foreach(get_available_platforms() as $key => $platform) { - foreach($platform as $pcore) { - $cdesc = strlen($pcore['core_desc'])>0?': '.$pcore['core_desc']:''; - $corekey = $key.'_'.$pcore['core']; - echo '<option value="'.$corekey.'"'.(isset($_POST['platform']) && $_POST['platform']==$key?' selected="true"':'').'>'.$pcore['name'].$cdesc.'</option>'; - } - } - echo '</select><br /> - </fieldset> - <p></p> - <input type="submit" name="submit" value="Upload image"> - </form>'; - ?> + <h1>Upload Test Image</h1> +<?php + // Show validation errors: + if (!empty($errors)) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>Please correct the following errors:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . 
"</li>"; + echo "</div><p></p>"; + } + echo ' + <form id="uploadform" name="uploadform" method="post" action="newimage.php" enctype="multipart/form-data"> + <fieldset> + <legend>Upload new test image</legend> + <span class="formfield qtip_show" title="Provide file in ELF (Executable and Linkable Format) such as exe, srec or sba.">Image File (ELF):*</span><input type="file" name="imagefile" id="imagefile" size="27" class="required"><br /> + <span class="formfield">Name:*</span><input type="text" name="name" size="27" class="required" value="'.(isset($_POST['name'])?htmlentities($_POST['name']):'').'"><br /> + <span class="formfield">Description:</span><textarea name="description" size="27">'.(isset($_POST['description'])?htmlentities($_POST['description']):'').'</textarea><br /> + <span class="formfield">OS:*</span><select name="os" class="required"><option />'; + foreach(get_available_os() as $key => $os) { + echo '<option value="'.$key.'"'.(isset($_POST['os']) && $_POST['os']==$key?' selected="true"':'').'>'.$os.'</option>'; + } + echo '</select><br /> + <span class="formfield">Platform:*</span><select name="platform" class="required"><option />'; + foreach(get_available_platforms() as $key => $platform) { + foreach($platform as $pcore) { + $cdesc = strlen($pcore['core_desc'])>0?': '.$pcore['core_desc']:''; + $corekey = $key.'_'.$pcore['core']; + echo '<option value="'.$corekey.'"'.(isset($_POST['platform']) && $_POST['platform']==$key?' selected="true"':'').'>'.$pcore['name'].$cdesc.'</option>'; + } + } + echo '</select><br /> + </fieldset> + <p></p> + <input type="submit" name="submit" value="Upload image"> + </form>'; + ?> <?php do_layout('Upload New Test Image','Manage Images'); diff --git a/webserver/user/newtest.php b/webserver/user/newtest.php index 756f72f8f3b0539d526ad62fb1216b7315468e9c..64fa96703899db830ef5dfc91dd57621789eaf3e 100755 --- a/webserver/user/newtest.php +++ b/webserver/user/newtest.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php @@ -29,7 +29,7 @@ else if (!$first) { } else if (!(in_array($xmlfile["type"], array("text/xml", "application/xml")))) { // The uploaded file is not XML: - array_push($errors, "Uploaded file is not XML."); + array_push($errors, "Uploaded file is not XML."); } else { $tmp_xmlfile = $xmlfile['tmp_name']; } @@ -37,18 +37,18 @@ else if (!$first) { // process config if (isset($tmp_xmlfile) && empty($errors)) { - $xml_config = file_get_contents($tmp_xmlfile); - $res = update_add_test($xml_config, $errors); + $xml_config = file_get_contents($tmp_xmlfile); + $res = update_add_test($xml_config, $errors); } - + ?> <?php /* If the page is called with a file associated, validate it and show the results */ if (isset($tmp_xmlfile) && empty($errors)) { echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; echo "<!-- cmd --><p>Test (Id ".$res['testId'].") successfully added.</p><!-- cmd --></div>"; 
- echo "<!-- flocklabscript,".$res['testId'].",".$res['start']->format(DATE_ISO8601).",".$res['start']->format("U")."-->"; + echo "<!-- flocklabscript,".$res['testId'].",".$res['start']->format(DATE_ISO8601).",".$res['start']->format("U")."-->"; echo "<p></p>"; include('index.php'); exit(); diff --git a/webserver/user/platform-support-tinyos.php b/webserver/user/platform-support-tinyos.php index b78d3a3cf09198dd3f134d7bc27541ce0361fc72..9e3d835e2a0a88b12d62d1c06d171aff5954aee3 100755 --- a/webserver/user/platform-support-tinyos.php +++ b/webserver/user/platform-support-tinyos.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 1296 $" - * __date__ = "$Date: 2011-08-12 17:06:17 +0200 (Fri, 12 Aug 2011) $" - * __id__ = "$Id: newtest.php 1296 2011-08-12 15:06:17Z walserc $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/newtest.php $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 1296 $" + * __date__ = "$Date: 2011-08-12 17:06:17 +0200 (Fri, 12 Aug 2011) $" + * __id__ = "$Id: newtest.php 1296 2011-08-12 15:06:17Z walserc $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/newtest.php $" + */ ?> <?php $dir="/home/flocklab/flocklab_downloads/platform-support/tinyos-2.x/"; diff --git a/webserver/user/result_download_archive.php b/webserver/user/result_download_archive.php index fb505a1000199bb80c015fe01abe92ecc37f1381..e26b756f38d6f3300efbf73cacf187c0f9c626ab 100755 --- a/webserver/user/result_download_archive.php +++ b/webserver/user/result_download_archive.php @@ -1,57 +1,57 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php include_once('include/presets.php');?> <?php // provide archive if (isset($_POST['testid']) && is_numeric($_POST['testid']) && check_testid($_POST['testid'],$_SESSION['serv_users_key'])) { - $testid = $_POST['testid']; - // check file - $archivepath = $CONFIG['testmanagementserver']['archivedir']; - $archive = - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." 
\"ls -l ".$archivepath.'/'.$testid.".tar.gz\""; - exec($cmd , $output, $ret); - if ($ret > 0) { - echo json_encode(array('status'=>'error', 'output'=>'data not available')); - exit(); - } - else { - $size = explode(' ', $output[0]); - $size = preg_replace('/([0-9]*) .*/','$1',$size[4]); - if (strlen($size)>0) - $size=intval($size); - else { - echo json_encode(array('status'=>'error', 'output'=>'could not determine archive size')); - exit(); - } - if (!isset($_POST['query']) || $_POST['query']!='get') { - echo json_encode(array('status'=>'success', 'testid'=>$testid)); - exit(); - } - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"cat ".$archivepath.'/'.$testid.".tar.gz\""; - $stream = popen($cmd, "r"); - // Send the file to the user's browser: - header("Content-Type: application/x-gzip"); - header("Content-Disposition: attachment; filename=\"flocklab_testresults_" . $_POST['testid'] . ".tar.gz\""); - header("Content-Length: ".$size); - $chunksize = 512*1024; - do { - echo fread($stream, $chunksize); - set_time_limit (30); - } - while (!feof($stream)); - pclose($stream); - } - } - else { - echo json_encode(array('status'=>'error', 'output'=>'unknown testid')); - } + $testid = $_POST['testid']; + // check file + $archivepath = $CONFIG['testmanagementserver']['archivedir']; + $archive = + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"ls -l ".$archivepath.'/'.$testid.".tar.gz\""; + exec($cmd , $output, $ret); + if ($ret > 0) { + echo json_encode(array('status'=>'error', 'output'=>'data not available')); + exit(); + } + else { + $size = explode(' ', $output[0]); + $size = preg_replace('/([0-9]*) .*/','$1',$size[4]); + if (strlen($size)>0) + $size=intval($size); + else { + echo json_encode(array('status'=>'error', 'output'=>'could not determine archive size')); + exit(); + } + if (!isset($_POST['query']) || $_POST['query']!='get') { + echo json_encode(array('status'=>'success', 'testid'=>$testid)); + exit(); + } + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"cat ".$archivepath.'/'.$testid.".tar.gz\""; + $stream = popen($cmd, "r"); + // Send the file to the user's browser: + header("Content-Type: application/x-gzip"); + header("Content-Disposition: attachment; filename=\"flocklab_testresults_" . $_POST['testid'] . 
".tar.gz\""); + header("Content-Length: ".$size); + $chunksize = 512*1024; + do { + echo fread($stream, $chunksize); + set_time_limit (30); + } + while (!feof($stream)); + pclose($stream); + } + } + else { + echo json_encode(array('status'=>'error', 'output'=>'unknown testid')); + } ?> diff --git a/webserver/user/result_preview_viz.php b/webserver/user/result_preview_viz.php index a399a58088f3b2f431521e4caf8925937671be54..394fe9c786bb235b63fcbdeb18fe503bfd6c306d 100755 --- a/webserver/user/result_preview_viz.php +++ b/webserver/user/result_preview_viz.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2012, ETH Zurich, Switzerland" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2012, ETH Zurich, Switzerland" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); @@ -18,449 +18,449 @@ if (isset($_GET['testid'])) else if (isset($_POST['testid'])) $testid = $_POST['testid']; if (isset($testid)) { - // check test_owner = user - if (check_testid($testid, $_SESSION['serv_users_key'])) { - $status = get_teststatus($testid); -// if ($status!='running' && $status!='cleaning up') { -// array_push($errors, "Only running tests have a test preview."); -// } - } - else - array_push($errors, "Test does not belong to you."); - // Show validation errors: - if (isset($errors)) { - if (!empty($errors)) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>Error occured:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . "</li>"; - echo "</div><p></p>"; - } else { + // check test_owner = user + if (check_testid($testid, $_SESSION['serv_users_key'])) { + $status = get_teststatus($testid); +// if ($status!='running' && $status!='cleaning up') { +// array_push($errors, "Only running tests have a test preview."); +// } + } + else + array_push($errors, "Test does not belong to you."); + // Show validation errors: + if (isset($errors)) { + if (!empty($errors)) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>Error occured:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . 
"</li>"; + echo "</div><p></p>"; + } else { $style=' <style type="text/css"> - .gpio0 {background-color: red } - .gpio1 {background-color: blue } - .gpio2 {background-color: #0ff } - .gpio3 {background-color: purple } - .gpio4 {background-color: yellow } + .gpio0 {background-color: red } + .gpio1 {background-color: blue } + .gpio2 {background-color: #0ff } + .gpio3 {background-color: purple } + .gpio4 {background-color: yellow } </style> '; ?> <script type="text/javascript" src="scripts/jquery-ui-1.8.21.custom.min.js"></script> <script type="text/javascript"> function log(msg) { - $('#log').append(msg+'<br>'); + $('#log').append(msg+'<br>'); } function slide_update(progress) { - if (progress===undefined) - var progress = 3; - var r = $('#slide').data('round'); - $('#slide').data('round', r+1); - var p = parseInt($('#slide').css('left')) - progress; - $('#slide').css('left', (p)+'px'); - view_start = slide_start - p / pps * 1e3; - view_end = view_start + parseInt($('#view').css('width')) / pps * 1e3; - - // update timeline - if (r % 3 == 0) - $('.timeline').each(function(index) { - var div = $(this); - var vizmeta = div.data('vizmeta'); - if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { - vizmeta.loading = true; - vizmeta.updater(div); - } - }); + if (progress===undefined) + var progress = 3; + var r = $('#slide').data('round'); + $('#slide').data('round', r+1); + var p = parseInt($('#slide').css('left')) - progress; + $('#slide').css('left', (p)+'px'); + view_start = slide_start - p / pps * 1e3; + view_end = view_start + parseInt($('#view').css('width')) / pps * 1e3; + + // update timeline + if (r % 3 == 0) + $('.timeline').each(function(index) { + var div = $(this); + var vizmeta = div.data('vizmeta'); + if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { + vizmeta.loading = true; + vizmeta.updater(div); + } + }); - // update power - if (r % 3 == 1) - $('.obs > .power').each(function(index) { - var obsdiv = $(this); - var vizmeta = obsdiv.data('vizmeta'); - if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { - vizmeta.loading = true; - vizmeta.updater(obsdiv); - } - }); - // update gpio - if (r % 3 == 2) - $('.obs > .gpio').each(function(index) { - var obsdiv = $(this); - var vizmeta = obsdiv.data('vizmeta'); - if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { - vizmeta.loading = true; - vizmeta.updater(obsdiv); - } - }); - // update player - if (r % 10 == 0) { - // test.start, test.end - // view_end - var player = $('#player_slider'); - var maxlen = player.width(); - var newpos = Math.round((view_end - test.start) / (test.end - test.start+1e3) * (player.width()-$('#player_slider').find('img').first().width())); + // update power + if (r % 3 == 1) + $('.obs > .power').each(function(index) { + var obsdiv = $(this); + var vizmeta = obsdiv.data('vizmeta'); + if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { + vizmeta.loading = true; + vizmeta.updater(obsdiv); + } + }); + // update gpio + if (r % 3 == 2) + $('.obs > .gpio').each(function(index) { + var obsdiv = $(this); + var vizmeta = obsdiv.data('vizmeta'); + if (!vizmeta.loading && (vizmeta.end < view_end + vizmeta.preloadtime)) { + vizmeta.loading = true; + vizmeta.updater(obsdiv); + } + }); + // update player + if (r % 10 == 0) { + // test.start, test.end + // view_end + var player = $('#player_slider'); + var maxlen = player.width(); + var newpos = Math.round((view_end - test.start) / (test.end - test.start+1e3) * 
(player.width()-$('#player_slider').find('img').first().width())); // prog.width(prog.width()+1); - if (progress > 0) - $('#player_slider').find('img').first().css('left',(newpos)+'px'); - $('#player_slider').find('div').first().width((newpos+9)+'px'); - } - + if (progress > 0) + $('#player_slider').find('img').first().css('left',(newpos)+'px'); + $('#player_slider').find('div').first().width((newpos+9)+'px'); + } + - if (reachedEnd()) - clearInterval(updateInterval); - + if (reachedEnd()) + clearInterval(updateInterval); + }; function updateTimeline(div) { - var vizmeta = div.data('vizmeta'); - div.append('<div style="position:absolute;left:'+Math.round(((vizmeta.end - slide_start) / 1e3 * pps))+'px;top:14px;margin:0;padding:0;height:6px;border-left:1px solid black;"><div style="text-align:center;width:60px;position:absolute;top:-15px;left:-30px">'+((vizmeta.end - test.start) / 1e3)+'<\/div><\/div>'); - vizmeta.end = vizmeta.end + 1e3; - $(div).children().each(function() { - if (parseInt($(this).css('left')) + 30 < -1 * parseInt($('#slide').css('left'))) { - // remove this tick - $(this).remove(); - } - }); + var vizmeta = div.data('vizmeta'); + div.append('<div style="position:absolute;left:'+Math.round(((vizmeta.end - slide_start) / 1e3 * pps))+'px;top:14px;margin:0;padding:0;height:6px;border-left:1px solid black;"><div style="text-align:center;width:60px;position:absolute;top:-15px;left:-30px">'+((vizmeta.end - test.start) / 1e3)+'<\/div><\/div>'); + vizmeta.end = vizmeta.end + 1e3; + $(div).children().each(function() { + if (parseInt($(this).css('left')) + 30 < -1 * parseInt($('#slide').css('left'))) { + // remove this tick + $(this).remove(); + } + }); - if (vizmeta.end < test.end) - vizmeta.loading = false; + if (vizmeta.end < test.end) + vizmeta.loading = false; } function removeOld(obsdiv) { - var oldestdiv = $('img',obsdiv).first(); - if (parseInt($(oldestdiv).css('left')) + parseInt($(oldestdiv).css('width')) < -1 * parseInt($('#slide').css('left'))) { - // remove this image - $(oldestdiv).remove(); - return 1; - } - return 0; + var oldestdiv = $('img',obsdiv).first(); + if (parseInt($(oldestdiv).css('left')) + parseInt($(oldestdiv).css('width')) < -1 * parseInt($('#slide').css('left'))) { + // remove this image + $(oldestdiv).remove(); + return 1; + } + return 0; } - + function powerLoadNext(obsdiv) { - var vizmeta = obsdiv.data('vizmeta'); - var starttime = vizmeta.end - 10; - var obsid = vizmeta.obsid; - $.ajax({ - url: 'viz_feed.php?t='+test.id+'&o='+obsid+'&s='+starttime+'&m=0', - type: 'GET', - // data: { t:""+test.id, o:""+obsid, s:""+starttime, m:"0", q:"1"}, - success: function(data, textStatus, jqXHR) { - var imgstarttime = parseInt(jqXHR.getResponseHeader('Start-Time')); - // add image to slide - var img = $('<img src="" alt="" style="position:absolute;left:'+Math.round(((imgstarttime- slide_start) / 1e3 * pps))+'px">'); - $(img).load(function(evtObj) { - $(obsdiv).append($(this)); - var w = $(this).width(); - vizmeta.end = imgstarttime + w / pps * 1e3; - if (vizmeta.end < view_end + vizmeta.preloadtime) { - powerLoadNext(obsdiv); - } - else { - if (vizmeta.end < test.end) - vizmeta.loading = false; - removeOld(obsdiv); - } - }); - $(img).attr('src', 'viz_feed.php?t='+test.id+'&o='+obsid+'&s='+starttime+'&m=0'); - }, - error: function(data, textStatus, jqXHR) { - // start timer to retry - if (starttime < test.end) { - setTimeout(function() { - if (vizmeta.end < test.end) - vizmeta.loading = false; - }, retryTimeout); - } - } - }); + var vizmeta = 
obsdiv.data('vizmeta'); + var starttime = vizmeta.end - 10; + var obsid = vizmeta.obsid; + $.ajax({ + url: 'viz_feed.php?t='+test.id+'&o='+obsid+'&s='+starttime+'&m=0', + type: 'GET', + // data: { t:""+test.id, o:""+obsid, s:""+starttime, m:"0", q:"1"}, + success: function(data, textStatus, jqXHR) { + var imgstarttime = parseInt(jqXHR.getResponseHeader('Start-Time')); + // add image to slide + var img = $('<img src="" alt="" style="position:absolute;left:'+Math.round(((imgstarttime- slide_start) / 1e3 * pps))+'px">'); + $(img).load(function(evtObj) { + $(obsdiv).append($(this)); + var w = $(this).width(); + vizmeta.end = imgstarttime + w / pps * 1e3; + if (vizmeta.end < view_end + vizmeta.preloadtime) { + powerLoadNext(obsdiv); + } + else { + if (vizmeta.end < test.end) + vizmeta.loading = false; + removeOld(obsdiv); + } + }); + $(img).attr('src', 'viz_feed.php?t='+test.id+'&o='+obsid+'&s='+starttime+'&m=0'); + }, + error: function(data, textStatus, jqXHR) { + // start timer to retry + if (starttime < test.end) { + setTimeout(function() { + if (vizmeta.end < test.end) + vizmeta.loading = false; + }, retryTimeout); + } + } + }); } function gpioLoadNext(obsdiv) { - var vizmeta = obsdiv.data('vizmeta'); - var starttime = vizmeta.end-10; - if (vizmeta.lastQueryTime > 0 && starttime <= vizmeta.lastQueryTime) - starttime = vizmeta.lastQueryTime + 1; - vizmeta.lastQueryTime = starttime; - var obsid = vizmeta.obsid; - $.ajax({ - url: 'viz_feed.php', - dataType: 'json', - type: 'GET', - data: { t:""+test.id, o:""+obsid, s:""+starttime, m:"1"}, - success: function(data, textStatus, jqXHR) { - var gpiostarttime = parseInt(jqXHR.getResponseHeader('Start-Time')); - var state = vizmeta.state; - // add events to slide - var newdiv = $('<div style="position:absolute;left:'+Math.round((gpiostarttime - slide_start) / 1e3 * pps)+'px"><\/div>'); - $(data.e).each(function(){ - // l p t - var p; - // gpio to array: 71 > LED1, 70 > LED2, 69 > LED3, 113 > INT1, 87 > INT2 - switch (this.p) { - case 71: p=0; break; - case 70: p=1; break; - case 69: p=2; break; - case 113:p=3; break; - case 87: p=4; break; - } - // <div style="position:absolute;left:500px;width:100px;height:5px;background-color:red"><\/div> - if (this.l==0) { - if (state[p].l == 1) { - newdiv.append('<div class="gpio'+p+'" style="position:absolute;top:'+(p*5)+'px;left:'+Math.round((state[p].t - gpiostarttime)/ 1e3 * pps)+'px;width:'+Math.max(1, Math.round((gpiostarttime + this.t - state[p].t) / 1e3 * pps))+'px;height:5px"><\/div>'); - state[p].l = 0;state[p].t=gpiostarttime+this.t; - } - } - else { // rising - if (state[p].l == 0) - state[p].l=1;state[p].t=gpiostarttime+this.t; - } - - state[p].l=this.l; - state[p].t=gpiostarttime+this.t; - }); - - obsdiv.append(newdiv); - // check existing divs to remove - var oldestdiv = $('div', obsdiv).first(); - var bordereventdiv = $('div',oldestdiv).last(); - if ((parseInt(bordereventdiv.css('left')) + parseInt(bordereventdiv.css('width')) + parseInt(oldestdiv.css('left'))) < -1 * parseInt($('#slide').css('left'))) - $(oldestdiv).remove(); - vizmeta.end = gpiostarttime+data.e[data.e.length-1].t; - if (vizmeta.end < view_end + vizmeta.preloadtime) - gpioLoadNext(obsdiv); - else - vizmeta.loading = false; - }, - error: function(data, textStatus, jqXHR) { - // start timer to retry - if (starttime < test.end) { - setTimeout(function() { - if (vizmeta.end < test.end) - vizmeta.loading = false; - }, retryTimeout); - } - } - }); + var vizmeta = obsdiv.data('vizmeta'); + var starttime = vizmeta.end-10; + if 
(vizmeta.lastQueryTime > 0 && starttime <= vizmeta.lastQueryTime) + starttime = vizmeta.lastQueryTime + 1; + vizmeta.lastQueryTime = starttime; + var obsid = vizmeta.obsid; + $.ajax({ + url: 'viz_feed.php', + dataType: 'json', + type: 'GET', + data: { t:""+test.id, o:""+obsid, s:""+starttime, m:"1"}, + success: function(data, textStatus, jqXHR) { + var gpiostarttime = parseInt(jqXHR.getResponseHeader('Start-Time')); + var state = vizmeta.state; + // add events to slide + var newdiv = $('<div style="position:absolute;left:'+Math.round((gpiostarttime - slide_start) / 1e3 * pps)+'px"><\/div>'); + $(data.e).each(function(){ + // l p t + var p; + // gpio to array: 71 > LED1, 70 > LED2, 69 > LED3, 113 > INT1, 87 > INT2 + switch (this.p) { + case 71: p=0; break; + case 70: p=1; break; + case 69: p=2; break; + case 113:p=3; break; + case 87: p=4; break; + } + // <div style="position:absolute;left:500px;width:100px;height:5px;background-color:red"><\/div> + if (this.l==0) { + if (state[p].l == 1) { + newdiv.append('<div class="gpio'+p+'" style="position:absolute;top:'+(p*5)+'px;left:'+Math.round((state[p].t - gpiostarttime)/ 1e3 * pps)+'px;width:'+Math.max(1, Math.round((gpiostarttime + this.t - state[p].t) / 1e3 * pps))+'px;height:5px"><\/div>'); + state[p].l = 0;state[p].t=gpiostarttime+this.t; + } + } + else { // rising + if (state[p].l == 0) + state[p].l=1;state[p].t=gpiostarttime+this.t; + } + + state[p].l=this.l; + state[p].t=gpiostarttime+this.t; + }); + + obsdiv.append(newdiv); + // check existing divs to remove + var oldestdiv = $('div', obsdiv).first(); + var bordereventdiv = $('div',oldestdiv).last(); + if ((parseInt(bordereventdiv.css('left')) + parseInt(bordereventdiv.css('width')) + parseInt(oldestdiv.css('left'))) < -1 * parseInt($('#slide').css('left'))) + $(oldestdiv).remove(); + vizmeta.end = gpiostarttime+data.e[data.e.length-1].t; + if (vizmeta.end < view_end + vizmeta.preloadtime) + gpioLoadNext(obsdiv); + else + vizmeta.loading = false; + }, + error: function(data, textStatus, jqXHR) { + // start timer to retry + if (starttime < test.end) { + setTimeout(function() { + if (vizmeta.end < test.end) + vizmeta.loading = false; + }, retryTimeout); + } + } + }); } function reachedEnd() { - return view_end > test.end + 1e3; + return view_end > test.end + 1e3; } function startSlide() { - // check viz data - $.ajax({ - url: 'viz_feed.php?t='+test.id, - type: 'GET', - success: function(data, textStatus, jqXHR) { - var range = parseInt(jqXHR.getResponseHeader('Range-Max')) - parseInt(jqXHR.getResponseHeader('Range-Min')); - // if ok, start it - if (range > 60e3 || ((test.end - test.start) < 50e3)) { - $('#wait').remove(); - $('#control').show(); - updateInterval = setInterval("slide_update()", 30); - } - else { - setTimeout("startSlide()", 5e3); - $('#wait').append('.'); - } - }, - error: function(data, textStatus, jqXHR) { - // start timer to retry - setTimeout("startSlide()", 5e3); - $('#wait').append('.'); - } - }); -} - + // check viz data + $.ajax({ + url: 'viz_feed.php?t='+test.id, + type: 'GET', + success: function(data, textStatus, jqXHR) { + var range = parseInt(jqXHR.getResponseHeader('Range-Max')) - parseInt(jqXHR.getResponseHeader('Range-Min')); + // if ok, start it + if (range > 60e3 || ((test.end - test.start) < 50e3)) { + $('#wait').remove(); + $('#control').show(); + updateInterval = setInterval("slide_update()", 30); + } + else { + setTimeout("startSlide()", 5e3); + $('#wait').append('.'); + } + }, + error: function(data, textStatus, jqXHR) { + // start timer to retry + 
setTimeout("startSlide()", 5e3); + $('#wait').append('.'); + } + }); +} + $(function() { <?php - // get test configuration: - // starttime - // endtime - // observer ids - $testid = $testid; - $testconfig = new SimpleXMLElement(get_testconfig($testid)); - $obsids = array(); - - if(isset($testconfig->powerProfilingConf->obsIds)) { - $obsidsPp = explode(' ',$testconfig->powerProfilingConf->obsIds); - $obsids = array_merge($obsids, array_map('intval', $obsidsPp)); - } - if(isset($testconfig->gpioTracingConf->obsIds)) { - $obsidsGm = explode(' ',$testconfig->gpioTracingConf->obsIds); - $obsids = array_merge($obsids, array_map('intval', $obsidsGm)); - } - $obsids = array_unique($obsids); + // get test configuration: + // starttime + // endtime + // observer ids + $testid = $testid; + $testconfig = new SimpleXMLElement(get_testconfig($testid)); + $obsids = array(); + + if(isset($testconfig->powerProfilingConf->obsIds)) { + $obsidsPp = explode(' ',$testconfig->powerProfilingConf->obsIds); + $obsids = array_merge($obsids, array_map('intval', $obsidsPp)); + } + if(isset($testconfig->gpioTracingConf->obsIds)) { + $obsidsGm = explode(' ',$testconfig->gpioTracingConf->obsIds); + $obsids = array_merge($obsids, array_map('intval', $obsidsGm)); + } + $obsids = array_unique($obsids); - $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); - $start->setTimeZone(new DateTimeZone("UTC")); - $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); - $end->setTimeZone(new DateTimeZone("UTC")); + $start = new DateTime($testconfig->generalConf->scheduleAbsolute->start); + $start->setTimeZone(new DateTimeZone("UTC")); + $end = new DateTime($testconfig->generalConf->scheduleAbsolute->end); + $end->setTimeZone(new DateTimeZone("UTC")); - echo ' - test = { - id : '.$testid.', - obs_list : ['.join($obsids,',').'], - start : '.$start->format('U') * 1e3.', - end : '.$end->format('U') * 1e3.' - }; - '; + echo ' + test = { + id : '.$testid.', + obs_list : ['.join($obsids,',').'], + start : '.$start->format('U') * 1e3.', + end : '.$end->format('U') * 1e3.' 
+ }; + '; ?> - // update for every test - // time values in ms since 1.1.1970 - $('#h1title').empty().html('GPIO and power traces (Test-ID: '+test.id+', duration: '+Math.round((test.end - test.start)/1e3,0)+' s)'); + // update for every test + // time values in ms since 1.1.1970 + $('#h1title').empty().html('GPIO and power traces (Test-ID: '+test.id+', duration: '+Math.round((test.end - test.start)/1e3,0)+' s)'); - // constants - pps = 100; // pixel / s - view_end = test.start; - view_start = view_end - parseInt($('#view').css('width')) / pps * 1e3; - slide_start = view_start; - preloadtime = 10e3; - retryTimeout = 5000; // ms - - // TODO: query viz availability - // if available, initialize view - $('#view').css('height', (test.obs_list.length * 60 + 20) + "px"); - $('#labels').css('height', (test.obs_list.length * 60 + 20) + "px"); - - // add t0 - $('#slide').append('<div style="position:absolute;left:'+parseInt($('#view').css('width'))+'px;margin:0;padding:0;height:'+(test.obs_list.length * 60 + 20)+'px;border-right:1px solid blue;"><\/div>'); - $('#slide').append('<div style="position:absolute;left:'+((test.end - slide_start) / 1e3 * pps)+'px;margin:0;padding:0;height:'+(test.obs_list.length * 60 + 20)+'px;border-right:1px solid red;"><\/div>'); - // add timeline - var timeline = $('<div class="timeline" style="margin:0;padding:0;height:20px"><\/div>'); - timeline.data('vizmeta', { start: view_end, end: view_end, loading:false, preloadtime:preloadtime, updater:updateTimeline }); - $('#slide').append(timeline); - $('#slide').data('round', 0); - - // fill slide with initial view - $(test.obs_list).each(function(index) { - var obsdiv = $('<div class="obs" style="margin:0;padding:0;height:60px"><\/div>'); - - var powerdiv = $('<div class="power" style="position:absolute;margin:0;padding:0;height:60px"><\/div>'); - powerdiv.data('vizmeta', { start: view_end, end: view_end - 2e3, obsid:this, loading:false, preloadtime:preloadtime +index*60, updater:powerLoadNext, lastQueryTime:0 }); - $(obsdiv).append(powerdiv); - - var gpiodiv = $('<div class="gpio" style="position:absolute;margin:0;padding:0;height:60px"><\/div>'); - gpiodiv.data('vizmeta', { start: view_end, end: view_end, obsid:this, loading:false, preloadtime:preloadtime +index*60, updater:gpioLoadNext, lastQueryTime:0, state:[{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start}]}); - $(obsdiv).append(gpiodiv); - - $('#slide').append(obsdiv); - $('#labels').append('<div style="height:35px;text-align:right;padding-top:25px;padding-right:2px;font-weight:bold;border-right:1px solid black">'+this+'<\/div>') - }); - - // bind control - $('#control').click(function() { - if (!reachedEnd()) { - clearInterval(updateInterval); - if ($(this).attr('src').match(/pause.png$/)) { - updateInterval = setInterval("slide_update(0)", 30); - $(this).attr('src', 'pics/icons/play.png'); - } - else { - updateInterval = setInterval("slide_update()", 30); - $(this).attr('src', 'pics/icons/pause.png'); - } - } - }); - $('#player_pos').draggable({ - axis: "x" , - containment: "#player_slider", - start: function( event, ui ) { - clearInterval(updateInterval); - - }, - stop: function( event, ui ) { - // set new position - var playerslide = parseInt($('#player_pos').css('left')); - var p = playerslide / ($('#player_slider').width()-$('#player_pos').width()) * (test.end - test.start + 1e3); - p = p * pps / 1e3; - $('#slide').css('left', (-p)+'px'); - view_start = slide_start + p / pps * 1e3; - view_end = view_start + 
parseInt($('#view').css('width')) / pps * 1e3; - // update timeline - $('.timeline').each(function(index) { - var vizmeta = $(this).data('vizmeta'); - vizmeta.end = parseInt(Math.max(view_start,test.start)/1e3) * 1e3; - vizmeta.loading=false; - $(this).empty(); - }); - // update power - $('.obs > .power').each(function(index) { - var vizmeta = $(this).data('vizmeta'); - vizmeta.end = parseInt(Math.max(view_start-5e3,test.start)/1e3) * 1e3; - while (removeOld(this)){}; - vizmeta.loading=false; - $(this).empty(); - }); - // update gpio - $('.obs > .gpio').each(function(index) { - var vizmeta = $(this).data('vizmeta'); - // here we would need the most recent GPIO state before our view window. As this is not deterministic, we make a conservative guess - vizmeta.lastQueryTime = parseInt(Math.max(view_start-30e3,test.start)/1e3) * 1e3; - vizmeta.end = vizmeta.lastQueryTime; - $(vizmeta.state).each(function(){ - this.l=0; - }); - vizmeta.loading=false; - $(this).empty(); - }); - - clearInterval(updateInterval); - if ($('#control').attr('src').match(/pause.png$/)) { - updateInterval = setInterval("slide_update()", 30); - } - else { - updateInterval = setInterval("slide_update(0)", 30); - } - } - }); + // constants + pps = 100; // pixel / s + view_end = test.start; + view_start = view_end - parseInt($('#view').css('width')) / pps * 1e3; + slide_start = view_start; + preloadtime = 10e3; + retryTimeout = 5000; // ms + + // TODO: query viz availability + // if available, initialize view + $('#view').css('height', (test.obs_list.length * 60 + 20) + "px"); + $('#labels').css('height', (test.obs_list.length * 60 + 20) + "px"); + + // add t0 + $('#slide').append('<div style="position:absolute;left:'+parseInt($('#view').css('width'))+'px;margin:0;padding:0;height:'+(test.obs_list.length * 60 + 20)+'px;border-right:1px solid blue;"><\/div>'); + $('#slide').append('<div style="position:absolute;left:'+((test.end - slide_start) / 1e3 * pps)+'px;margin:0;padding:0;height:'+(test.obs_list.length * 60 + 20)+'px;border-right:1px solid red;"><\/div>'); + // add timeline + var timeline = $('<div class="timeline" style="margin:0;padding:0;height:20px"><\/div>'); + timeline.data('vizmeta', { start: view_end, end: view_end, loading:false, preloadtime:preloadtime, updater:updateTimeline }); + $('#slide').append(timeline); + $('#slide').data('round', 0); + + // fill slide with initial view + $(test.obs_list).each(function(index) { + var obsdiv = $('<div class="obs" style="margin:0;padding:0;height:60px"><\/div>'); + + var powerdiv = $('<div class="power" style="position:absolute;margin:0;padding:0;height:60px"><\/div>'); + powerdiv.data('vizmeta', { start: view_end, end: view_end - 2e3, obsid:this, loading:false, preloadtime:preloadtime +index*60, updater:powerLoadNext, lastQueryTime:0 }); + $(obsdiv).append(powerdiv); + + var gpiodiv = $('<div class="gpio" style="position:absolute;margin:0;padding:0;height:60px"><\/div>'); + gpiodiv.data('vizmeta', { start: view_end, end: view_end, obsid:this, loading:false, preloadtime:preloadtime +index*60, updater:gpioLoadNext, lastQueryTime:0, state:[{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start},{l:0, t:test.start}]}); + $(obsdiv).append(gpiodiv); + + $('#slide').append(obsdiv); + $('#labels').append('<div style="height:35px;text-align:right;padding-top:25px;padding-right:2px;font-weight:bold;border-right:1px solid black">'+this+'<\/div>') + }); + + // bind control + $('#control').click(function() { + if (!reachedEnd()) { + 
clearInterval(updateInterval); + if ($(this).attr('src').match(/pause.png$/)) { + updateInterval = setInterval("slide_update(0)", 30); + $(this).attr('src', 'pics/icons/play.png'); + } + else { + updateInterval = setInterval("slide_update()", 30); + $(this).attr('src', 'pics/icons/pause.png'); + } + } + }); + $('#player_pos').draggable({ + axis: "x" , + containment: "#player_slider", + start: function( event, ui ) { + clearInterval(updateInterval); + + }, + stop: function( event, ui ) { + // set new position + var playerslide = parseInt($('#player_pos').css('left')); + var p = playerslide / ($('#player_slider').width()-$('#player_pos').width()) * (test.end - test.start + 1e3); + p = p * pps / 1e3; + $('#slide').css('left', (-p)+'px'); + view_start = slide_start + p / pps * 1e3; + view_end = view_start + parseInt($('#view').css('width')) / pps * 1e3; + // update timeline + $('.timeline').each(function(index) { + var vizmeta = $(this).data('vizmeta'); + vizmeta.end = parseInt(Math.max(view_start,test.start)/1e3) * 1e3; + vizmeta.loading=false; + $(this).empty(); + }); + // update power + $('.obs > .power').each(function(index) { + var vizmeta = $(this).data('vizmeta'); + vizmeta.end = parseInt(Math.max(view_start-5e3,test.start)/1e3) * 1e3; + while (removeOld(this)){}; + vizmeta.loading=false; + $(this).empty(); + }); + // update gpio + $('.obs > .gpio').each(function(index) { + var vizmeta = $(this).data('vizmeta'); + // here we would need the most recent GPIO state before our view window. As this is not deterministic, we make a conservative guess + vizmeta.lastQueryTime = parseInt(Math.max(view_start-30e3,test.start)/1e3) * 1e3; + vizmeta.end = vizmeta.lastQueryTime; + $(vizmeta.state).each(function(){ + this.l=0; + }); + vizmeta.loading=false; + $(this).empty(); + }); + + clearInterval(updateInterval); + if ($('#control').attr('src').match(/pause.png$/)) { + updateInterval = setInterval("slide_update()", 30); + } + else { + updateInterval = setInterval("slide_update(0)", 30); + } + } + }); - // run slider update every .. - // 2s ~ 200px > speed 100px / s - var now = new Date(); - var waittime = 120; - if (now.getTime() - waittime * 1e3 > test.start) { - updateInterval = setInterval("slide_update()", 30); - $('#control').show(); - } - else { - //wait for data - $('#view').append('<div id="wait" style="position:absolute;left:240px;top:50px;background-color:black;color:white;font-weight:bold;font-size:20pt;padding:8px">Processing data, please wait...'+(( test.start - (now - waittime * 1e3))/1000)+'<\/div>'); - setTimeout("startSlide()", test.start - (now - waittime * 1e3)); - } + // run slider update every .. 
+ // 2s ~ 200px > speed 100px / s + var now = new Date(); + var waittime = 120; + if (now.getTime() - waittime * 1e3 > test.start) { + updateInterval = setInterval("slide_update()", 30); + $('#control').show(); + } + else { + //wait for data + $('#view').append('<div id="wait" style="position:absolute;left:240px;top:50px;background-color:black;color:white;font-weight:bold;font-size:20pt;padding:8px">Processing data, please wait...'+(( test.start - (now - waittime * 1e3))/1000)+'<\/div>'); + setTimeout("startSlide()", test.start - (now - waittime * 1e3)); + } }) </script> <div style="position:relative;height:30px"> - <div style="position:absolute"><h1 id="h1title">Viz</h1></div> - <div style="float:right;width:400px"> - <div style="width:400px;position:absolute;top:-30px"> - <img id="control" style="display:none;float:right" src="pics/icons/pause.png" alt="pause/play"> - <div id="player_slider" style="float:left;margin-top:8px;border-style:solid;border-width:1px;border-color:black;width:360px;height:7px;background:url(pics/player_slider_bg.png)"> - <div style="position:absolute;width:0px;height:7px;background:url(pics/player_slider_col.png)"> - <img id="player_pos" src="pics/player_pos.png" style="position:absolute;left:-10px;top:-7px"> - <!-- div style="position:absolute;top:-45px"> - <div style="background-color:black;color:white;padding:4px"><nobr>0s .. 10s</nobr></div> - <center><img src="pics/player_indicator.png"></center> - </div--> - </div> - </div> - </div> + <div style="position:absolute"><h1 id="h1title">Viz</h1></div> + <div style="float:right;width:400px"> + <div style="width:400px;position:absolute;top:-30px"> + <img id="control" style="display:none;float:right" src="pics/icons/pause.png" alt="pause/play"> + <div id="player_slider" style="float:left;margin-top:8px;border-style:solid;border-width:1px;border-color:black;width:360px;height:7px;background:url(pics/player_slider_bg.png)"> + <div style="position:absolute;width:0px;height:7px;background:url(pics/player_slider_col.png)"> + <img id="player_pos" src="pics/player_pos.png" style="position:absolute;left:-10px;top:-7px"> + <!-- div style="position:absolute;top:-45px"> + <div style="background-color:black;color:white;padding:4px"><nobr>0s .. 
10s</nobr></div> + <center><img src="pics/player_indicator.png"></center> + </div--> + </div> + </div> + </div> - </div> + </div> </div> <div style="position:relative;margin:0;padding:0"> - <div id="view" style="width:880px;height:100px;overflow:hidden;position:relative;left:20px"> - <div id="slide" style="position:absolute;left:0px"> - </div> - </div> - <div id="labels" style="width:25px;height:100px;position:absolute;top:0;left:-5px"> - <div style="height:20px;"></div> - </div> + <div id="view" style="width:880px;height:100px;overflow:hidden;position:relative;left:20px"> + <div id="slide" style="position:absolute;left:0px"> + </div> + </div> + <div id="labels" style="width:25px;height:100px;position:absolute;top:0;left:-5px"> + <div style="height:20px;"></div> + </div> </div> <div id="log"></div> diff --git a/webserver/user/scripting-support.php b/webserver/user/scripting-support.php index 98148661860e5e6f41e1bc568c4b0088e779db8b..d9bd9c07de3fa9d8f885efada6165b3ae9942f8f 100644 --- a/webserver/user/scripting-support.php +++ b/webserver/user/scripting-support.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php $path="/home/flocklab/flocklab_downloads/shell-tool/"; diff --git a/webserver/user/statusbar_feed.php b/webserver/user/statusbar_feed.php index 685ad86f3a483f0d45a3fda6249d2b5e271aab8f..21bb272518d40f8bdad5b962c25cb1ddd37f7aec 100644 --- a/webserver/user/statusbar_feed.php +++ b/webserver/user/statusbar_feed.php @@ -1,61 +1,61 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ - require_once('include/auth.php'); - - //debug(); - - - // Connect to database and get all currently active status messages: - $db = db_connect(); - $sql = "SELECT * - FROM `flocklab`.`tbl_serv_web_status` - WHERE - (`show` = 1) AND - ((UTC_TIMESTAMP() > `time_start`) OR (`time_start` IS NULL)) AND - ((UTC_TIMESTAMP() < `time_end`) OR (`time_end` IS NULL)) - ORDER BY `time_start` ASC, `time_end` ASC - "; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get status messages from database because: ' . mysqli_error($db)); - mysqli_close($db); - - // Build the array of events. 
If possible, append start and/or end time to the message: - $events = array(); - while ($row = mysqli_fetch_array($rs)) { - $msg = "<i>"; - if ($row['time_start'] != "") { - $d = new DateTime($row['time_start']); - $msg = sprintf('%sFrom <div class="time" style="display:inline">%s</div> ', $msg, $d->format('U')); - } - if ($row['time_end'] != "") { - $d = new DateTime($row['time_end']); - if (strlen($msg) == 3) { - $msg = $msg . "Until"; - } else { - $msg = $msg . "until"; - } - $msg = sprintf('%s <div class="time" style="display:inline">%s</div>', $msg, $d->format('U')); - } - if (strlen($msg) > 3) { - $msg = $msg . ": "; - } - $msg = $msg . '</i>'; - if ($row['title'] != "") { - $msg = $msg . '<b>' . $row['title'] . '</b>: '; - } - $msg = $msg . $row['message']; - $events[] = $msg; - } + require_once('include/auth.php'); + + //debug(); + + + // Connect to database and get all currently active status messages: + $db = db_connect(); + $sql = "SELECT * + FROM `flocklab`.`tbl_serv_web_status` + WHERE + (`show` = 1) AND + ((UTC_TIMESTAMP() > `time_start`) OR (`time_start` IS NULL)) AND + ((UTC_TIMESTAMP() < `time_end`) OR (`time_end` IS NULL)) + ORDER BY `time_start` ASC, `time_end` ASC + "; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get status messages from database because: ' . mysqli_error($db)); + mysqli_close($db); + + // Build the array of events. If possible, append start and/or end time to the message: + $events = array(); + while ($row = mysqli_fetch_array($rs)) { + $msg = "<i>"; + if ($row['time_start'] != "") { + $d = new DateTime($row['time_start']); + $msg = sprintf('%sFrom <div class="time" style="display:inline">%s</div> ', $msg, $d->format('U')); + } + if ($row['time_end'] != "") { + $d = new DateTime($row['time_end']); + if (strlen($msg) == 3) { + $msg = $msg . "Until"; + } else { + $msg = $msg . "until"; + } + $msg = sprintf('%s <div class="time" style="display:inline">%s</div>', $msg, $d->format('U')); + } + if (strlen($msg) > 3) { + $msg = $msg . ": "; + } + $msg = $msg . '</i>'; + if ($row['title'] != "") { + $msg = $msg . '<b>' . $row['title'] . '</b>: '; + } + $msg = $msg . 
$row['message']; + $events[] = $msg; + } - // JSON-encode the array and return it to the calendar: - echo json_encode($events); + // JSON-encode the array and return it to the calendar: + echo json_encode($events); ?> diff --git a/webserver/user/terms_of_use.php b/webserver/user/terms_of_use.php index 72ed746c29de481a73481cb6db801ca21b94f471..36978ea36c20a47f7bbdb2e8203a8ea0bdcb071d 100755 --- a/webserver/user/terms_of_use.php +++ b/webserver/user/terms_of_use.php @@ -1,50 +1,50 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id: user_register.php 2066 2012-09-21 13:59:54Z walserc $" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id: user_register.php 2066 2012-09-21 13:59:54Z walserc $" + * __source__ = "$URL$" + */ ?> <?php require_once('include/libflocklab.php');?> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" - "http://www.w3.org/TR/html4/loose.dtd"> + "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> - <link rel="stylesheet" type="text/css" href="css/flocklab.css"> - <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="stylesheet" type="text/css" href="css/flocklab.css"> + <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <title>FlockLab - Register Account</title> + <title>FlockLab - Register Account</title> - <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> - <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> - <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> - <meta name="LANGUAGE" content="English"> - <meta name="ROBOTS" content="noindex, nofollow"> - <meta name="DATE" content="2011-2012"> + <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> + <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> + <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> + <meta name="LANGUAGE" content="English"> + <meta name="ROBOTS" content="noindex, nofollow"> + <meta name="DATE" content="2011-2012"> </head> <body> - <div id="container" class="container"> - <div id="header" class="header"> - <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> - </div> <!-- END header --> - <div id="content" class="content"> - <h1>Terms of Use - FlockLab</h1> - By signing up for a FlockLab user account, you accept the following terms of use: - <ol> - <li><b>FlockLab is provided as it is:</b> We cannot give any guarantee about the correctness of its functionalities.</li> - <li><b>Availability:</b> Members of our institute get priority access to FlockLab. 
We cannot give any guarantee for the availability to external users.</li> - <li><b>Acknowledgement:</b> The usage of FlockLab must be properly acknowledged in all publications produced with the help of FlockLab.</li> - <li><b>Cancelation:</b> A FlockLab user account can be deactivated without notice by FlockLab administrators.</li> - </ol> - These terms become effective 2012/09/21. - </div> <!-- END content --> - <div style="clear:both"></div> - </div> <!-- END container --> + <div id="container" class="container"> + <div id="header" class="header"> + <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> + </div> <!-- END header --> + <div id="content" class="content"> + <h1>Terms of Use - FlockLab</h1> + By signing up for a FlockLab user account, you accept the following terms of use: + <ol> + <li><b>FlockLab is provided as it is:</b> We cannot give any guarantee about the correctness of its functionalities.</li> + <li><b>Availability:</b> Members of our institute get priority access to FlockLab. We cannot give any guarantee for the availability to external users.</li> + <li><b>Acknowledgement:</b> The usage of FlockLab must be properly acknowledged in all publications produced with the help of FlockLab.</li> + <li><b>Cancelation:</b> A FlockLab user account can be deactivated without notice by FlockLab administrators.</li> + </ol> + These terms become effective 2012/09/21. + </div> <!-- END content --> + <div style="clear:both"></div> + </div> <!-- END container --> </body> </html> diff --git a/webserver/user/test_abort.php b/webserver/user/test_abort.php index 4f81a5d1fe7d5b076902050809bff08f23a8e67d..a806dcf4e84e02b0427061121d58d3901eee81e5 100644 --- a/webserver/user/test_abort.php +++ b/webserver/user/test_abort.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php @@ -25,149 +25,149 @@ array_push($errors, "Unknown testid."); ?> <script type="text/javascript"> - $(document).ready(function() { - $('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - }); + $(document).ready(function() { + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + }); </script> <h1>Manage Tests</h1> - <?php - if (count($errors)>0) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<!-- cmd --><p>Error:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . 
"</li>"; - echo "</ul></div><p><!-- cmd --></p>"; - } - else if (isset($_POST['removeit']) && isset($_POST['testid'])) { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<!-- cmd --><p>The test has been aborted.</p><!-- cmd -->"; - echo "</div><p></p>"; - include('index.php'); - exit(); - } - else { - $db = db_connect(); - $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status, `targetimage_fk` - FROM tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) - WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND serv_tests_key = ".mysqli_real_escape_string($db, $_POST['testid'])." - GROUP BY `targetimage_fk`"; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch test information: ' . mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - // Find out the state of the test: - $schedulable = true; - $planned = false; - $running = false; - $finished = false; - $preparing = false; - $cleaningup = false; - $failed = false; - $aborting = false; - $syncing = false; - $synced = false; - $retentionexp = false; - switch($row['test_status']) { - case "planned": - $planned = true; - break; - case "preparing": - $preparing = true; - break; - case "running": - $running = true; - break; - case "cleaning up": - $cleaningup = true; - break; - case "finished": - $finished = true; - break; - case "not schedulable": - $schedulable = false; - break; - case "failed": - $failed = true; - break; - case "aborting": - $aborting = true; - break; - case "syncing": - $syncing = true; - break; - case "synced": - $synced = true; - break; - case "retention expiring": - $retentionexp = true; - break; - } - echo ' - <form method="post" action="test_abort.php" enctype="multipart/form-data"> - <fieldset> - <legend>Abort test</legend> - <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div> - <p>The following test will be aborted:</p> - <p><table> - <tr><td>Test ID</td><td>'.$row['serv_tests_key'].'</td></tr> - <tr><td>Title</td><td>'.$row['title'].'</td></tr> - <tr><td>Description</td><td style="white-space:normal;">'.$row['description'].'</td></tr> - <tr><td>State</td>'; - echo "<td>"; - echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' />"; - echo '</td> - </tr> - <tr><td>Start</td>'; - // Start time: dependent of state of test - if ($running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { - $d = new DateTime($row['time_start_act']); - echo "<td title='Actual start time' class='qtip_show time'>" . $d->format('U') . "</td>"; - } - elseif ($planned || $preparing) { - $d = new DateTime($row['time_start_wish']); - echo "<td title='Planned start time' class='qtip_show'><i class='time'>" . $d->format('U') . "</i></td>"; - } - elseif (!$schedulable) - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - else - echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; - echo '</tr> - <tr><td>End</td>'; - // End time: dependent of state of test - if ($planned || $preparing || $running || $cleaningup || $syncing || $synced || $retentionexp) { - $d = new DateTime($row['time_end_wish']); - echo "<td title='Planned end time' class='qtip_show'><i class='time'>" .$d->format('U'). 
"</i></td>"; - } - elseif ($finished || $failed) { - $d = new DateTime($row['time_end_act']); - echo "<td title='Actual end time' class='qtip_show time'>" . $d->format('U') . "</td>"; - } - elseif ($aborting) - echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; - elseif (!$schedulable) - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - else - echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; - echo '</tr> - <tr><td>Images used</td><td><ul>'; - if (isset($row['targetimage_fk'])) - echo '<li>'.$row['targetimage_fk'].'</li>'; - $num = mysqli_num_rows($res) - 1; - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - echo '<li>'.$row['targetimage_fk'].'</li>'; - } - echo '</ul></td><tr> - </table></p> - <input type="hidden" name="testid" value="'.htmlentities($_POST['testid']).'"> - <input type="submit" name="removeit" value="Remove test"> - </fieldset> - <p></p> - </form>'; - } - ?> + <?php + if (count($errors)>0) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<!-- cmd --><p>Error:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . "</li>"; + echo "</ul></div><p><!-- cmd --></p>"; + } + else if (isset($_POST['removeit']) && isset($_POST['testid'])) { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<!-- cmd --><p>The test has been aborted.</p><!-- cmd -->"; + echo "</div><p></p>"; + include('index.php'); + exit(); + } + else { + $db = db_connect(); + $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status, `targetimage_fk` + FROM tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) + WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND serv_tests_key = ".mysqli_real_escape_string($db, $_POST['testid'])." + GROUP BY `targetimage_fk`"; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch test information: ' . 
mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + // Find out the state of the test: + $schedulable = true; + $planned = false; + $running = false; + $finished = false; + $preparing = false; + $cleaningup = false; + $failed = false; + $aborting = false; + $syncing = false; + $synced = false; + $retentionexp = false; + switch($row['test_status']) { + case "planned": + $planned = true; + break; + case "preparing": + $preparing = true; + break; + case "running": + $running = true; + break; + case "cleaning up": + $cleaningup = true; + break; + case "finished": + $finished = true; + break; + case "not schedulable": + $schedulable = false; + break; + case "failed": + $failed = true; + break; + case "aborting": + $aborting = true; + break; + case "syncing": + $syncing = true; + break; + case "synced": + $synced = true; + break; + case "retention expiring": + $retentionexp = true; + break; + } + echo ' + <form method="post" action="test_abort.php" enctype="multipart/form-data"> + <fieldset> + <legend>Abort test</legend> + <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div> + <p>The following test will be aborted:</p> + <p><table> + <tr><td>Test ID</td><td>'.$row['serv_tests_key'].'</td></tr> + <tr><td>Title</td><td>'.$row['title'].'</td></tr> + <tr><td>Description</td><td style="white-space:normal;">'.$row['description'].'</td></tr> + <tr><td>State</td>'; + echo "<td>"; + echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' />"; + echo '</td> + </tr> + <tr><td>Start</td>'; + // Start time: dependent of state of test + if ($running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { + $d = new DateTime($row['time_start_act']); + echo "<td title='Actual start time' class='qtip_show time'>" . $d->format('U') . "</td>"; + } + elseif ($planned || $preparing) { + $d = new DateTime($row['time_start_wish']); + echo "<td title='Planned start time' class='qtip_show'><i class='time'>" . $d->format('U') . "</i></td>"; + } + elseif (!$schedulable) + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + else + echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; + echo '</tr> + <tr><td>End</td>'; + // End time: dependent of state of test + if ($planned || $preparing || $running || $cleaningup || $syncing || $synced || $retentionexp) { + $d = new DateTime($row['time_end_wish']); + echo "<td title='Planned end time' class='qtip_show'><i class='time'>" .$d->format('U'). "</i></td>"; + } + elseif ($finished || $failed) { + $d = new DateTime($row['time_end_act']); + echo "<td title='Actual end time' class='qtip_show time'>" . $d->format('U') . 
"</td>"; + } + elseif ($aborting) + echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; + elseif (!$schedulable) + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + else + echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; + echo '</tr> + <tr><td>Images used</td><td><ul>'; + if (isset($row['targetimage_fk'])) + echo '<li>'.$row['targetimage_fk'].'</li>'; + $num = mysqli_num_rows($res) - 1; + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + echo '<li>'.$row['targetimage_fk'].'</li>'; + } + echo '</ul></td><tr> + </table></p> + <input type="hidden" name="testid" value="'.htmlentities($_POST['testid']).'"> + <input type="submit" name="removeit" value="Remove test"> + </fieldset> + <p></p> + </form>'; + } + ?> <!-- END content --> <?php do_layout('Manage Tests','Manage Tests'); diff --git a/webserver/user/test_delete.php b/webserver/user/test_delete.php index 60818f6c5b7adbe1e443311ddee49a109185f99f..1ea2b3f2a01a10ad8af3e3458c6dcd8efc631f08 100644 --- a/webserver/user/test_delete.php +++ b/webserver/user/test_delete.php @@ -1,225 +1,225 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - $errors = array(); + $errors = array(); - if (isset($_POST['removeit']) && isset($_POST['testid'])) { - if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { - // remove test - $db = db_connect(); - // remove images too ? - $rmimages = array(); - if (isset($_POST['remove_images'])) { - // search for images that are only used in this test - $sql = 'SELECT COUNT(DISTINCT test_fk) as DC, a.targetimage_fk - FROM ( - SELECT targetimage_fk, serv_tests_key - FROM - tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) - WHERE owner_fk = '.$_SESSION['serv_users_key'].' AND serv_tests_key = '.mysqli_real_escape_string($db, $_POST['testid']).' - GROUP BY `targetimage_fk` - ) as a - LEFT JOIN tbl_serv_map_test_observer_targetimages as b ON (a.targetimage_fk = b.targetimage_fk) - GROUP BY targetimage_fk - HAVING DC=1'; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); - $num = mysqli_num_rows($res); - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - array_push($rmimages, $row['targetimage_fk']); - } - } - mysqli_close($db); - // delete related image binaries (keep metadata for statistics) - $db = db_connect(); - foreach($rmimages as $imid) { - $sql = 'UPDATE `tbl_serv_targetimages` - SET `binary` = NULL - WHERE `serv_targetimages_key` = '.$imid; - mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); - } - // mark test to be deleted - $sql = 'UPDATE tbl_serv_tests SET test_status="todelete" - WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' AND `serv_tests_key` = ' .mysqli_real_escape_string($db, $_POST['testid']); - mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . 
mysqli_error($db)); - // remove resource allocations - $sql = 'DELETE from tbl_serv_resource_allocation - WHERE `test_fk` = ' .mysqli_real_escape_string($db, $_POST['testid']); - mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); - mysqli_close($db); - } - else - array_push($errors, "Test does not belong to you."); - } - if (!isset($_POST['testid'])) - array_push($errors, "Unknown testid."); - - if (isset($_POST['removeit']) && isset($_POST['testid']) && count($errors)==0) { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<!-- cmd --><p>The test has been removed.</p><!-- cmd -->"; - echo "</div><p></p>"; - include('index.php'); - exit(); - } - echo ' - <script type="text/javascript"> - $(document).ready(function() { - $(".qtip_show").qtip( { - content: {text: false}, - style : "flocklab", - }); - }); - </script> - <h1>Manage Tests</h1>'; + if (isset($_POST['removeit']) && isset($_POST['testid'])) { + if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { + // remove test + $db = db_connect(); + // remove images too ? + $rmimages = array(); + if (isset($_POST['remove_images'])) { + // search for images that are only used in this test + $sql = 'SELECT COUNT(DISTINCT test_fk) as DC, a.targetimage_fk + FROM ( + SELECT targetimage_fk, serv_tests_key + FROM + tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) + WHERE owner_fk = '.$_SESSION['serv_users_key'].' AND serv_tests_key = '.mysqli_real_escape_string($db, $_POST['testid']).' + GROUP BY `targetimage_fk` + ) as a + LEFT JOIN tbl_serv_map_test_observer_targetimages as b ON (a.targetimage_fk = b.targetimage_fk) + GROUP BY targetimage_fk + HAVING DC=1'; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); + $num = mysqli_num_rows($res); + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + array_push($rmimages, $row['targetimage_fk']); + } + } + mysqli_close($db); + // delete related image binaries (keep metadata for statistics) + $db = db_connect(); + foreach($rmimages as $imid) { + $sql = 'UPDATE `tbl_serv_targetimages` + SET `binary` = NULL + WHERE `serv_targetimages_key` = '.$imid; + mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); + } + // mark test to be deleted + $sql = 'UPDATE tbl_serv_tests SET test_status="todelete" + WHERE `owner_fk` = '.$_SESSION['serv_users_key'].' AND `serv_tests_key` = ' .mysqli_real_escape_string($db, $_POST['testid']); + mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . mysqli_error($db)); + // remove resource allocations + $sql = 'DELETE from tbl_serv_resource_allocation + WHERE `test_fk` = ' .mysqli_real_escape_string($db, $_POST['testid']); + mysqli_query($db, $sql) or flocklab_die('Cannot remove test: ' . 
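
The image-cleanup query above collects the target images that no other test references (HAVING DC=1), so that only their binaries are dropped while the metadata stays for statistics. A sketch of the same lookup with a parameterized query; the function name is hypothetical, the schema is the one used in this patch, and mysqli_stmt_get_result assumes the mysqlnd driver:

    // Sketch: images used by $testid (owned by $ownerkey) that appear in no other test.
    function images_only_in_test($db, $ownerkey, $testid) {
        $sql = 'SELECT COUNT(DISTINCT b.test_fk) AS DC, a.targetimage_fk
                FROM (
                    SELECT targetimage_fk
                    FROM tbl_serv_tests
                    LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk)
                    WHERE owner_fk = ? AND serv_tests_key = ?
                    GROUP BY targetimage_fk
                ) AS a
                LEFT JOIN tbl_serv_map_test_observer_targetimages AS b ON (a.targetimage_fk = b.targetimage_fk)
                GROUP BY a.targetimage_fk
                HAVING DC = 1';
        $stmt = mysqli_prepare($db, $sql);
        mysqli_stmt_bind_param($stmt, 'ii', $ownerkey, $testid);
        mysqli_stmt_execute($stmt);
        $rmimages = array();
        $res = mysqli_stmt_get_result($stmt);
        while ($row = mysqli_fetch_assoc($res))
            array_push($rmimages, $row['targetimage_fk']);
        mysqli_stmt_close($stmt);
        return $rmimages;
    }
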
mysqli_error($db)); + mysqli_close($db); + } + else + array_push($errors, "Test does not belong to you."); + } + if (!isset($_POST['testid'])) + array_push($errors, "Unknown testid."); + + if (isset($_POST['removeit']) && isset($_POST['testid']) && count($errors)==0) { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<!-- cmd --><p>The test has been removed.</p><!-- cmd -->"; + echo "</div><p></p>"; + include('index.php'); + exit(); + } + echo ' + <script type="text/javascript"> + $(document).ready(function() { + $(".qtip_show").qtip( { + content: {text: false}, + style : "flocklab", + }); + }); + </script> + <h1>Manage Tests</h1>'; - if (count($errors)>0) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<!-- cmd --><p>Error:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . "</li>"; - echo "</ul></div><p><!-- cmd --></p>"; - } - else { - $db = db_connect(); - $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status, `targetimage_fk` - FROM tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) - WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND serv_tests_key = ".mysqli_real_escape_string($db, $_POST['testid'])." AND test_status <> 'deleted' AND test_status <> 'todelete' - GROUP BY `targetimage_fk`"; - $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch test information: ' . mysqli_error($db)); - $row = mysqli_fetch_assoc($res); - // Find out the state of the test: - $schedulable = true; - $planned = false; - $running = false; - $finished = false; - $preparing = false; - $cleaningup = false; - $failed = false; - $aborting = false; - $syncing = false; - $synced = false; - $retentionexp = false; - switch($row['test_status']) { - case "planned": - $planned = true; - break; - case "preparing": - $preparing = true; - break; - case "running": - $running = true; - break; - case "cleaning up": - $cleaningup = true; - break; - case "finished": - $finished = true; - break; - case "not schedulable": - $schedulable = false; - break; - case "failed": - $failed = true; - break; - case "aborting": - $aborting = true; - break; - case "syncing": - $syncing = true; - break; - case "synced": - $synced = true; - break; - case "retention expiring": - $retentionexp = true; - break; - } - echo ' - <form method="post" action="test_delete.php" enctype="multipart/form-data"> - <fieldset> - <legend>Remove test</legend> - <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div>'; - // If the tests is running, it cannot be deleted but must rather be aborted. Show a corresponding warning: - if ($preparing || $running || $cleaningup || $syncing) { - echo ' - <p>The test with ID '.$row['serv_tests_key'].' is currently in state "'.$row['test_status'].'" cannot be deleted but has rather to be aborted. 
- Please go back to the test overview and reload the table by clicking <a href="index.php">here</a>.</p>'; - } else { - echo ' - <p>The following test will be removed:</p> - <p><table> - <tr><td>Test ID</td><td>'.$row['serv_tests_key'].'</td></tr> - <tr><td>Title</td><td>'.$row['title'].'</td></tr> - <tr><td>Description</td><td style="white-space:normal;">'.$row['description'].'</td></tr> - <tr><td>State</td>'; - echo "<td>"; - echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' />"; - echo " ".state_short_description($row['test_status']); - echo '</td> - </tr> - <tr><td>Start</td>'; - // Start time: dependent of state of test - if ($running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { - $d = new DateTime($row['time_start_act']); - echo "<td title='Actual start time' class='qtip_show time'>" . $d->format('U') . "</td>"; - } - elseif ($planned || $preparing) { - $d = new DateTime($row['time_start_wish']); - echo "<td title='Planned start time' class='qtip_show'><i class='time'>" . $d->format('U') . "</i></td>"; - } - elseif (!$schedulable) - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - else - echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; - echo '</tr> - <tr><td>End</td>'; - // End time: dependent of state of test - if ($planned || $preparing || $running || $cleaningup || $syncing || $synced || $retentionexp) { - $d = new DateTime($row['time_end_wish']); - echo "<td title='Planned end time' class='qtip_show'><i class='time'>" .$d->format('U'). "</i></td>"; - } - elseif ($finished || $failed) { - $d = new DateTime($row['time_end_act']); - echo "<td title='Actual end time' class='qtip_show time'>" . $d->format('U') . "</td>"; - } - elseif ($aborting) - echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; - elseif (!$schedulable) - echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; - else - echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; - echo '</tr> - <tr><td>Images used</td><td><ul>'; - if (isset($row['targetimage_fk'])) - echo '<li>'.$row['targetimage_fk'].'</li>'; - $num = mysqli_num_rows($res) - 1; - while ($num-- > 0) { - $row = mysqli_fetch_assoc($res); - echo '<li>'.$row['targetimage_fk'].'</li>'; - } - echo '</ul></td><tr> - </table></p> - <p><input type="checkbox" name="remove_images" values="1" /> Remove also test images that are used in this test.</p> - </div><p></p> - <input type="hidden" name="testid" value="'.htmlentities($_POST['testid']).'"> - <input type="submit" name="removeit" value="Remove test">'; - } - echo ' - </fieldset> - <p></p> - </form>'; - } - ?> + if (count($errors)>0) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<!-- cmd --><p>Error:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . "</li>"; + echo "</ul></div><p><!-- cmd --></p>"; + } + else { + $db = db_connect(); + $sql = "SELECT serv_tests_key, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status, `targetimage_fk` + FROM tbl_serv_tests LEFT JOIN tbl_serv_map_test_observer_targetimages ON (serv_tests_key = test_fk) + WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND serv_tests_key = ".mysqli_real_escape_string($db, $_POST['testid'])." 
AND test_status <> 'deleted' AND test_status <> 'todelete' + GROUP BY `targetimage_fk`"; + $res = mysqli_query($db, $sql) or flocklab_die('Cannot fetch test information: ' . mysqli_error($db)); + $row = mysqli_fetch_assoc($res); + // Find out the state of the test: + $schedulable = true; + $planned = false; + $running = false; + $finished = false; + $preparing = false; + $cleaningup = false; + $failed = false; + $aborting = false; + $syncing = false; + $synced = false; + $retentionexp = false; + switch($row['test_status']) { + case "planned": + $planned = true; + break; + case "preparing": + $preparing = true; + break; + case "running": + $running = true; + break; + case "cleaning up": + $cleaningup = true; + break; + case "finished": + $finished = true; + break; + case "not schedulable": + $schedulable = false; + break; + case "failed": + $failed = true; + break; + case "aborting": + $aborting = true; + break; + case "syncing": + $syncing = true; + break; + case "synced": + $synced = true; + break; + case "retention expiring": + $retentionexp = true; + break; + } + echo ' + <form method="post" action="test_delete.php" enctype="multipart/form-data"> + <fieldset> + <legend>Remove test</legend> + <div class="warning"><div style="float:left;"><img alt="" src="pics/icons/att.png"></div>'; + // If the tests is running, it cannot be deleted but must rather be aborted. Show a corresponding warning: + if ($preparing || $running || $cleaningup || $syncing) { + echo ' + <p>The test with ID '.$row['serv_tests_key'].' is currently in state "'.$row['test_status'].'" cannot be deleted but has rather to be aborted. + Please go back to the test overview and reload the table by clicking <a href="index.php">here</a>.</p>'; + } else { + echo ' + <p>The following test will be removed:</p> + <p><table> + <tr><td>Test ID</td><td>'.$row['serv_tests_key'].'</td></tr> + <tr><td>Title</td><td>'.$row['title'].'</td></tr> + <tr><td>Description</td><td style="white-space:normal;">'.$row['description'].'</td></tr> + <tr><td>State</td>'; + echo "<td>"; + echo "<img src='".state_icon($row['test_status'])."' height='16px' alt='".state_short_description($row['test_status'])."' title='".state_long_description($row['test_status'])."' class='qtip_show' />"; + echo " ".state_short_description($row['test_status']); + echo '</td> + </tr> + <tr><td>Start</td>'; + // Start time: dependent of state of test + if ($running || $cleaningup || $finished || $failed || $aborting || $syncing || $synced || $retentionexp) { + $d = new DateTime($row['time_start_act']); + echo "<td title='Actual start time' class='qtip_show time'>" . $d->format('U') . "</td>"; + } + elseif ($planned || $preparing) { + $d = new DateTime($row['time_start_wish']); + echo "<td title='Planned start time' class='qtip_show'><i class='time'>" . $d->format('U') . "</i></td>"; + } + elseif (!$schedulable) + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + else + echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; + echo '</tr> + <tr><td>End</td>'; + // End time: dependent of state of test + if ($planned || $preparing || $running || $cleaningup || $syncing || $synced || $retentionexp) { + $d = new DateTime($row['time_end_wish']); + echo "<td title='Planned end time' class='qtip_show'><i class='time'>" .$d->format('U'). "</i></td>"; + } + elseif ($finished || $failed) { + $d = new DateTime($row['time_end_act']); + echo "<td title='Actual end time' class='qtip_show time'>" . $d->format('U') . 
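
The warning above covers the states in which a test is still being set up, run, or cleaned up and therefore has to be aborted through test_abort.php before it can be deleted. The same check written as a hypothetical helper, for reference only:

    // Sketch: states that block deletion; the list matches the condition used above.
    function test_must_be_aborted($status) {
        return in_array($status, array('preparing', 'running', 'cleaning up', 'syncing'));
    }
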
"</td>"; + } + elseif ($aborting) + echo "<td title='Test is currently aborting' class='qtip_show'>n/a</td>"; + elseif (!$schedulable) + echo "<td title='Test is not schedulable' class='qtip_show'>n/a</td>"; + else + echo "<td title='Test is in unknown state' class='qtip_show'>n/a</td>"; + echo '</tr> + <tr><td>Images used</td><td><ul>'; + if (isset($row['targetimage_fk'])) + echo '<li>'.$row['targetimage_fk'].'</li>'; + $num = mysqli_num_rows($res) - 1; + while ($num-- > 0) { + $row = mysqli_fetch_assoc($res); + echo '<li>'.$row['targetimage_fk'].'</li>'; + } + echo '</ul></td><tr> + </table></p> + <p><input type="checkbox" name="remove_images" values="1" /> Remove also test images that are used in this test.</p> + </div><p></p> + <input type="hidden" name="testid" value="'.htmlentities($_POST['testid']).'"> + <input type="submit" name="removeit" value="Remove test">'; + } + echo ' + </fieldset> + <p></p> + </form>'; + } + ?> <!-- END content --> <?php do_layout('Manage Tests','Manage Tests'); diff --git a/webserver/user/test_edit.php b/webserver/user/test_edit.php index 67e00b057eeb03672ce505d0273dfa6bca574071..211b2670df1d2ed1e788578b39f9e54801f6c883 100644 --- a/webserver/user/test_edit.php +++ b/webserver/user/test_edit.php @@ -1,72 +1,72 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); $editable = true; if (isset($_POST['doit']) && isset($_POST['testid']) && isset($_POST['xmlfile'])) { - $xmlfile = $_POST['xmlfile']; - $errors = array(); - // check test_owner = user - if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { - $status = get_teststatus($_POST['testid']); - if ($status=='planned') { - $res = update_add_test($_POST['xmlfile'], $errors, $_POST['testid']); - } - else { - array_push($errors, "Only planned tests can be edited."); - $editable = false; - } - } - else - array_push($errors, "Test does not belong to you."); + $xmlfile = $_POST['xmlfile']; + $errors = array(); + // check test_owner = user + if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { + $status = get_teststatus($_POST['testid']); + if ($status=='planned') { + $res = update_add_test($_POST['xmlfile'], $errors, $_POST['testid']); + } + else { + array_push($errors, "Only planned tests can be edited."); + $editable = false; + } + } + else + array_push($errors, "Test does not belong to you."); } else if(isset($_POST['testid']) && isset($_POST['starttime'])) { // reschedule request - $new_start_time = strtotime($_POST['starttime']); - $errors = array(); - // check test_owner = user - if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { - $status = get_teststatus($_POST['testid']); - if ($status=='planned') { - // get xml_config - $config = get_testconfig($_POST['testid']); - $testconfig = new SimpleXMLElement($config); - // shift start and end time - $timeshift_sec = $new_start_time - strtotime($testconfig->generalConf->scheduleAbsolute->start); - $time = new DateTime 
($testconfig->generalConf->scheduleAbsolute->start); - $time->modify($timeshift_sec.' seconds'); - $testconfig->generalConf->scheduleAbsolute->start = $time->format(DATE_W3C); - $time = new DateTime ($testconfig->generalConf->scheduleAbsolute->end); - $time->modify($timeshift_sec.' seconds'); - $testconfig->generalConf->scheduleAbsolute->end = $time->format(DATE_W3C); - // write new xml and validate test - $xmlfile = $testconfig->asXML(); - $res = update_add_test($xmlfile, $errors, $_POST['testid']); - } - } + $new_start_time = strtotime($_POST['starttime']); + $errors = array(); + // check test_owner = user + if (check_testid($_POST['testid'], $_SESSION['serv_users_key'])) { + $status = get_teststatus($_POST['testid']); + if ($status=='planned') { + // get xml_config + $config = get_testconfig($_POST['testid']); + $testconfig = new SimpleXMLElement($config); + // shift start and end time + $timeshift_sec = $new_start_time - strtotime($testconfig->generalConf->scheduleAbsolute->start); + $time = new DateTime ($testconfig->generalConf->scheduleAbsolute->start); + $time->modify($timeshift_sec.' seconds'); + $testconfig->generalConf->scheduleAbsolute->start = $time->format(DATE_W3C); + $time = new DateTime ($testconfig->generalConf->scheduleAbsolute->end); + $time->modify($timeshift_sec.' seconds'); + $testconfig->generalConf->scheduleAbsolute->end = $time->format(DATE_W3C); + // write new xml and validate test + $xmlfile = $testconfig->asXML(); + $res = update_add_test($xmlfile, $errors, $_POST['testid']); + } + } } // Show validation errors: if (isset($errors)) { if (!empty($errors)) { echo '<h1>Edit Test</h1>'; - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>Please correct the following errors:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . "</li>"; - echo "</div><p></p>"; + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>Please correct the following errors:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . 
"</li>"; + echo "</div><p></p>"; } else { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<p>The test was successfully updated.</p><ul>"; - echo "</div><p></p>"; - include('index.php'); - exit(); + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<p>The test was successfully updated.</p><ul>"; + echo "</div><p></p>"; + include('index.php'); + exit(); } } if ((!isset($errors) || !empty($errors)) && $editable) { @@ -79,10 +79,10 @@ else if(isset($_POST['testid']) && isset($_POST['starttime'])) { // reschedule r echo '<textarea name="xmlfile" style="width:100%;height:500px">'.$config.'</textarea>'; } echo '</div> - <input type="hidden" name="testid" value="'.$_POST['testid'].'"> - <input type="reset" value="reset"> - <input type="submit" name="doit" value="save configuration"> - </form>'; + <input type="hidden" name="testid" value="'.$_POST['testid'].'"> + <input type="reset" value="reset"> + <input type="submit" name="doit" value="save configuration"> + </form>'; } ?> <?php diff --git a/webserver/user/test_feed.php b/webserver/user/test_feed.php index 240ece42c55dfc09928d5799a6ef9a31cef41e2d..a3a3ce913b90928d5e7d3dc08d54926c86b4dfab 100644 --- a/webserver/user/test_feed.php +++ b/webserver/user/test_feed.php @@ -1,32 +1,32 @@ <?php - /* - * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Roman Lim" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Roman Lim <lim@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Roman Lim" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ - require_once('include/auth.php'); - - //debug(); - if ((!isset($_GET['testid']) || !is_numeric($_GET['testid'])) && (!isset($_GET['updatesince']) || !is_numeric($_GET['updatesince']))) { - return; - } - // Connect to database and get the corresponding test info: - $db = db_connect(); - $sql = "SELECT serv_tests_key as testid, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status - FROM tbl_serv_tests - WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND test_status <> 'deleted' AND test_status <> 'todelete' AND ".(isset($_GET['testid'])?"serv_tests_key = ".$_GET['testid']:"last_changed >= '".date( 'Y-m-d H:i:s T', $_GET['updatesince'])."'"); - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test data from database because: ' . mysqli_error($db)); - mysqli_close($db); + require_once('include/auth.php'); + + //debug(); + if ((!isset($_GET['testid']) || !is_numeric($_GET['testid'])) && (!isset($_GET['updatesince']) || !is_numeric($_GET['updatesince']))) { + return; + } + // Connect to database and get the corresponding test info: + $db = db_connect(); + $sql = "SELECT serv_tests_key as testid, title, description, time_start_act, time_start_wish, time_end_act, time_end_wish, test_status + FROM tbl_serv_tests + WHERE owner_fk = " . $_SESSION['serv_users_key'] . " AND test_status <> 'deleted' AND test_status <> 'todelete' AND ".(isset($_GET['testid'])?"serv_tests_key = ".$_GET['testid']:"last_changed >= '".date( 'Y-m-d H:i:s T', $_GET['updatesince'])."'"); + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get test data from database because: ' . 
mysqli_error($db)); + mysqli_close($db); - $all = array(); - while ($row = mysqli_fetch_array($rs, MYSQLI_ASSOC)) { - $all[]=$row; - } - // JSON-encode test info - echo json_encode($all); + $all = array(); + while ($row = mysqli_fetch_array($rs, MYSQLI_ASSOC)) { + $all[]=$row; + } + // JSON-encode test info + echo json_encode($all); ?> diff --git a/webserver/user/testbedstatus.php b/webserver/user/testbedstatus.php index 4d8adb8fd7f1609b4b4b7d76b6f4822203a57a45..e916dd01bb7e10653e4e898502ef7daeb1c05305 100755 --- a/webserver/user/testbedstatus.php +++ b/webserver/user/testbedstatus.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision: 2826 $" - * __date__ = "$Date: 2014-05-16 10:46:15 +0200 (Fri, 16 May 2014) $" - * __id__ = "$Id: testbedstatus.php 2826 2014-05-16 08:46:15Z rlim $" - * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/testbedstatus.php $" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision: 2826 $" + * __date__ = "$Date: 2014-05-16 10:46:15 +0200 (Fri, 16 May 2014) $" + * __id__ = "$Id: testbedstatus.php 2826 2014-05-16 08:46:15Z rlim $" + * __source__ = "$URL: svn://svn.ee.ethz.ch/flocklab/trunk/server/webserver/user/testbedstatus.php $" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); $javascript = '<link rel="stylesheet" href="css/ui-lightness/jquery-ui-1.8.20.custom.css">'; @@ -20,176 +20,176 @@ $javascript = '<link rel="stylesheet" href="css/ui-lightness/jquery-ui-1.8.20.cu <script type="text/javascript" src="scripts/flocklab-observer-positions.js"></script> <script type="text/javascript" src="scripts/jquery.cookie.js"></script> <script type="text/javascript"> - $(document).ready(function() { - var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); - $("#pager_num_rows").attr('value', table_rows); - $("#statustable") - .tablesorter({widgets: ['zebra']}) - .tablesorterPager({container: $("#pager"), positionFixed: false}); - $('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - $( "#tabs" ).tabs(); - $( "#tabs ul" ).removeClass('ui-corner-all'); - - $.cookie.json = true; - var tabsel; - try { tabsel = $.cookie('flocklab.statetab'); } - catch (err) { - tabsel = null; - } - if ( tabsel == null) { - tabsel = 0; - } - $( "#tabs" ).tabs('select',tabsel); - $( "#tabs" ).bind( "tabsselect", function( event, ui ) { - $.cookie('flocklab.statetab', ui.index); - } ); + $(document).ready(function() { + var table_rows = Math.max(Math.floor(($(window).height() - 300) / 25),10); + $("#pager_num_rows").attr('value', table_rows); + $("#statustable") + .tablesorter({widgets: ['zebra']}) + .tablesorterPager({container: $("#pager"), positionFixed: false}); + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + $( "#tabs" ).tabs(); + $( "#tabs ul" ).removeClass('ui-corner-all'); + + $.cookie.json = true; + var tabsel; + try { tabsel = $.cookie('flocklab.statetab'); } + catch (err) { + tabsel = null; + } + if ( tabsel == null) { + tabsel = 0; + } + $( "#tabs" ).tabs('select',tabsel); + $( "#tabs" ).bind( "tabsselect", function( event, ui ) { + $.cookie('flocklab.statetab', ui.index); + } ); - var obs_tbl_state; - try { obs_tbl_state = 
$.cookie('flocklab.obssort'); } - catch (err) { - obs_tbl_state = null; - } - if ( obs_tbl_state == null) { - obs_tbl_state = {s: [[0,1],[2,0]], p: 0}; - } - $("#statustable").data('tablesorter').page = obs_tbl_state.p; - $("#statustable").trigger("sorton",[obs_tbl_state.s]); - $("#statustable").bind("applyWidgets",function() { - $.cookie('flocklab.obssort', {s:$("#statustable").data('tablesorter').sortList, p:$("#statustable").data('tablesorter').page}); - }); - - // plot map - vis = new pv.Panel() - .width(776) - .height(900) - .canvas("graph"); + var obs_tbl_state; + try { obs_tbl_state = $.cookie('flocklab.obssort'); } + catch (err) { + obs_tbl_state = null; + } + if ( obs_tbl_state == null) { + obs_tbl_state = {s: [[0,1],[2,0]], p: 0}; + } + $("#statustable").data('tablesorter').page = obs_tbl_state.p; + $("#statustable").trigger("sorton",[obs_tbl_state.s]); + $("#statustable").bind("applyWidgets",function() { + $.cookie('flocklab.obssort', {s:$("#statustable").data('tablesorter').sortList, p:$("#statustable").data('tablesorter').page}); + }); + + // plot map + vis = new pv.Panel() + .width(776) + .height(900) + .canvas("graph"); - force = vis.add(pv.Layout.Force) - .nodes(sensornodes).links([]) - .iterations(0); + force = vis.add(pv.Layout.Force) + .nodes(sensornodes).links([]) + .iterations(0); - force.node.add(pv.Dot) - .shapeSize(function(d) {return 230;}) - .fillStyle(function(d) { return d.status=='online'?"green":(d.status=='offline'?"red":"grey"); }) - .strokeStyle(function() {return this.fillStyle().darker()}) - .lineWidth(1) - .left(function(d) {return d.x}) - .bottom(function(d) {return d.y}) - .title(function(d) {return d.status}) - .event("mouseover", function(d) {d.selected = true;vis.render()}) - .event("mouseout", function(d) {d.selected = false;vis.render()}); + force.node.add(pv.Dot) + .shapeSize(function(d) {return 230;}) + .fillStyle(function(d) { return d.status=='online'?"green":(d.status=='offline'?"red":"grey"); }) + .strokeStyle(function() {return this.fillStyle().darker()}) + .lineWidth(1) + .left(function(d) {return d.x}) + .bottom(function(d) {return d.y}) + .title(function(d) {return d.status}) + .event("mouseover", function(d) {d.selected = true;vis.render()}) + .event("mouseout", function(d) {d.selected = false;vis.render()}); - force.label.add(pv.Label) - .text(function(d) {return d.node_id;}) - .font(function() {return "bold 11px sans-serif";}); - - vis.render(); - - // interactive platform names - $('.targetplatform').bind('mouseover', function(event) { - $('.targetplatform').removeClass('bold'); - $('.targetplatform:contains(' + $(this).text()+')').addClass('bold'); - }); - }); + force.label.add(pv.Label) + .text(function(d) {return d.node_id;}) + .font(function() {return "bold 11px sans-serif";}); + + vis.render(); + + // interactive platform names + $('.targetplatform').bind('mouseover', function(event) { + $('.targetplatform').removeClass('bold'); + $('.targetplatform:contains(' + $(this).text()+')').addClass('bold'); + }); + }); </script> - <h1>FlockLab Status</h1> - <div id="tabs"> - <ul> - <li><a href="#tabs-1">Map</a></li> - <li><a href="#tabs-2">Table</a></li> - <li><a href="#tabs-3">3D - Plan</a></li> - </ul> - <div id="tabs-1" style="background-color:white"> - <div style="position:relative;margin-left:60px;padding:0;width:776px;height:800px;background-color:#fff"> - <div id="graph-bg" style="width:776px;height:800px;z-index:1;position:absolute;background:url(pics/flocklab_floormap.png);opacity:0.6;filter:alpha(opacity=60);"></div> - <div 
id="graph" style="width:776px;height:419px;z-index:2;position:absolute"></div> - </div> - </div> - <div id="tabs-2" style="padding-left:10px"> - <?php - /* Get all status information about the observers from the database and display them in the table. */ - $db = db_connect(); - $sql = "SELECT obs.observer_id, obs.status, obs.last_changed, - slot1.name AS name1, slot1.description AS desc1, - slot2.name AS name2, slot2.description AS desc2, - slot3.name AS name3, slot3.description AS desc3, - slot4.name AS name4, slot4.description AS desc4 - FROM `flocklab`.`tbl_serv_observer` AS obs - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS a ON obs.slot_1_tg_adapt_list_fk = a.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot1 ON a.tg_adapt_types_fk = slot1.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS b ON obs.slot_2_tg_adapt_list_fk = b.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot2 ON b.tg_adapt_types_fk = slot2.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS c ON obs.slot_3_tg_adapt_list_fk = c.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot3 ON c.tg_adapt_types_fk = slot3.serv_tg_adapt_types_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS d ON obs.slot_4_tg_adapt_list_fk = d.serv_tg_adapt_list_key - LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot4 ON d.tg_adapt_types_fk = slot4.serv_tg_adapt_types_key - WHERE obs.status!='disabled' AND obs.status!='develop' - ORDER BY obs.observer_id - ;"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get observer information from database because: ' . mysqli_error($db)); - mysqli_close($db); - ?> - <div><table id="statustable" class="tablesorter" style="width:885px"> - <thead> - <tr> - <th style="width:40px">Observer ID</th> - <th>Status</th> - <th>Adapter<BR>Slot 1</th> - <th>Adapter<BR>Slot 2</th> - <th>Adapter<BR>Slot 3</th> - <th>Adapter<BR>Slot 4</th> - <th style="width:190px">Last Change</th> - </tr> - </thead> - <tbody> - <?php - $i = 0; - $js = ''; - while ($row = mysqli_fetch_array($rs)) { - $i++; - echo ($i%2 == 1) ? "<tr class='even'>" : "<tr class='odd'>"; - if ($row['observer_id'] < 10 ) - echo "<td>00"; - elseif ($row['observer_id'] < 100 ) - echo "<td>0"; - else - echo "<td>"; - echo $row['observer_id'] . "</td>"; - echo "<td>" . $row['status'] . "</td>"; - echo "<td class='qtip_show targetplatform' title='"; - echo ($row['name1'] == "") ? "No or unknown adapter installed'></td>" : $row['desc1'] . "'>" . $row['name1'] . "</td>"; - echo "<td class='qtip_show targetplatform' title='"; - echo ($row['name2'] == "") ? "No or unknown adapter installed'></td>" : $row['desc2'] . "'>" . $row['name2'] . "</td>"; - echo "<td class='qtip_show targetplatform' title='"; - echo ($row['name3'] == "") ? "No or unknown adapter installed'></td>" : $row['desc3'] . "'>" . $row['name3'] . "</td>"; - echo "<td class='qtip_show targetplatform' title='"; - echo ($row['name4'] == "") ? "No or unknown adapter installed'></td>" : $row['desc4'] . "'>" . $row['name4'] . 
"</td>"; - echo "<td>".date_to_tzdate($row['last_changed'])."</td>"; - echo "</tr>"; - $js.='$(sensornodes).each(function(){ if (this.node_id=='.$row['observer_id'].') { this.status="'.$row['status'].'"}});'."\n"; - } - ?> - </tbody> - </table> - <span id="pager" class="pager"> - <img src="pics/icons/first.gif" alt="first" class="first"> - <img src="pics/icons/prev.gif" alt="prev" class="prev"> - <span class="pagedisplay"></span> - <img src="pics/icons/next.gif" alt="next" class="next"> - <img src="pics/icons/last.gif" alt="last" class="last"> - <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> - </span> <br > - </div> - </div> - <div id="tabs-3"> - <p> - The main part of FlockLab is located inside our office building (green shaded area on the left of the picture). The outdoor nodes are located on the terrace and the wall of an adjacent building one floor below the indoor nodes (green shaded areas in the middle and right). See "Map"-tab for more information. - </p> - <img alt="" src="pics/flocklab_googleearth.jpg" width="900px"> - </div> + <h1>FlockLab Status</h1> + <div id="tabs"> + <ul> + <li><a href="#tabs-1">Map</a></li> + <li><a href="#tabs-2">Table</a></li> + <li><a href="#tabs-3">3D - Plan</a></li> + </ul> + <div id="tabs-1" style="background-color:white"> + <div style="position:relative;margin-left:60px;padding:0;width:776px;height:800px;background-color:#fff"> + <div id="graph-bg" style="width:776px;height:800px;z-index:1;position:absolute;background:url(pics/flocklab_floormap.png);opacity:0.6;filter:alpha(opacity=60);"></div> + <div id="graph" style="width:776px;height:419px;z-index:2;position:absolute"></div> + </div> + </div> + <div id="tabs-2" style="padding-left:10px"> + <?php + /* Get all status information about the observers from the database and display them in the table. */ + $db = db_connect(); + $sql = "SELECT obs.observer_id, obs.status, obs.last_changed, + slot1.name AS name1, slot1.description AS desc1, + slot2.name AS name2, slot2.description AS desc2, + slot3.name AS name3, slot3.description AS desc3, + slot4.name AS name4, slot4.description AS desc4 + FROM `flocklab`.`tbl_serv_observer` AS obs + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS a ON obs.slot_1_tg_adapt_list_fk = a.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot1 ON a.tg_adapt_types_fk = slot1.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS b ON obs.slot_2_tg_adapt_list_fk = b.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot2 ON b.tg_adapt_types_fk = slot2.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS c ON obs.slot_3_tg_adapt_list_fk = c.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot3 ON c.tg_adapt_types_fk = slot3.serv_tg_adapt_types_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS d ON obs.slot_4_tg_adapt_list_fk = d.serv_tg_adapt_list_key + LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot4 ON d.tg_adapt_types_fk = slot4.serv_tg_adapt_types_key + WHERE obs.status!='disabled' AND obs.status!='develop' + ORDER BY obs.observer_id + ;"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get observer information from database because: ' . 
mysqli_error($db)); + mysqli_close($db); + ?> + <div><table id="statustable" class="tablesorter" style="width:885px"> + <thead> + <tr> + <th style="width:40px">Observer ID</th> + <th>Status</th> + <th>Adapter<BR>Slot 1</th> + <th>Adapter<BR>Slot 2</th> + <th>Adapter<BR>Slot 3</th> + <th>Adapter<BR>Slot 4</th> + <th style="width:190px">Last Change</th> + </tr> + </thead> + <tbody> + <?php + $i = 0; + $js = ''; + while ($row = mysqli_fetch_array($rs)) { + $i++; + echo ($i%2 == 1) ? "<tr class='even'>" : "<tr class='odd'>"; + if ($row['observer_id'] < 10 ) + echo "<td>00"; + elseif ($row['observer_id'] < 100 ) + echo "<td>0"; + else + echo "<td>"; + echo $row['observer_id'] . "</td>"; + echo "<td>" . $row['status'] . "</td>"; + echo "<td class='qtip_show targetplatform' title='"; + echo ($row['name1'] == "") ? "No or unknown adapter installed'></td>" : $row['desc1'] . "'>" . $row['name1'] . "</td>"; + echo "<td class='qtip_show targetplatform' title='"; + echo ($row['name2'] == "") ? "No or unknown adapter installed'></td>" : $row['desc2'] . "'>" . $row['name2'] . "</td>"; + echo "<td class='qtip_show targetplatform' title='"; + echo ($row['name3'] == "") ? "No or unknown adapter installed'></td>" : $row['desc3'] . "'>" . $row['name3'] . "</td>"; + echo "<td class='qtip_show targetplatform' title='"; + echo ($row['name4'] == "") ? "No or unknown adapter installed'></td>" : $row['desc4'] . "'>" . $row['name4'] . "</td>"; + echo "<td>".date_to_tzdate($row['last_changed'])."</td>"; + echo "</tr>"; + $js.='$(sensornodes).each(function(){ if (this.node_id=='.$row['observer_id'].') { this.status="'.$row['status'].'"}});'."\n"; + } + ?> + </tbody> + </table> + <span id="pager" class="pager"> + <img src="pics/icons/first.gif" alt="first" class="first"> + <img src="pics/icons/prev.gif" alt="prev" class="prev"> + <span class="pagedisplay"></span> + <img src="pics/icons/next.gif" alt="next" class="next"> + <img src="pics/icons/last.gif" alt="last" class="last"> + <input class="pagesize" style="visibility: hidden;" id="pager_num_rows" value="15"> + </span> <br > + </div> + </div> + <div id="tabs-3"> + <p> + The main part of FlockLab is located inside our office building (green shaded area on the left of the picture). The outdoor nodes are located on the terrace and the wall of an adjacent building one floor below the indoor nodes (green shaded areas in the middle and right). See "Map"-tab for more information. 
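
The Observer ID column in the loop above zero-pads the ID to three digits with an if/elseif chain. For reference, the same padding can be expressed in one line (a sketch, not part of the patch):

    // Sketch: same three-digit padding as the if/elseif chain above.
    echo "<td>" . sprintf("%03d", $row['observer_id']) . "</td>";
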
+ </p> + <img alt="" src="pics/flocklab_googleearth.jpg" width="900px"> + </div> </div> <?php diff --git a/webserver/user/testconfig_download.php b/webserver/user/testconfig_download.php index 295444f35a8d51e2e1c9e2f52ce11d51726bcf80..58108dabd60db8027b558d9736fcc13725df65c4 100755 --- a/webserver/user/testconfig_download.php +++ b/webserver/user/testconfig_download.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php include_once('include/presets.php');?> <?php diff --git a/webserver/user/topology.php b/webserver/user/topology.php index 404fbddde7a62de20006237ab4d6b5e6a0bb33c7..70b27847ae15feffd5cf11a3cd3788bee4093e8d 100644 --- a/webserver/user/topology.php +++ b/webserver/user/topology.php @@ -1,13 +1,13 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/layout.php');require_once('include/presets.php'); $style=' @@ -74,229 +74,229 @@ var heatmap_done = true; var links; var platforms = { - tmote :{ name:"Tmote", rssi_thresh:6, linkchannel:26, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, - tinynode: { name:"TinyNode", rssi_thresh:7, linkchannel:5, frq:function(k){return (867.075 + 0.150 * k).toFixed(3) + 'MHz'}}, - // opal_rf212: { name:"Opal(RF212)", rssi_thresh:1, linkchannel:0, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, - // opal_rf230: { name:"Opal(RF230)", rssi_thresh:0, linkchannel:11, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, + tmote :{ name:"Tmote", rssi_thresh:6, linkchannel:26, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, + tinynode: { name:"TinyNode", rssi_thresh:7, linkchannel:5, frq:function(k){return (867.075 + 0.150 * k).toFixed(3) + 'MHz'}}, + // opal_rf212: { name:"Opal(RF212)", rssi_thresh:1, linkchannel:0, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, + // opal_rf230: { name:"Opal(RF230)", rssi_thresh:0, linkchannel:11, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, // iris: { name:"IRIS", rssi_thresh:6, linkchannel:11, frq:function(k){return (2405 + 5 * (k-11)) + 'MHz'}}, // mica2: { name:"Mica2", rssi_thresh:0, linkchannel:0, frq:function(k){return (915.998 - 1.921 * k).toFixed(3) + 'MHz'}}, - // cc430: { name:"CC430", rssi_thresh:0, linkchannel:0, frq:function(k){return (915.998 - 1.921 * k).toFixed(3) + 'MHz'}}, - dpp: { name:"DPP", rssi_thresh:0, linkchannel:0, frq:function(k){return (915.998 - 1.921 * k).toFixed(3) + 'MHz'}} + // cc430: { name:"CC430", rssi_thresh:0, linkchannel:0, frq:function(k){return (915.998 - 1.921 * k).toFixed(3) + 'MHz'}}, + dpp: { 
name:"DPP", rssi_thresh:0, linkchannel:0, frq:function(k){return (915.998 - 1.921 * k).toFixed(3) + 'MHz'}} }; var platform = platforms.tmote; var rssi_channel = platforms.tmote.linkchannel; function loadTestData(newplatform, selected) { - eventSource.clear(); - $("#platform").find("a").removeClass("selected"); - tl.loadJSON("link_feed.php?p="+newplatform, function(json, url) { - $(json.events).each(function() { - this.icon=Timeline_urlPrefix + "images/green-circle.png"; - }); - events = json.events; - eventSource.loadJSON(json, url); - $(selected).find("a").addClass("selected"); - var closest = -1; - var closest_int = -1; - var closest_start = -1; - $(eventSource.getAllEventIterator()._events._a).each(function() { - if (closest < 0 || closest_int > Math.abs(this._start - current_test.start)) { - closest=parseInt(this._obj.description); - closest_int = Math.abs(this._start - current_test.start) - closest_start = this._start; - } - }); - if (closest>0) { - loadTestDataId(closest); - current_test.id = closest; - current_test.start = closest_start; - tl.getBand(0).getEventPainter().paint(); - } - else { - // empty display - var links = new Array(); - $(sensornodes).each(function(){ this.seen = false; }); - force.links(links).iterations(0); - force.reset(); - vis.render(); - $("#heatmap").hide(); - } - }); + eventSource.clear(); + $("#platform").find("a").removeClass("selected"); + tl.loadJSON("link_feed.php?p="+newplatform, function(json, url) { + $(json.events).each(function() { + this.icon=Timeline_urlPrefix + "images/green-circle.png"; + }); + events = json.events; + eventSource.loadJSON(json, url); + $(selected).find("a").addClass("selected"); + var closest = -1; + var closest_int = -1; + var closest_start = -1; + $(eventSource.getAllEventIterator()._events._a).each(function() { + if (closest < 0 || closest_int > Math.abs(this._start - current_test.start)) { + closest=parseInt(this._obj.description); + closest_int = Math.abs(this._start - current_test.start) + closest_start = this._start; + } + }); + if (closest>0) { + loadTestDataId(closest); + current_test.id = closest; + current_test.start = closest_start; + tl.getBand(0).getEventPainter().paint(); + } + else { + // empty display + var links = new Array(); + $(sensornodes).each(function(){ this.seen = false; }); + force.links(links).iterations(0); + force.reset(); + vis.render(); + $("#heatmap").hide(); + } + }); } function setLinkMap(channel) { - if (platform.linkchannel==channel) { - force.links(links).iterations(0); - $( "#amount" ).empty().text( "PRR: " + $( "#slider-range" ).slider( "values", 0 ) + - "% - " + $( "#slider-range" ).slider( "values", 1 ) +"%"); - } - else { - var nolinks = new Array(); - force.links(nolinks).iterations(0); - $( "#amount" ).empty().text( "no PRR available"); - } - force.reset(); - vis.render(); + if (platform.linkchannel==channel) { + force.links(links).iterations(0); + $( "#amount" ).empty().text( "PRR: " + $( "#slider-range" ).slider( "values", 0 ) + + "% - " + $( "#slider-range" ).slider( "values", 1 ) +"%"); + } + else { + var nolinks = new Array(); + force.links(nolinks).iterations(0); + $( "#amount" ).empty().text( "no PRR available"); + } + force.reset(); + vis.render(); } function setHeatMap(channel) { - var scan; - heatmap_done = false; - // check channel for link graph - setLinkMap(channel); - $(rssi_scans).each(function(){ - if (this.channel == channel) - scan = this; - }); - $(sensornodes).each(function(){ - this.rssi=0; - var node = this; - $(scan.nodes).each(function() { - if 
(node.node_id == this.id) - node.rssi = this.rssi; - }); - }); - heatmap($('#heatmap'), 100, 110); - heatmap_done = true; - if (channel != rssi_channel) { - setHeatMap(rssi_channel); - } + var scan; + heatmap_done = false; + // check channel for link graph + setLinkMap(channel); + $(rssi_scans).each(function(){ + if (this.channel == channel) + scan = this; + }); + $(sensornodes).each(function(){ + this.rssi=0; + var node = this; + $(scan.nodes).each(function() { + if (node.node_id == this.id) + node.rssi = this.rssi; + }); + }); + heatmap($('#heatmap'), 100, 110); + heatmap_done = true; + if (channel != rssi_channel) { + setHeatMap(rssi_channel); + } } function loadTestDataId(id) { - links = new Array(); - $.ajax({ - url: "link_feed.php?q="+id, - success: function(data) { - var waslinkchannel = (platform.linkchannel==rssi_channel); - var network = $("network", data).first(); - var platform_key = $(network).attr('platform').toLowerCase(); - if ($(network).attr('radio') !== undefined) - platform_key = platform_key + '_' + $(network).attr('radio').toLowerCase(); - $.each(platforms, function(key){ - if (key==platform_key) { - platform=this; - } - }); + links = new Array(); + $.ajax({ + url: "link_feed.php?q="+id, + success: function(data) { + var waslinkchannel = (platform.linkchannel==rssi_channel); + var network = $("network", data).first(); + var platform_key = $(network).attr('platform').toLowerCase(); + if ($(network).attr('radio') !== undefined) + platform_key = platform_key + '_' + $(network).attr('radio').toLowerCase(); + $.each(platforms, function(key){ + if (key==platform_key) { + platform=this; + } + }); - // rssi - rssi_scans = []; - var ch = {min:Number.POSITIVE_INFINITY , max:Number.NEGATIVE_INFINITY}; - $("rssiscan", data).each(function(){ - var scan ={channel: parseInt($(this).attr("channel")), nodes:[]}; - ch.min = Math.min(ch.min, parseInt($(this).attr("channel"))); - ch.max = Math.max(ch.max, parseInt($(this).attr("channel"))); - $(this).children().each(function(){ + // rssi + rssi_scans = []; + var ch = {min:Number.POSITIVE_INFINITY , max:Number.NEGATIVE_INFINITY}; + $("rssiscan", data).each(function(){ + var scan ={channel: parseInt($(this).attr("channel")), nodes:[]}; + ch.min = Math.min(ch.min, parseInt($(this).attr("channel"))); + ch.max = Math.max(ch.max, parseInt($(this).attr("channel"))); + $(this).children().each(function(){ - var frq = $(this).attr('frq').split(','); - var id = parseInt($(this).attr('nodeid')); - var rssi = 0; var sum = 0; - var max_frq = 0; - var frq_mode = 0; - $(frq).each(function(index) { + var frq = $(this).attr('frq').split(','); + var id = parseInt($(this).attr('nodeid')); + var rssi = 0; var sum = 0; + var max_frq = 0; + var frq_mode = 0; + $(frq).each(function(index) { if (parseInt(this) > max_frq) { max_frq = parseInt(this); frq_mode = index; } }); - $(frq).each(function(index) { + $(frq).each(function(index) { var w = index - (frq_mode + platform.rssi_thresh); - if (w > 0) - rssi+=parseInt(this) * w; + if (w > 0) + rssi+=parseInt(this) * w; w = Math.max(1, w); - sum += parseInt(this) * w; - }); - scan.nodes.push({id:id, rssi: rssi / sum * 100}); - }); - rssi_scans.push(scan); - }); - if (ch.min == Number.POSITIVE_INFINITY) { - $( "#slider-range-rssi" ).slider( "option" , { min: NaN, max: NaN} ); - $( "#amount-rssi" ).empty().text( "no RSSI data"); - $("#heatmap").hide(); - rssi_channel = platform.linkchannel; - } - else { - if (waslinkchannel) { - rssi_channel = platform.linkchannel; - } - else { - if (rssi_channel < ch.min) - 
rssi_channel = ch.min; - else if (rssi_channel > ch.max) - rssi_channel = ch.max; - } - $( "#slider-range-rssi" ).slider( "option" , { min: ch.min,max: ch.max, value: rssi_channel} ); - $( "#amount-rssi" ).empty().text( "Ch: "+rssi_channel + " (" + platform.frq(rssi_channel)+")"); - setHeatMap(rssi_channel); - $("#heatmap").show(); - } + sum += parseInt(this) * w; + }); + scan.nodes.push({id:id, rssi: rssi / sum * 100}); + }); + rssi_scans.push(scan); + }); + if (ch.min == Number.POSITIVE_INFINITY) { + $( "#slider-range-rssi" ).slider( "option" , { min: NaN, max: NaN} ); + $( "#amount-rssi" ).empty().text( "no RSSI data"); + $("#heatmap").hide(); + rssi_channel = platform.linkchannel; + } + else { + if (waslinkchannel) { + rssi_channel = platform.linkchannel; + } + else { + if (rssi_channel < ch.min) + rssi_channel = ch.min; + else if (rssi_channel > ch.max) + rssi_channel = ch.max; + } + $( "#slider-range-rssi" ).slider( "option" , { min: ch.min,max: ch.max, value: rssi_channel} ); + $( "#amount-rssi" ).empty().text( "Ch: "+rssi_channel + " (" + platform.frq(rssi_channel)+")"); + setHeatMap(rssi_channel); + $("#heatmap").show(); + } - // topology - $(sensornodes).each(function(){ this.seen = false; }); - $("link", data).each(function(){ - var src_id = parseInt($(this).attr("src")); - var dest_id = parseInt($(this).attr("dest")); - var src, dest; - $(sensornodes).each(function(index){ - if (this.node_id==src_id) { - src=index; - this.seen = true; - } - if (this.node_id==dest_id) { - dest=index; - this.seen = true; - } - }); - if (src===undefined || dest===undefined) { - //alert("warning, src or destination of link not found."); - } - else { - links.push({source:src, target:dest, value:Math.pow(parseFloat($(this).attr("prr"))*10,2)}); - } - }); - setLinkMap(rssi_channel); + // topology + $(sensornodes).each(function(){ this.seen = false; }); + $("link", data).each(function(){ + var src_id = parseInt($(this).attr("src")); + var dest_id = parseInt($(this).attr("dest")); + var src, dest; + $(sensornodes).each(function(index){ + if (this.node_id==src_id) { + src=index; + this.seen = true; + } + if (this.node_id==dest_id) { + dest=index; + this.seen = true; + } + }); + if (src===undefined || dest===undefined) { + //alert("warning, src or destination of link not found."); + } + else { + links.push({source:src, target:dest, value:Math.pow(parseFloat($(this).attr("prr"))*10,2)}); + } + }); + setLinkMap(rssi_channel); - }, - dataType: "xml" - }); + }, + dataType: "xml" + }); } function heatmap(div, xnum, ynum) { - $(div).empty(); - var vis = new pv.Panel() - .width(div.width()) - .height(div.height()) - .canvas("heatmap") - .antialias(false); - var maxdist = 100; - var xf = xnum / div.width(); - var yf = ynum / div.height(); - vis.add(pv.Image) - .imageWidth(xnum) - .imageHeight(ynum) - .image(pv.Scale.linear() - .domain(0, 25, 50, 95) - .range("#fff", "#ee0", "#ff0","#f00") - .by(function(i, j) { - var sumdist = 1/10000; - var sumval = 0; - $(sensornodes).each(function() { - var dist_sqr = Math.pow(Math.pow(i - this.x * xf, 2) + Math.pow(j - this.y * yf, 2),2); - sumdist += 1 / dist_sqr; - sumval += this.rssi / dist_sqr; - }); - return sumval / sumdist; - })); - vis.render(); - // scale hack - var canvas = $("canvas", div).first(); - canvas.width(div.width()); - if(!document.implementation.hasFeature('http://www.w3.org/TR/SVG11/feature#Extensibility','1.1')){ // no support for foreignObject in SVG - canvas.prependTo(div); // move it directly to div - } + $(div).empty(); + var vis = new pv.Panel() + 
.width(div.width()) + .height(div.height()) + .canvas("heatmap") + .antialias(false); + var maxdist = 100; + var xf = xnum / div.width(); + var yf = ynum / div.height(); + vis.add(pv.Image) + .imageWidth(xnum) + .imageHeight(ynum) + .image(pv.Scale.linear() + .domain(0, 25, 50, 95) + .range("#fff", "#ee0", "#ff0","#f00") + .by(function(i, j) { + var sumdist = 1/10000; + var sumval = 0; + $(sensornodes).each(function() { + var dist_sqr = Math.pow(Math.pow(i - this.x * xf, 2) + Math.pow(j - this.y * yf, 2),2); + sumdist += 1 / dist_sqr; + sumval += this.rssi / dist_sqr; + }); + return sumval / sumdist; + })); + vis.render(); + // scale hack + var canvas = $("canvas", div).first(); + canvas.width(div.width()); + if(!document.implementation.hasFeature('http://www.w3.org/TR/SVG11/feature#Extensibility','1.1')){ // no support for foreignObject in SVG + canvas.prependTo(div); // move it directly to div + } } $(document).ready(function() { @@ -310,9 +310,9 @@ force = vis.add(pv.Layout.Force) .iterations(0); force.link.add(pv.Line).strokeStyle(function(d, l) { - if (l.value < prr_range.min || l.value > prr_range.max) - return; - return d.selected ? "rgba(10, 200, 10, 0.5)" : "rgba(80, 80, 80, 0.2)"; + if (l.value < prr_range.min || l.value > prr_range.max) + return; + return d.selected ? "rgba(10, 200, 10, 0.5)" : "rgba(80, 80, 80, 0.2)"; }); force.node.add(pv.Dot) @@ -327,69 +327,69 @@ force.node.add(pv.Dot) .event("mouseout", function(d) {d.selected = false;vis.render()}); force.label.add(pv.Label) - .text(function(d) {return d.node_id;}) - .font(function() {return "bold 11px sans-serif";}); + .text(function(d) {return d.node_id;}) + .font(function() {return "bold 11px sans-serif";}); vis.render(); $("#platform").empty(); $.each(platforms, function(n) { - $("#platform").append("<li><a href=\"\">"+this.name+"<\/a><\/li>"); - var p = n; - $("li", "#platform").last().bind("click", function() { $("#useinfo").hide();loadTestData(p, this);return false;}); + $("#platform").append("<li><a href=\"\">"+this.name+"<\/a><\/li>"); + var p = n; + $("li", "#platform").last().bind("click", function() { $("#useinfo").hide();loadTestData(p, this);return false;}); }); $("#log").append(""); $( "#slider-range" ).slider({ - range: true, - values: [ 0, 100 ], - slide: function( event, ui ) { - $( "#amount" ).empty().text( "PRR: " + ui.values[ 0 ] + "% - " + ui.values[ 1 ] +"%"); - prr_range.min = ui.values[ 0 ]; - prr_range.max = ui.values[ 1 ]; - vis.render(); - } + range: true, + values: [ 0, 100 ], + slide: function( event, ui ) { + $( "#amount" ).empty().text( "PRR: " + ui.values[ 0 ] + "% - " + ui.values[ 1 ] +"%"); + prr_range.min = ui.values[ 0 ]; + prr_range.max = ui.values[ 1 ]; + vis.render(); + } }); $( "#amount" ).empty().text( "PRR: " + $( "#slider-range" ).slider( "values", 0 ) + - "% - " + $( "#slider-range" ).slider( "values", 1 ) +"%"); + "% - " + $( "#slider-range" ).slider( "values", 1 ) +"%"); $( "#slider-range-rssi" ).slider({ - range: "min", - min: 11, - max: 26, - value: 11, - slide: function( event, ui ) { - $( "#amount-rssi" ).empty().text( "Ch: " + ui.value + " (" + platform.frq(ui.value)+")"); - rssi_channel = ui.value; - if (heatmap_done = true) - setHeatMap(rssi_channel); - } + range: "min", + min: 11, + max: 26, + value: 11, + slide: function( event, ui ) { + $( "#amount-rssi" ).empty().text( "Ch: " + ui.value + " (" + platform.frq(ui.value)+")"); + rssi_channel = ui.value; + if (heatmap_done = true) + setHeatMap(rssi_channel); + } }); $( "#amount-rssi" ).empty().text( "Channel"); // 
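
heatmap() above fills every grid cell with an inverse-distance-weighted mean of the per-node RSSI load, where each node's weight is 1 over the fourth power of its distance to the cell and the small constant 1/10000 keeps cells far from all nodes close to zero. The same estimate transcribed to PHP for reference; the page itself computes it client-side in Protovis, and the names here are illustrative:

    // Sketch: value of one heatmap cell at grid position ($i, $j).
    // $nodes is an array of array('x' => ..., 'y' => ..., 'rssi' => ...) entries already
    // scaled to grid coordinates (the JS above multiplies node positions by xf/yf).
    function heat_value($nodes, $i, $j) {
        $sumdist = 1 / 10000;   // bias so that empty regions fade to zero
        $sumval  = 0;
        foreach ($nodes as $n) {
            // fourth power of the Euclidean distance, as in the JS; the max() guard only
            // avoids a division by zero when a node sits exactly on the cell
            $dist_sqr = max(1e-9, pow(pow($i - $n['x'], 2) + pow($j - $n['y'], 2), 2));
            $sumdist += 1 / $dist_sqr;
            $sumval  += $n['rssi'] / $dist_sqr;
        }
        return $sumval / $sumdist;
    }
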
Timeline Timeline.OriginalEventPainter.prototype._showBubble = function(x, y, evt) { - loadTestDataId(evt.getDescription ()); - current_test.id = evt._obj.description; - current_test.start = evt._start; - tl.getBand(0).getEventPainter().paint(); + loadTestDataId(evt.getDescription ()); + current_test.id = evt._obj.description; + current_test.start = evt._start; + tl.getBand(0).getEventPainter().paint(); } var theme = Timeline.ClassicTheme.create(); eventSource = new Timeline.DefaultEventSource(); var bandInfos = [ Timeline.createBandInfo({ - eventSource: eventSource, - width: "70%", - intervalUnit: Timeline.DateTime.DAY, - intervalPixels: 100, - theme: theme + eventSource: eventSource, + width: "70%", + intervalUnit: Timeline.DateTime.DAY, + intervalPixels: 100, + theme: theme }), Timeline.createBandInfo({ - eventSource: eventSource, - width: "30%", - intervalUnit: Timeline.DateTime.MONTH, - intervalPixels: 200, - showEventText: false, - theme: theme + eventSource: eventSource, + width: "30%", + intervalUnit: Timeline.DateTime.MONTH, + intervalPixels: 200, + showEventText: false, + theme: theme }) ]; @@ -397,11 +397,11 @@ var bandInfos = [ bandInfos[1].highlight = true; tl = Timeline.create(document.getElementById("timeline"), bandInfos, Timeline.HORIZONTAL); tl.getBand(0).getEventPainter().setHighlightMatcher(function(evt) { - return (current_test.id == evt._obj.description)?1:-1; + return (current_test.id == evt._obj.description)?1:-1; }); tl.loadJSON("link_feed.php?p=2", function(json, url) { $(json.events).each(function() { - this.icon=Timeline_urlPrefix + "images/green-circle.png"; + this.icon=Timeline_urlPrefix + "images/green-circle.png"; }); eventSource.loadJSON(json, url); }); @@ -432,12 +432,12 @@ var bandInfos = [ <div id="useinfo" style="position:relative;z-index:10;left:3px;height:30px;padding:5px;width:450px;background-color:#DDD"><img src="pics/icons/left_arrow.png" alt=""> <b>Please choose a platform</b></div> </div> <div class="ui-widget" style="margin-left:10px;float:left;height:30px;width:200px"> - <div id="slider-range" style="margin:5px;height:5px"></div> - <div id="amount" style="margin-left:30px">test</div> + <div id="slider-range" style="margin:5px;height:5px"></div> + <div id="amount" style="margin-left:30px">test</div> </div> <div class="ui-widget" style="margin-left:20px;float:left;height:30px;width:200px"> - <div id="slider-range-rssi" style="margin:5px;height:5px"></div> - <div id="amount-rssi" style="margin-left:30px">test</div> + <div id="slider-range-rssi" style="margin:5px;height:5px"></div> + <div id="amount-rssi" style="margin-left:30px">test</div> </div> </div> <div style="position:relative;padding:0;width:776px;height:900px;background-color:#fff"> diff --git a/webserver/user/update_stats.php b/webserver/user/update_stats.php index 95504015a8b9142cba9b3bec78450bb05f5f93e6..0ca5ac162d6512aa06b913a28916650ebfd9ecc4 100644 --- a/webserver/user/update_stats.php +++ b/webserver/user/update_stats.php @@ -1,9 +1,9 @@ <?php - /* - * __author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2017, ETH Zurich, Switzerland" - * __license__ = "GPL" - */ + /* + * __author__ = "Reto Da Forno <reto.daforno@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2017, ETH Zurich, Switzerland" + * __license__ = "GPL" + */ ?> <?php set_include_path(get_include_path() . PATH_SEPARATOR . 
"/home/flocklab/public_html/user"); diff --git a/webserver/user/user_edit.php b/webserver/user/user_edit.php index 425255dc17f0e3388016035ab69246c10bfec647..9842b00f548676c7f27d6121c51964766e3c5e36 100755 --- a/webserver/user/user_edit.php +++ b/webserver/user/user_edit.php @@ -1,198 +1,198 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2011, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ - - //DEBUG ini_set('display_errors', 1); - //DEBUG error_reporting(E_ALL); + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2011, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ + + //DEBUG ini_set('display_errors', 1); + //DEBUG error_reporting(E_ALL); ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - $first = ((isset($_POST['first'])) && ($_POST['first'] == "no")) ? false : true; - $errors = array(); - - // If the page is called for at least the second time, see if the user wants to change anything and store it in the database if needed: - if (!$first) { - // Get the form data: - $firstname = $_POST['firstname']; - $lastname = $_POST['lastname']; - $email = $_POST['email']; - $institutiontype = $_POST['institutiontype']; - $institution = $_POST['institution']; - $country = $_POST['country']; - $passwd = sha1($_POST['passwd']); - $retypepasswd = sha1($_POST['retypepasswd']); - $quota_runtime = $_POST['quotaruntime']; - $quota_tests = $_POST['quotatests']; - $retention_time = $_POST['retentiontime']; - $username = $_POST['username']; - $disable_infomails = (isset($_POST['disableinfomails']) ? $_POST['disableinfomails'] : '0'); - - // Check necessary fields: - if (($institution=="") || ($institutiontype=="") || ($firstname=="") || ($lastname=="") || ($email=="") || ($country=="")) - array_push($errors, "Please fill out all fields marked with an asterisk."); - // Check if passwords are the same: - if ($passwd != $retypepasswd) - array_push($errors, "Passwords are not the same."); - - // If there was no error, change the data in the database: - if (empty($errors)) { - $db = db_connect(); - $sql = "UPDATE `tbl_serv_users` - SET - `lastname` = '" . mysqli_real_escape_string($db, $lastname) . "', - `firstname` = '" . mysqli_real_escape_string($db, $firstname) . "', - `country` = '" . mysqli_real_escape_string($db, $country) . "', - `email` = '" . mysqli_real_escape_string($db, $email) . "', - `institution_type` = '" . mysqli_real_escape_string($db, $institutiontype) . "', - `institution` = '" . mysqli_real_escape_string($db, $institution) . "', - `disable_infomails` = '" . mysqli_real_escape_string($db, $disable_infomails) . "' - WHERE serv_users_key = " . $_SESSION['serv_users_key']; - mysqli_query($db, $sql) or flocklab_die('Cannot update user information in database because: ' . mysqli_error($db)); - // If the password was changed, reflect that also in the database: - if ($passwd != sha1("")) { - $sql = "UPDATE `tbl_serv_users` SET `password` = '" . mysqli_real_escape_string($db, $passwd) . "' WHERE serv_users_key = " . $_SESSION['serv_users_key']; - mysqli_query($db, $sql) or flocklab_die('Cannot update user password in database because: ' . 
mysqli_error($db)); - } - mysqli_close($db); - } - } else { - // Get the values from the database: - $db = db_connect(); - $sql = "SELECT * - FROM tbl_serv_users - WHERE serv_users_key = " . $_SESSION['serv_users_key']; - $userinfo = mysqli_query($db, $sql) or flocklab_die('Cannot get user information from database because: ' . mysqli_error($db)); - mysqli_close($db); - $row = mysqli_fetch_array($userinfo); - $firstname = $row['firstname']; - $lastname = $row['lastname']; - $username = $row['username']; - $country = $row['country']; - $email = $row['email']; - $password_hash = $row['password']; - $institutiontype = $row['institution_type']; - $institution = $row['institution']; - $quota_runtime = $row['quota_runtime']; - $quota_tests = $row['quota_tests']; - $retention_time = $row['retention_time']; - $disable_infomails= $row['disable_infomails']; - } + $first = ((isset($_POST['first'])) && ($_POST['first'] == "no")) ? false : true; + $errors = array(); + + // If the page is called for at least the second time, see if the user wants to change anything and store it in the database if needed: + if (!$first) { + // Get the form data: + $firstname = $_POST['firstname']; + $lastname = $_POST['lastname']; + $email = $_POST['email']; + $institutiontype = $_POST['institutiontype']; + $institution = $_POST['institution']; + $country = $_POST['country']; + $passwd = sha1($_POST['passwd']); + $retypepasswd = sha1($_POST['retypepasswd']); + $quota_runtime = $_POST['quotaruntime']; + $quota_tests = $_POST['quotatests']; + $retention_time = $_POST['retentiontime']; + $username = $_POST['username']; + $disable_infomails = (isset($_POST['disableinfomails']) ? $_POST['disableinfomails'] : '0'); + + // Check necessary fields: + if (($institution=="") || ($institutiontype=="") || ($firstname=="") || ($lastname=="") || ($email=="") || ($country=="")) + array_push($errors, "Please fill out all fields marked with an asterisk."); + // Check if passwords are the same: + if ($passwd != $retypepasswd) + array_push($errors, "Passwords are not the same."); + + // If there was no error, change the data in the database: + if (empty($errors)) { + $db = db_connect(); + $sql = "UPDATE `tbl_serv_users` + SET + `lastname` = '" . mysqli_real_escape_string($db, $lastname) . "', + `firstname` = '" . mysqli_real_escape_string($db, $firstname) . "', + `country` = '" . mysqli_real_escape_string($db, $country) . "', + `email` = '" . mysqli_real_escape_string($db, $email) . "', + `institution_type` = '" . mysqli_real_escape_string($db, $institutiontype) . "', + `institution` = '" . mysqli_real_escape_string($db, $institution) . "', + `disable_infomails` = '" . mysqli_real_escape_string($db, $disable_infomails) . "' + WHERE serv_users_key = " . $_SESSION['serv_users_key']; + mysqli_query($db, $sql) or flocklab_die('Cannot update user information in database because: ' . mysqli_error($db)); + // If the password was changed, reflect that also in the database: + if ($passwd != sha1("")) { + $sql = "UPDATE `tbl_serv_users` SET `password` = '" . mysqli_real_escape_string($db, $passwd) . "' WHERE serv_users_key = " . $_SESSION['serv_users_key']; + mysqli_query($db, $sql) or flocklab_die('Cannot update user password in database because: ' . mysqli_error($db)); + } + mysqli_close($db); + } + } else { + // Get the values from the database: + $db = db_connect(); + $sql = "SELECT * + FROM tbl_serv_users + WHERE serv_users_key = " . 
$_SESSION['serv_users_key']; + $userinfo = mysqli_query($db, $sql) or flocklab_die('Cannot get user information from database because: ' . mysqli_error($db)); + mysqli_close($db); + $row = mysqli_fetch_array($userinfo); + $firstname = $row['firstname']; + $lastname = $row['lastname']; + $username = $row['username']; + $country = $row['country']; + $email = $row['email']; + $password_hash = $row['password']; + $institutiontype = $row['institution_type']; + $institution = $row['institution']; + $quota_runtime = $row['quota_runtime']; + $quota_tests = $row['quota_tests']; + $retention_time = $row['retention_time']; + $disable_infomails= $row['disable_infomails']; + } ?> - <script type="text/javascript"> - $(document).ready(function() { - $('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - $("#usereditform").validate({ - rules: { - institution: "required", - institutiontype: "required", - firstname: "required", - lastname: "required", - country: "required", - email: { - required: true, - email: true - }, - passwd: { - required: function(element) { - return $("#retypepasswd").val().length > 0; - }, - minlength: 8 - }, - retypepasswd: { - required: function(element) { - return $("#passwd").val().length > 0; - }, - equalTo: "#passwd" - }, - comments: { - required: function(element) { - return $("#institutiontype").val() == "other"; - } - } - }, - messages: { - retypepasswd: { - equalTo: "The passwords do not match." - }, - comments: "Specify type of institution here." - } - }); - }); - </script> + <script type="text/javascript"> + $(document).ready(function() { + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + $("#usereditform").validate({ + rules: { + institution: "required", + institutiontype: "required", + firstname: "required", + lastname: "required", + country: "required", + email: { + required: true, + email: true + }, + passwd: { + required: function(element) { + return $("#retypepasswd").val().length > 0; + }, + minlength: 8 + }, + retypepasswd: { + required: function(element) { + return $("#passwd").val().length > 0; + }, + equalTo: "#passwd" + }, + comments: { + required: function(element) { + return $("#institutiontype").val() == "other"; + } + } + }, + messages: { + retypepasswd: { + equalTo: "The passwords do not match." + }, + comments: "Specify type of institution here." + } + }); + }); + </script> - <h1>User Acccount for <?php echo $_SESSION['firstname'] . " " . $_SESSION['lastname'];?></h1> - <?php - /* If the page is called with a file associated, validate it and show the results */ - if (!$first) { - // Show validation errors: - if (!empty($errors)) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<p>Please correct the following errors:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . 
"</li>"; - echo "</div><p></p>"; - } else { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<p>Account information has been updated..</p><ul>"; - echo "</div><p></p>"; - } - } - ?> - <form id="usereditform" name="usereditform" method="post" action="user_edit.php" enctype="multipart/form-data"> - <fieldset> - <legend>User information</legend> - <span class="formfield">First name:*</span><input type="text" name="firstname" id="firstname" value="<?php echo $firstname;?>" class="required"><br> - <span class="formfield">Last name:*</span><input type="text" name="lastname" id="lastname" value="<?php echo $lastname;?>" class="required"><br> - <span class="formfield">Username:</span><input type="text" name="username" id="username" value="<?php echo $username;?>" disabled="disabled"><br> - <span class="formfield">Email:*</span><input type="text" name="email" id="email" value="<?php echo $email;?>" class="required"><br> - <span class="formfield">Type of Institution:*</span><select name="institutiontype" id="institutiontype"> - <option value="university" <?php echo ($institutiontype == "university") ? 'selected="selected"' : "";?>>University</option> - <option value="researchinstitute" <?php echo ($institutiontype == "researchinstitute") ? 'selected="selected"' : "";?>>Research Institute</option> - <option value="company" <?php echo ($institutiontype == "company") ? 'selected="selected"' : "";?>>Company</option> - <option value="other" <?php echo ($institutiontype == "other") ? 'selected="selected"' : "";?>>Other (specify under comments)</option> - </select><br> - <span class="formfield">Institution:*</span><input type="text" name="institution" id="institution" value="<?php echo $institution;?>" class="required"><br> - <span class="formfield">Country:*</span><select name="country" id="country"> - <option value="" <?php echo (($country == '') ? 'selected="selected"' : "");?></option> - <?php - foreach (countries() as $c) { - echo '<option value="'.$c.'" '.(($country == $c) ? 'selected="selected"' : "").'>'.$c.'</option>'; - } - ?> - </select><br> - <span class="formfield">Password:</span><input type="password" name="passwd" id="passwd" value=""><label id="passwderror" class="error" for="passwd" generated="true" style="display: inline;"></label><br> - <span class="formfield">Retype Password:</span><input type="password" name="retypepasswd" id="retypepasswd" value=""><br> - </fieldset> - <fieldset> - <legend>User quotas</legend> - <span class="qtip_show" title="Maximum number of concurrently scheduled/running tests at any time. Tests that have just finished but whose results are not yet fully processed, are also taken into account."><span class="formfield-extrawide">Maximum number of concurrently scheduled/running tests:</span><?php echo $quota_tests;?> tests</span></span><br> - <span class="qtip_show" title="Maximum total runtime of all scheduled/running tests. Tests that have just finished but whose results are not yet fully processed, are also taken into account."><span class="formfield-extrawide">Maximum total runtime of all scheduled/running tests:</span><?php echo $quota_runtime;?> min</span><br> - <span class="qtip_show" title="After the retention time expired, a test is deleted from the database. 
You will get an email before the deletion but it is your responsability to save your test data externally before it is deleted."><span class="formfield-extrawide">Retention time for test results:</span><?php echo $retention_time;?> days</span><br> - </fieldset> - <fieldset> - <legend>Various</legend> - <span class="qtip_show" title="If checked, you will not receive test status emails from FlockLab. Error messages are still sent though."><span class="formfield-extrawide">Disable info mails about test status:</span></span><input type="checkbox" name="disableinfomails" id="disableinfomails" value="1" <?php echo ($disable_infomails == "1") ? 'checked="checked"' : '';?>><br> - </fieldset> - <p></p> - <input type="hidden" name="first" value="no"> - <input type="hidden" name="quotaruntime" value="<?php echo $quota_runtime;?>"> - <input type="hidden" name="quotatests" value="<?php echo $quota_tests;?>"> - <input type="hidden" name="retentiontime" value="<?php echo $retention_time;?>"> - <input type="hidden" name="username" value="<?php echo $username;?>"> - <input type="submit" value="Save"> - <input type="button" value="Cancel" onClick="window.location='index.php'"> - </form> + <h1>User Acccount for <?php echo $_SESSION['firstname'] . " " . $_SESSION['lastname'];?></h1> + <?php + /* If the page is called with a file associated, validate it and show the results */ + if (!$first) { + // Show validation errors: + if (!empty($errors)) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<p>Please correct the following errors:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . "</li>"; + echo "</div><p></p>"; + } else { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<p>Account information has been updated..</p><ul>"; + echo "</div><p></p>"; + } + } + ?> + <form id="usereditform" name="usereditform" method="post" action="user_edit.php" enctype="multipart/form-data"> + <fieldset> + <legend>User information</legend> + <span class="formfield">First name:*</span><input type="text" name="firstname" id="firstname" value="<?php echo $firstname;?>" class="required"><br> + <span class="formfield">Last name:*</span><input type="text" name="lastname" id="lastname" value="<?php echo $lastname;?>" class="required"><br> + <span class="formfield">Username:</span><input type="text" name="username" id="username" value="<?php echo $username;?>" disabled="disabled"><br> + <span class="formfield">Email:*</span><input type="text" name="email" id="email" value="<?php echo $email;?>" class="required"><br> + <span class="formfield">Type of Institution:*</span><select name="institutiontype" id="institutiontype"> + <option value="university" <?php echo ($institutiontype == "university") ? 'selected="selected"' : "";?>>University</option> + <option value="researchinstitute" <?php echo ($institutiontype == "researchinstitute") ? 'selected="selected"' : "";?>>Research Institute</option> + <option value="company" <?php echo ($institutiontype == "company") ? 'selected="selected"' : "";?>>Company</option> + <option value="other" <?php echo ($institutiontype == "other") ? 
'selected="selected"' : "";?>>Other (specify under comments)</option> + </select><br> + <span class="formfield">Institution:*</span><input type="text" name="institution" id="institution" value="<?php echo $institution;?>" class="required"><br> + <span class="formfield">Country:*</span><select name="country" id="country"> + <option value="" <?php echo (($country == '') ? 'selected="selected"' : "");?></option> + <?php + foreach (countries() as $c) { + echo '<option value="'.$c.'" '.(($country == $c) ? 'selected="selected"' : "").'>'.$c.'</option>'; + } + ?> + </select><br> + <span class="formfield">Password:</span><input type="password" name="passwd" id="passwd" value=""><label id="passwderror" class="error" for="passwd" generated="true" style="display: inline;"></label><br> + <span class="formfield">Retype Password:</span><input type="password" name="retypepasswd" id="retypepasswd" value=""><br> + </fieldset> + <fieldset> + <legend>User quotas</legend> + <span class="qtip_show" title="Maximum number of concurrently scheduled/running tests at any time. Tests that have just finished but whose results are not yet fully processed, are also taken into account."><span class="formfield-extrawide">Maximum number of concurrently scheduled/running tests:</span><?php echo $quota_tests;?> tests</span></span><br> + <span class="qtip_show" title="Maximum total runtime of all scheduled/running tests. Tests that have just finished but whose results are not yet fully processed, are also taken into account."><span class="formfield-extrawide">Maximum total runtime of all scheduled/running tests:</span><?php echo $quota_runtime;?> min</span><br> + <span class="qtip_show" title="After the retention time expired, a test is deleted from the database. You will get an email before the deletion but it is your responsability to save your test data externally before it is deleted."><span class="formfield-extrawide">Retention time for test results:</span><?php echo $retention_time;?> days</span><br> + </fieldset> + <fieldset> + <legend>Various</legend> + <span class="qtip_show" title="If checked, you will not receive test status emails from FlockLab. Error messages are still sent though."><span class="formfield-extrawide">Disable info mails about test status:</span></span><input type="checkbox" name="disableinfomails" id="disableinfomails" value="1" <?php echo ($disable_infomails == "1") ? 
'checked="checked"' : '';?>><br> + </fieldset> + <p></p> + <input type="hidden" name="first" value="no"> + <input type="hidden" name="quotaruntime" value="<?php echo $quota_runtime;?>"> + <input type="hidden" name="quotatests" value="<?php echo $quota_tests;?>"> + <input type="hidden" name="retentiontime" value="<?php echo $retention_time;?>"> + <input type="hidden" name="username" value="<?php echo $username;?>"> + <input type="submit" value="Save"> + <input type="button" value="Cancel" onClick="window.location='index.php'"> + </form> <?php do_layout('User Account','User Account'); diff --git a/webserver/user/user_passwordrecovery.php b/webserver/user/user_passwordrecovery.php index 2c76077a21f488aa4a9cc3136ba4e2ff5830786e..6f6d1f27c0dabcf94c7fef6632e34da78523e289 100755 --- a/webserver/user/user_passwordrecovery.php +++ b/webserver/user/user_passwordrecovery.php @@ -1,137 +1,137 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/libflocklab.php');?> <?php require_once('include/recaptcha.php');?> <?php - $first = ($_POST['first'] == "no") ? false : true; - $error = false; - $errors = array(); - - // If the page is called for the second time, validate and process form: - if (!$first) { - $emailaddress = $_POST['emailaddress']; - - // Check necessary fields: - if ($emailaddress=="") { - $error = true; - array_push($errors, "Please fill out all fields marked with an asterisk."); - } + $first = ($_POST['first'] == "no") ? false : true; + $error = false; + $errors = array(); + + // If the page is called for the second time, validate and process form: + if (!$first) { + $emailaddress = $_POST['emailaddress']; + + // Check necessary fields: + if ($emailaddress=="") { + $error = true; + array_push($errors, "Please fill out all fields marked with an asterisk."); + } - // Check captcha: - if (recaptcha_verify() == false) { - $error = true; - array_push($errors, "Captcha was not entered correctly."); - } - - // If there was no error, set a new, random password in the DB and send it to the user by email: - if (!$error) { - $db = db_connect(); - // Check if user exists in database: - $sql = "SELECT * FROM `tbl_serv_users` WHERE `email` = '" . mysqli_real_escape_string($db, $emailaddress) . "'"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get user information from database because: ' . mysqli_error($db)); - $rows = mysqli_fetch_array($rs); - if ($rows) { - // Generate new password and store it: - $newpassword = substr(hash('sha512',rand()),0,16); - $newhash = sha1($newpassword); - $sql = "UPDATE `tbl_serv_users` SET `password` = '" . $newhash . "' WHERE `email` = '" . mysqli_real_escape_string($db, $emailaddress) . "'"; - mysqli_query($db, $sql) or flocklab_die('Cannot get set new password for user in database because: ' . 
mysqli_error($db)); - } - mysqli_close($db); - - // If user was found and password has been set, inform user: - if (isset($newpassword)) { - $subject = "[FlockLab] Request for password recovery"; - $message = "A request for a FlockLab password recovery has been placed on the FlockLab user interface.\n"; - $message = $message . "If this request has not been placed by you, please contact us on ".$CONFIG['smtp']['email'].".\n\n"; - $message = $message . "Your password has been reset to the following new password: \n\n$newpassword\n\n"; - $message = $message . "Please login at ".$CONFIG['xml']['namespace']."/user and change the password in your account settings afterwards.\n"; - $message = $message . "\n"; - $message = wordwrap($message, 70); - $header = 'X-Mailer: PHP/' . phpversion(); - mail($emailaddress, $subject, $message, $header); - } - } - } + // Check captcha: + if (recaptcha_verify() == false) { + $error = true; + array_push($errors, "Captcha was not entered correctly."); + } + + // If there was no error, set a new, random password in the DB and send it to the user by email: + if (!$error) { + $db = db_connect(); + // Check if user exists in database: + $sql = "SELECT * FROM `tbl_serv_users` WHERE `email` = '" . mysqli_real_escape_string($db, $emailaddress) . "'"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot get user information from database because: ' . mysqli_error($db)); + $rows = mysqli_fetch_array($rs); + if ($rows) { + // Generate new password and store it: + $newpassword = substr(hash('sha512',rand()),0,16); + $newhash = sha1($newpassword); + $sql = "UPDATE `tbl_serv_users` SET `password` = '" . $newhash . "' WHERE `email` = '" . mysqli_real_escape_string($db, $emailaddress) . "'"; + mysqli_query($db, $sql) or flocklab_die('Cannot get set new password for user in database because: ' . mysqli_error($db)); + } + mysqli_close($db); + + // If user was found and password has been set, inform user: + if (isset($newpassword)) { + $subject = "[FlockLab] Request for password recovery"; + $message = "A request for a FlockLab password recovery has been placed on the FlockLab user interface.\n"; + $message = $message . "If this request has not been placed by you, please contact us on ".$CONFIG['smtp']['email'].".\n\n"; + $message = $message . "Your password has been reset to the following new password: \n\n$newpassword\n\n"; + $message = $message . "Please login at ".$CONFIG['xml']['namespace']."/user and change the password in your account settings afterwards.\n"; + $message = $message . "\n"; + $message = wordwrap($message, 70); + $header = 'X-Mailer: PHP/' . 
phpversion(); + mail($emailaddress, $subject, $message, $header); + } + } + } ?> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" - "http://www.w3.org/TR/html4/loose.dtd"> + "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> - <link rel="stylesheet" type="text/css" href="css/flocklab.css"> - <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="stylesheet" type="text/css" href="css/flocklab.css"> + <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <title>FlockLab - Password Recovery</title> + <title>FlockLab - Password Recovery</title> - <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> - <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> - <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> - <meta name="LANGUAGE" content="English"> - <meta name="ROBOTS" content="noindex, nofollow"> - <meta name="DATE" content="2011-2013"> - - <script type="text/javascript" src="scripts/jquery-latest.js"></script> - <script type="text/javascript" src="scripts/jquery.validate.min.js"></script> - <script type="text/javascript"> - $(document).ready(function(){ - $("#flocklabform").validate({ - rules: { - emailaddress: { - required: true, - email: true - } - } - }); - }); - </script> - <script src='https://www.google.com/recaptcha/api.js'></script> + <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> + <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> + <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> + <meta name="LANGUAGE" content="English"> + <meta name="ROBOTS" content="noindex, nofollow"> + <meta name="DATE" content="2011-2013"> + + <script type="text/javascript" src="scripts/jquery-latest.js"></script> + <script type="text/javascript" src="scripts/jquery.validate.min.js"></script> + <script type="text/javascript"> + $(document).ready(function(){ + $("#flocklabform").validate({ + rules: { + emailaddress: { + required: true, + email: true + } + } + }); + }); + </script> + <script src='https://www.google.com/recaptcha/api.js'></script> </head> <body> - <div id="container" class="container"> - <div id="header" class="header"> - <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> - </div> <!-- END header --> - <div id="content" class="content"> - <h1>FlockLab Password Recovery</h1> - <form id="flocklabform" name="flocklabform" method="post" action="user_passwordrecovery.php"> - <?php - if ($first || $error) { - if ($error) { - echo "<div class='warning'><div style='float:left;'><img alt='' src='pics/icons/att.png'></div>"; - echo "<p>Please correct the following errors:</p><ul>"; - foreach ($errors as $line) - echo "<li>" . $line . "</li>"; - echo "</ul>"; - echo "</div>"; - } - ?> - <p>Please fill out the form below to request a new password for your FlockLab account. 
Fields marked with * are mandatory.</p> - <span class="formfield">E-mail Address:*</span><input type="text" name="emailaddress" id="emailaddress" value="<?php echo $emailaddress;?>"><br> - <span class="formfield">Captcha:*</span><?php recaptcha_print(); ?> - <p> - <input type="hidden" name="first" value="no"> - <input type="submit" value="Request new Password"> - <input type="button" value="Cancel" onclick="window.location='login.php'"> - </p> - <?php - } else { - echo "<p class='info'><img alt='' src='pics/icons/info.png'>Your request has been submitted. A new password will be sent to your E-mail address if it is registered in our database.</p>"; - echo "<input type=\"button\" value=\"Finish\" onclick=\"window.location='login.php'\">"; - } - ?> - </form> - </div> <!-- END content --> - <div style="clear:both"></div> - </div> <!-- END container --> + <div id="container" class="container"> + <div id="header" class="header"> + <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> + </div> <!-- END header --> + <div id="content" class="content"> + <h1>FlockLab Password Recovery</h1> + <form id="flocklabform" name="flocklabform" method="post" action="user_passwordrecovery.php"> + <?php + if ($first || $error) { + if ($error) { + echo "<div class='warning'><div style='float:left;'><img alt='' src='pics/icons/att.png'></div>"; + echo "<p>Please correct the following errors:</p><ul>"; + foreach ($errors as $line) + echo "<li>" . $line . "</li>"; + echo "</ul>"; + echo "</div>"; + } + ?> + <p>Please fill out the form below to request a new password for your FlockLab account. Fields marked with * are mandatory.</p> + <span class="formfield">E-mail Address:*</span><input type="text" name="emailaddress" id="emailaddress" value="<?php echo $emailaddress;?>"><br> + <span class="formfield">Captcha:*</span><?php recaptcha_print(); ?> + <p> + <input type="hidden" name="first" value="no"> + <input type="submit" value="Request new Password"> + <input type="button" value="Cancel" onclick="window.location='login.php'"> + </p> + <?php + } else { + echo "<p class='info'><img alt='' src='pics/icons/info.png'>Your request has been submitted. A new password will be sent to your E-mail address if it is registered in our database.</p>"; + echo "<input type=\"button\" value=\"Finish\" onclick=\"window.location='login.php'\">"; + } + ?> + </form> + </div> <!-- END content --> + <div style="clear:both"></div> + </div> <!-- END container --> </body> </html> diff --git a/webserver/user/user_register.php b/webserver/user/user_register.php index a2eaada8d6d09dc939a929ab882383c816969c44..32228ef441de4ae9ea14182cb3b288fc3a4af335 100755 --- a/webserver/user/user_register.php +++ b/webserver/user/user_register.php @@ -1,238 +1,238 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/libflocklab.php'); ?> <?php require_once('include/recaptcha.php'); ?> <?php - $first = ($_POST['first'] == "no") ? 
false : true; - $error = false; - $errors = array(); - - // If the page is called for the second time, validate form and send an email to the flocklab admin on success. - if (!$first) { - $institution = $_POST['institution']; - $institutiontype = $_POST['institutiontype']; - $firstname = $_POST['firstname']; - $lastname = $_POST['lastname']; - $emailaddress = $_POST['emailaddress']; - $username = $_POST['username']; - $country = $_POST['country']; - $passwd = sha1($_POST['passwd']); - $retypepasswd = sha1($_POST['retypepasswd']); - $description = $_POST['description']; - $comments = $_POST['comments']; - $termsofuse = $_POST['termsofuse']; - - /* Check necessary fields */ - // Check necessary fields: - if (($institution=="") || ($institutiontype=="") || ($firstname=="") || ($lastname=="") || ($emailaddress=="") || ($passwd=="") || ($retypepasswd=="") || ($description=="") || ($country=="")) { - $error = true; - array_push($errors, "Please fill out all fields marked with an asterisk."); - } - // If institution is "other", it has to be specified in the comments section: - if (($institutiontype == "other") && ($comments == "")) { - $error = true; - array_push($errors, "Please specify your type of institution in the comments section."); - } - // Check if passwords are the same: - if ($passwd != $retypepasswd) { - $error = true; - array_push($errors, "Passwords are not the same."); - } - // Check if username already exists: - $db = db_connect(); - $sql = "SELECT COUNT(*) FROM `tbl_serv_users` WHERE `username` = '" . mysqli_real_escape_string($db, $username) . "'"; - $rs = mysqli_query($db, $sql) or flocklab_die('Cannot check username against database because: ' . mysqli_error($db)); - $row = mysqli_fetch_row($rs); - mysqli_close($db); - if ($row[0] > 0) { - $error = true; - array_push($errors, "Username already exists in database."); - } - - // Check captcha: - if (recaptcha_verify() == false) { - $error = true; - array_push($errors, "Captcha was not entered correctly."); - } - - // Check if terms of use are accepted: - if ($termsofuse <> "yes") { - $error = true; - array_push($errors, "Terms of use have to be accepted."); - } - - // If there was no error, insert the data into the database and send an email to the flocklab admin: - if (!$error) { - $db = db_connect(); - $sql = "INSERT INTO `tbl_serv_users` (`lastname`, `firstname`, `username`, `country`, `password`, `email`, `institution_type`, `institution`, `is_active`,`create_time`) - VALUES ( - '" . mysqli_real_escape_string($db, $lastname) . "', - '" . mysqli_real_escape_string($db, $firstname) . "', - '" . mysqli_real_escape_string($db, $username) . "', - '" . mysqli_real_escape_string($db, $country) . "', - '" . mysqli_real_escape_string($db, $passwd) . "', - '" . mysqli_real_escape_string($db, $emailaddress) . "', - '" . mysqli_real_escape_string($db, $institutiontype) . "', - '" . mysqli_real_escape_string($db, $institution) . "', 0, NOW())"; - mysqli_query($db, $sql) or flocklab_die('Cannot store user information in database because: ' . mysqli_error($db)); - mysqli_close($db); - - $adminemails = get_admin_emails(); - $to = implode(", ", $adminemails); - $subject = "Request for FlockLab user account"; - $message = "A request for a new FlockLab user account has been placed on www.flocklab.ethz.ch/user/user_register.php\n\n"; - $message = $message . "First Name: $firstname\n"; - $message = $message . "Last Name: $lastname\n"; - $message = $message . "Username: $username\n"; - $message = $message . 
"Country: $country\n"; - $message = $message . "Institution : $institution\n"; - $message = $message . "Institution type: $institutiontype\n"; - $message = $message . "Email: $emailaddress\n"; - $message = $message . "Password SHA1 Hash: $passwd\n"; - $message = $message . "FL will be used for: $description\n"; - $message = $message . "Comments: $comments\n"; - $message = $message . "Terms of use accepted: $termsofuse\n"; - $message = $message . "\n"; - $message = wordwrap($message, 70); - $header = 'Reply-To: ' . $emailaddress . "\r\n" . - 'X-Mailer: PHP/' . phpversion(); - mail($to, $subject, $message, $header); - } - } + $first = ($_POST['first'] == "no") ? false : true; + $error = false; + $errors = array(); + + // If the page is called for the second time, validate form and send an email to the flocklab admin on success. + if (!$first) { + $institution = $_POST['institution']; + $institutiontype = $_POST['institutiontype']; + $firstname = $_POST['firstname']; + $lastname = $_POST['lastname']; + $emailaddress = $_POST['emailaddress']; + $username = $_POST['username']; + $country = $_POST['country']; + $passwd = sha1($_POST['passwd']); + $retypepasswd = sha1($_POST['retypepasswd']); + $description = $_POST['description']; + $comments = $_POST['comments']; + $termsofuse = $_POST['termsofuse']; + + /* Check necessary fields */ + // Check necessary fields: + if (($institution=="") || ($institutiontype=="") || ($firstname=="") || ($lastname=="") || ($emailaddress=="") || ($passwd=="") || ($retypepasswd=="") || ($description=="") || ($country=="")) { + $error = true; + array_push($errors, "Please fill out all fields marked with an asterisk."); + } + // If institution is "other", it has to be specified in the comments section: + if (($institutiontype == "other") && ($comments == "")) { + $error = true; + array_push($errors, "Please specify your type of institution in the comments section."); + } + // Check if passwords are the same: + if ($passwd != $retypepasswd) { + $error = true; + array_push($errors, "Passwords are not the same."); + } + // Check if username already exists: + $db = db_connect(); + $sql = "SELECT COUNT(*) FROM `tbl_serv_users` WHERE `username` = '" . mysqli_real_escape_string($db, $username) . "'"; + $rs = mysqli_query($db, $sql) or flocklab_die('Cannot check username against database because: ' . mysqli_error($db)); + $row = mysqli_fetch_row($rs); + mysqli_close($db); + if ($row[0] > 0) { + $error = true; + array_push($errors, "Username already exists in database."); + } + + // Check captcha: + if (recaptcha_verify() == false) { + $error = true; + array_push($errors, "Captcha was not entered correctly."); + } + + // Check if terms of use are accepted: + if ($termsofuse <> "yes") { + $error = true; + array_push($errors, "Terms of use have to be accepted."); + } + + // If there was no error, insert the data into the database and send an email to the flocklab admin: + if (!$error) { + $db = db_connect(); + $sql = "INSERT INTO `tbl_serv_users` (`lastname`, `firstname`, `username`, `country`, `password`, `email`, `institution_type`, `institution`, `is_active`,`create_time`) + VALUES ( + '" . mysqli_real_escape_string($db, $lastname) . "', + '" . mysqli_real_escape_string($db, $firstname) . "', + '" . mysqli_real_escape_string($db, $username) . "', + '" . mysqli_real_escape_string($db, $country) . "', + '" . mysqli_real_escape_string($db, $passwd) . "', + '" . mysqli_real_escape_string($db, $emailaddress) . "', + '" . mysqli_real_escape_string($db, $institutiontype) . 
"', + '" . mysqli_real_escape_string($db, $institution) . "', 0, NOW())"; + mysqli_query($db, $sql) or flocklab_die('Cannot store user information in database because: ' . mysqli_error($db)); + mysqli_close($db); + + $adminemails = get_admin_emails(); + $to = implode(", ", $adminemails); + $subject = "Request for FlockLab user account"; + $message = "A request for a new FlockLab user account has been placed on www.flocklab.ethz.ch/user/user_register.php\n\n"; + $message = $message . "First Name: $firstname\n"; + $message = $message . "Last Name: $lastname\n"; + $message = $message . "Username: $username\n"; + $message = $message . "Country: $country\n"; + $message = $message . "Institution : $institution\n"; + $message = $message . "Institution type: $institutiontype\n"; + $message = $message . "Email: $emailaddress\n"; + $message = $message . "Password SHA1 Hash: $passwd\n"; + $message = $message . "FL will be used for: $description\n"; + $message = $message . "Comments: $comments\n"; + $message = $message . "Terms of use accepted: $termsofuse\n"; + $message = $message . "\n"; + $message = wordwrap($message, 70); + $header = 'Reply-To: ' . $emailaddress . "\r\n" . + 'X-Mailer: PHP/' . phpversion(); + mail($to, $subject, $message, $header); + } + } ?> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" - "http://www.w3.org/TR/html4/loose.dtd"> + "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> - <link rel="stylesheet" type="text/css" href="css/flocklab.css"> - <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="stylesheet" type="text/css" href="css/flocklab.css"> + <link rel="shortcut icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> + <link rel="icon" href="pics/icons/favicon.ico" type="image/x-ico; charset=binary"> - <title>FlockLab - Register Account</title> + <title>FlockLab - Register Account</title> - <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> - <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> - <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> - <meta name="LANGUAGE" content="English"> - <meta name="ROBOTS" content="noindex, nofollow"> - <meta name="DATE" content="2011-2012"> - - <script type="text/javascript" src="scripts/jquery-latest.js"></script> - <script type="text/javascript" src="scripts/jquery.validate.min.js"></script> - <script type="text/javascript"> - $(document).ready(function(){ - $("#flocklabform").validate({ - rules: { - institution: "required", - institutiontype: "required", - country: "required", - firstname: "required", - lastname: "required", - emailaddress: { - required: true, - email: true - }, - username: { - required: true, - minlength: 3, - maxlength: 10 - }, - passwd: { - required: true, - minlength: 8 - }, - retypepasswd: { - required: true, - equalTo: "#passwd" - }, - description: "required", - termsofuse: "required", - comments: { - required: function(element) { - return $("#institutiontype").val() == "other"; - } - } - }, - messages: { - retypepasswd: { - equalTo: "The passwords do not match." - }, - termsofuse: "Please accept the terms of use.", - comments: "Specify type of institution here." 
- } - }); - }); - </script> - <script src='https://www.google.com/recaptcha/api.js'></script> + <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> + <meta name="AUTHOR" content="ETH Zurich, Christoph Walser, CH-8092 Zurich, Switzerland"> + <meta name="COPYRIGHT" content="ETH Zurich, Switzerland"> + <meta name="LANGUAGE" content="English"> + <meta name="ROBOTS" content="noindex, nofollow"> + <meta name="DATE" content="2011-2012"> + + <script type="text/javascript" src="scripts/jquery-latest.js"></script> + <script type="text/javascript" src="scripts/jquery.validate.min.js"></script> + <script type="text/javascript"> + $(document).ready(function(){ + $("#flocklabform").validate({ + rules: { + institution: "required", + institutiontype: "required", + country: "required", + firstname: "required", + lastname: "required", + emailaddress: { + required: true, + email: true + }, + username: { + required: true, + minlength: 3, + maxlength: 10 + }, + passwd: { + required: true, + minlength: 8 + }, + retypepasswd: { + required: true, + equalTo: "#passwd" + }, + description: "required", + termsofuse: "required", + comments: { + required: function(element) { + return $("#institutiontype").val() == "other"; + } + } + }, + messages: { + retypepasswd: { + equalTo: "The passwords do not match." + }, + termsofuse: "Please accept the terms of use.", + comments: "Specify type of institution here." + } + }); + }); + </script> + <script src='https://www.google.com/recaptcha/api.js'></script> </head> <body> - <div id="container" class="container"> - <div id="header" class="header"> - <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> - </div> <!-- END header --> - <div id="content" class="content"> - <h1>Register for a FlockLab Account</h1> - <form id="flocklabform" name="flocklabform" method="post" action="user_register.php"> - <?php - if ($first || $error) { - if ($error) { - echo "<div class='warning'><div style='float:left;'><img alt='' src='pics/icons/att.png'></div>"; - echo "<p>Please correct the following errors:</p><ul>"; - foreach ($errors as $line) - echo "<li>" . $line . "</li>"; - echo "</ul>"; - echo "</div>"; - } - ?> - <p>Please fill out the form below to request an account for FlockLab. Fields marked with * are mandatory.</p> - <span class="formfield">Institution:*</span><input type="text" name="institution" id="institution" value="<?php echo $institution;?>"><br> - <span class="formfield">Type of Institution:*</span><select name="institutiontype" id="institutiontype"> - <option value="university" <?php echo ($institutiontype == "university") ? 'selected="selected"' : "";?>>University</option> - <option value="researchinstitute" <?php echo ($institutiontype == "researchinstitute") ? 'selected="selected"' : "";?>>Research Institute</option> - <option value="company" <?php echo ($institutiontype == "company") ? 'selected="selected"' : "";?>>Company</option> - <option value="other" <?php echo ($institutiontype == "other") ? 'selected="selected"' : "";?>>Other (specify under comments)</option> - </select><br> - <span class="formfield">Country:*</span><select name="country" id="country"> - <option value=""></option> - <?php - foreach (countries() as $c) { - echo '<option value="'.$c.'" '.(($country == $c) ? 
'selected="selected"' : "").'>'.$c.'</option>'; - } - ?> - </select><br> - <span class="formfield">First Name:*</span><input type="text" name="firstname" id="firstname" value="<?php echo $firstname;?>"><br> - <span class="formfield">Last Name:*</span><input type="text" name="lastname" id="lastname" value="<?php echo $lastname;?>"><br> - <span class="formfield">E-mail Address:*</span><input type="text" name="emailaddress" id="emailaddress" value="<?php echo $emailaddress;?>"><br> - <span class="formfield">Username*:</span><input type="text" name="username" id="username" value="<?php echo $username;?>"><br> - <span class="formfield">Password:*</span><input type="password" name="passwd" id="passwd" value=""><label id="passwderror" class="error" for="passwd" generated="true" style="display: inline;"></label><br> - <span class="formfield">Retype Password:*</span><input type="password" name="retypepasswd" id="retypepasswd" value=""><br> - <span class="formfield">What do you want to do with FlockLab (Please be specific, e.g. what kind of node platform or protocols you intend to use; ...):*</span><textarea name="description" id="description" cols="50" rows="5"><?php echo $description;?></textarea><br> - <span class="formfield">Comments:</span><textarea name="comments" id="comments" cols="50" rows="5"><?php echo $comments;?></textarea><br> - <span class="formfield">Terms of use:*</span><input type="checkbox" name="termsofuse" id="termsofuse" value="yes" <?php echo $termsofuse=='yes' ? 'checked' : '' ;?>> I accept the <a href="http://user.flocklab.ethz.ch/terms_of_use.php" target="_blank">terms of use</a>.<br> - <span class="formfield">Captcha:*</span><?php recaptcha_print(); ?> - <p> - <input type="hidden" name="first" value="no"> - <input type="submit" value="Request Account"> - <input type="button" value="Cancel" onclick="window.location='index.php'"> - </p> - <?php - } else { - echo "<p class='info'><img alt='' src='pics/icons/info.png'>Your request has been submitted. You will be contacted as soon as it is processed.</p>"; - echo "<input type=\"button\" value=\"Finish\" onclick=\"window.location='index.php'\">"; - } - ?> - </form> - </div> <!-- END content --> - <div style="clear:both"></div> - </div> <!-- END container --> + <div id="container" class="container"> + <div id="header" class="header"> + <a href="http://www.flocklab.ethz.ch"><img alt="FlockLab" src="pics/flocklab_eth_logo.png"></a> + </div> <!-- END header --> + <div id="content" class="content"> + <h1>Register for a FlockLab Account</h1> + <form id="flocklabform" name="flocklabform" method="post" action="user_register.php"> + <?php + if ($first || $error) { + if ($error) { + echo "<div class='warning'><div style='float:left;'><img alt='' src='pics/icons/att.png'></div>"; + echo "<p>Please correct the following errors:</p><ul>"; + foreach ($errors as $line) + echo "<li>" . $line . "</li>"; + echo "</ul>"; + echo "</div>"; + } + ?> + <p>Please fill out the form below to request an account for FlockLab. Fields marked with * are mandatory.</p> + <span class="formfield">Institution:*</span><input type="text" name="institution" id="institution" value="<?php echo $institution;?>"><br> + <span class="formfield">Type of Institution:*</span><select name="institutiontype" id="institutiontype"> + <option value="university" <?php echo ($institutiontype == "university") ? 'selected="selected"' : "";?>>University</option> + <option value="researchinstitute" <?php echo ($institutiontype == "researchinstitute") ? 
'selected="selected"' : "";?>>Research Institute</option> + <option value="company" <?php echo ($institutiontype == "company") ? 'selected="selected"' : "";?>>Company</option> + <option value="other" <?php echo ($institutiontype == "other") ? 'selected="selected"' : "";?>>Other (specify under comments)</option> + </select><br> + <span class="formfield">Country:*</span><select name="country" id="country"> + <option value=""></option> + <?php + foreach (countries() as $c) { + echo '<option value="'.$c.'" '.(($country == $c) ? 'selected="selected"' : "").'>'.$c.'</option>'; + } + ?> + </select><br> + <span class="formfield">First Name:*</span><input type="text" name="firstname" id="firstname" value="<?php echo $firstname;?>"><br> + <span class="formfield">Last Name:*</span><input type="text" name="lastname" id="lastname" value="<?php echo $lastname;?>"><br> + <span class="formfield">E-mail Address:*</span><input type="text" name="emailaddress" id="emailaddress" value="<?php echo $emailaddress;?>"><br> + <span class="formfield">Username*:</span><input type="text" name="username" id="username" value="<?php echo $username;?>"><br> + <span class="formfield">Password:*</span><input type="password" name="passwd" id="passwd" value=""><label id="passwderror" class="error" for="passwd" generated="true" style="display: inline;"></label><br> + <span class="formfield">Retype Password:*</span><input type="password" name="retypepasswd" id="retypepasswd" value=""><br> + <span class="formfield">What do you want to do with FlockLab (Please be specific, e.g. what kind of node platform or protocols you intend to use; ...):*</span><textarea name="description" id="description" cols="50" rows="5"><?php echo $description;?></textarea><br> + <span class="formfield">Comments:</span><textarea name="comments" id="comments" cols="50" rows="5"><?php echo $comments;?></textarea><br> + <span class="formfield">Terms of use:*</span><input type="checkbox" name="termsofuse" id="termsofuse" value="yes" <?php echo $termsofuse=='yes' ? 'checked' : '' ;?>> I accept the <a href="http://user.flocklab.ethz.ch/terms_of_use.php" target="_blank">terms of use</a>.<br> + <span class="formfield">Captcha:*</span><?php recaptcha_print(); ?> + <p> + <input type="hidden" name="first" value="no"> + <input type="submit" value="Request Account"> + <input type="button" value="Cancel" onclick="window.location='index.php'"> + </p> + <?php + } else { + echo "<p class='info'><img alt='' src='pics/icons/info.png'>Your request has been submitted. 
You will be contacted as soon as it is processed.</p>"; + echo "<input type=\"button\" value=\"Finish\" onclick=\"window.location='index.php'\">"; + } + ?> + </form> + </div> <!-- END content --> + <div style="clear:both"></div> + </div> <!-- END container --> </body> </html> diff --git a/webserver/user/viz_feed.php b/webserver/user/viz_feed.php index 4fb295e8cd897ff929a10886308cce18803b5912..dc513f67a305f7097c42a1c121ae187f8016a023 100755 --- a/webserver/user/viz_feed.php +++ b/webserver/user/viz_feed.php @@ -1,114 +1,114 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ ?> <?php require_once('include/auth.php'); $script_starttime = microtime(true); if (isset($_GET['t']) && isset($_GET['o']) && isset($_GET['s']) && isset($_GET['m']) && is_numeric($_GET['t']) && is_numeric($_GET['o']) && is_numeric($_GET['s']) && is_numeric($_GET['m'])) { - $testid = $_GET['t']; - $obsid = $_GET['o']; - $starttime = $_GET['s']; - // check access rights for this use and this tests - // check if directory exists - // viz/testid_userfk - $viz_path = $CONFIG['viz']['dir'].'/'.$testid.'_'. $_SESSION['serv_users_key']; - if (!file_exists ($viz_path)) { -// if (!check_testid($testid, $_SESSION['serv_users_key'])) { - header("HTTP/1.0 401 Unauthorized"); - exit(); - } - - // find viz image for request - // images are in $CONFIG['viz']['dir']/<testid> - $oldest = -1; - - if ($_GET['m']==0) { // power - $searchname = 'power'; - $searchending = 'png'; - } - else { - $searchname = 'gpiom'; - $searchending = 'json'; - } + $testid = $_GET['t']; + $obsid = $_GET['o']; + $starttime = $_GET['s']; + // check access rights for this use and this tests + // check if directory exists + // viz/testid_userfk + $viz_path = $CONFIG['viz']['dir'].'/'.$testid.'_'. $_SESSION['serv_users_key']; + if (!file_exists ($viz_path)) { +// if (!check_testid($testid, $_SESSION['serv_users_key'])) { + header("HTTP/1.0 401 Unauthorized"); + exit(); + } + + // find viz image for request + // images are in $CONFIG['viz']['dir']/<testid> + $oldest = -1; + + if ($_GET['m']==0) { // power + $searchname = 'power'; + $searchending = 'png'; + } + else { + $searchname = 'gpiom'; + $searchending = 'json'; + } - foreach (glob($viz_path.'/'.$searchname.'_'.$obsid.'*.'.$searchending) as $filename) { - if ((preg_match('/'.$searchname.'_'.$obsid.'_([0-9]*)\.'.$searchending.'/', $filename, $matches)>0) && ($matches[1]>$starttime)) { - if ($oldest<0 || $oldest > $matches[1]) { - $oldest=$matches[1]; - $imgfilename=$filename; - } - else { - // remove old image - } - } - } + foreach (glob($viz_path.'/'.$searchname.'_'.$obsid.'*.'.$searchending) as $filename) { + if ((preg_match('/'.$searchname.'_'.$obsid.'_([0-9]*)\.'.$searchending.'/', $filename, $matches)>0) && ($matches[1]>$starttime)) { + if ($oldest<0 || $oldest > $matches[1]) { + $oldest=$matches[1]; + $imgfilename=$filename; + } + else { + // remove old image + } + } + } - // output - if ($oldest > 0) { - header("Processing-Time: ".(1000*(microtime(true) - $script_starttime))." 
ms"); - header("Start-Time: ".$oldest); - //header("Observer-Id: ".$obsid); - if ($_GET['m']==0) { - if (!isset($_GET['q'])) { - //header("HTTP/1.0 304 Not Modified"); - header_remove("Cache-Control"); - header_remove("Pragma"); - header("Expires: ".date(DATE_RFC1123, time() + 300)); - header("Content-Type: image/png"); - header("Start-Time: ".$oldest); - readfile ($imgfilename); - } - else { - // remove old images - // unlink($CONFIG['viz']['dir'].'/'.$testid.'/'.$imgfilename); - } - } - else { - ob_start("ob_gzhandler"); - header("Start-Time: ".$oldest); - readfile ($imgfilename); - } - } - else { - // use some error header to signal missing or no data - header("HTTP/1.0 410 Vizualization data not available"); - } + // output + if ($oldest > 0) { + header("Processing-Time: ".(1000*(microtime(true) - $script_starttime))." ms"); + header("Start-Time: ".$oldest); + //header("Observer-Id: ".$obsid); + if ($_GET['m']==0) { + if (!isset($_GET['q'])) { + //header("HTTP/1.0 304 Not Modified"); + header_remove("Cache-Control"); + header_remove("Pragma"); + header("Expires: ".date(DATE_RFC1123, time() + 300)); + header("Content-Type: image/png"); + header("Start-Time: ".$oldest); + readfile ($imgfilename); + } + else { + // remove old images + // unlink($CONFIG['viz']['dir'].'/'.$testid.'/'.$imgfilename); + } + } + else { + ob_start("ob_gzhandler"); + header("Start-Time: ".$oldest); + readfile ($imgfilename); + } + } + else { + // use some error header to signal missing or no data + header("HTTP/1.0 410 Vizualization data not available"); + } } else if (isset($_GET['t']) && is_numeric($_GET['t'])) { - // look for directory and get timerange of available viz data - $testid = $_GET['t']; - $viz_path = $CONFIG['viz']['dir'].'/'.$testid.'_'. $_SESSION['serv_users_key']; - if (!file_exists ($viz_path)) { - header("HTTP/1.0 401 Unauthorized"); - exit(); - } - $range_min = -1; - $range_max = -1; - - foreach (glob($viz_path.'/*') as $filename) { - if (preg_match('/[a-z]*_[0-9]*_([0-9]*)\..*/', $filename, $matches) > 0) { - if ($range_min<0 || $range_min > $matches[1]) { - $range_min=$matches[1]; - } - else if ($range_max<0 || $range_max < $matches[1]) { - $range_max=$matches[1]; - } - } - } - header("Range-Min: ".$range_min); - header("Range-Max: ".$range_max); + // look for directory and get timerange of available viz data + $testid = $_GET['t']; + $viz_path = $CONFIG['viz']['dir'].'/'.$testid.'_'. 
$_SESSION['serv_users_key']; + if (!file_exists ($viz_path)) { + header("HTTP/1.0 401 Unauthorized"); + exit(); + } + $range_min = -1; + $range_max = -1; + + foreach (glob($viz_path.'/*') as $filename) { + if (preg_match('/[a-z]*_[0-9]*_([0-9]*)\..*/', $filename, $matches) > 0) { + if ($range_min<0 || $range_min > $matches[1]) { + $range_min=$matches[1]; + } + else if ($range_max<0 || $range_max < $matches[1]) { + $range_max=$matches[1]; + } + } + } + header("Range-Min: ".$range_min); + header("Range-Max: ".$range_max); } else { - // use some error header to signal wrong request - header("HTTP/1.0 400 Bad Request"); + // use some error header to signal wrong request + header("HTTP/1.0 400 Bad Request"); } ?> diff --git a/webserver/user/webdav/HTTP/WebDAV/Server.php b/webserver/user/webdav/HTTP/WebDAV/Server.php index 4d693a33dc57caaffd49a656401c67c2ca262b90..a36fc10ee0ed329b99a9a49174eb8cc2da8a2eff 100755 --- a/webserver/user/webdav/HTTP/WebDAV/Server.php +++ b/webserver/user/webdav/HTTP/WebDAV/Server.php @@ -2166,13 +2166,13 @@ class HTTP_WebDAV_Server */ function bytes($str) { - static $func_overload; - - if (is_null($func_overload)) - { - $func_overload = @extension_loaded('mbstring') ? ini_get('mbstring.func_overload') : 0; - } - return $func_overload & 2 ? mb_strlen($str,'ascii') : strlen($str); + static $func_overload; + + if (is_null($func_overload)) + { + $func_overload = @extension_loaded('mbstring') ? ini_get('mbstring.func_overload') : 0; + } + return $func_overload & 2 ? mb_strlen($str,'ascii') : strlen($str); } } diff --git a/webserver/user/webdav/HTTP/WebDAV/Server/FilesystemFlocklab.php b/webserver/user/webdav/HTTP/WebDAV/Server/FilesystemFlocklab.php index f3497765272fe3b8e1881f06bb7ecee925f1f001..5f75b00ea23facba3cc0a7ec34fe14f0bbcb3b05 100644 --- a/webserver/user/webdav/HTTP/WebDAV/Server/FilesystemFlocklab.php +++ b/webserver/user/webdav/HTTP/WebDAV/Server/FilesystemFlocklab.php @@ -333,7 +333,7 @@ class HTTP_WebDAV_Server_Filesystem extends HTTP_WebDAV_Server $info["props"][] = $this->mkprop("resourcetype", ""); $info["props"][] = $this->mkprop("getcontenttype", $mime); if ($size!=null) - $info["props"][] = $this->mkprop("getcontentlength", $size); + $info["props"][] = $this->mkprop("getcontentlength", $size); return $info; } @@ -481,52 +481,52 @@ class HTTP_WebDAV_Server_Filesystem extends HTTP_WebDAV_Server function getTest($testid, $power){ global $CONFIG; - if ($power) { - // pipe file from archive - $archivepath = $CONFIG['testmanagementserver']['archivedir']; - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"ls ".$archivepath.'/'.$testid.".tar.gz\""; - exec($cmd , $output, $ret); - if ($ret > 0) - return false; - // dump whole archive - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"cat ".$archivepath.'/'.$testid.".tar.gz\""; - $stream = popen($cmd, "r"); - return $stream; - } - else { - flog("nopower"); - $archivepath = $CONFIG['testmanagementserver']['archivedir']; - $split_path = $CONFIG['testmanagementserver']['basedir']; - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." 
\"ls ".$archivepath.'/'.$testid.".tar.gz\""; - exec($cmd , $output, $ret); - if ($ret > 0) - return false; - // dump stripped archive - flog("nopower dump"); - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"/bin/zcat ".$archivepath.'/'.$testid.".tar.gz | ".$split_path."/flocklab_archive_split | /usr/bin/pigz\""; - flog("nopower dump ". $cmd); - $stream = popen($cmd, "r"); - return $stream; - } + if ($power) { + // pipe file from archive + $archivepath = $CONFIG['testmanagementserver']['archivedir']; + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"ls ".$archivepath.'/'.$testid.".tar.gz\""; + exec($cmd , $output, $ret); + if ($ret > 0) + return false; + // dump whole archive + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"cat ".$archivepath.'/'.$testid.".tar.gz\""; + $stream = popen($cmd, "r"); + return $stream; + } + else { + flog("nopower"); + $archivepath = $CONFIG['testmanagementserver']['archivedir']; + $split_path = $CONFIG['testmanagementserver']['basedir']; + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"ls ".$archivepath.'/'.$testid.".tar.gz\""; + exec($cmd , $output, $ret); + if ($ret > 0) + return false; + // dump stripped archive + flog("nopower dump"); + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"/bin/zcat ".$archivepath.'/'.$testid.".tar.gz | ".$split_path."/flocklab_archive_split | /usr/bin/pigz\""; + flog("nopower dump ". $cmd); + $stream = popen($cmd, "r"); + return $stream; + } } function getTestSize($testid, $power){ global $CONFIG; if ($power===false) - return null; - // file exists? - $archivepath = $CONFIG['testmanagementserver']['archivedir']; - $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." \"ls -l ".$archivepath.'/'.$testid.".tar.gz\""; - exec($cmd , $output, $ret); - if ($ret > 0) - return 0; - else { - $size = explode(' ', $output[0]); - $size = preg_replace('/([0-9]*) .*/','$1',$size[4]); - if (strlen($size)>0) - return intval($size); - } - return 0; + return null; + // file exists? + $archivepath = $CONFIG['testmanagementserver']['archivedir']; + $cmd = "ssh ".$CONFIG['testmanagementserver']['sshflags']." ".$CONFIG['testmanagementserver']['user']."@".$CONFIG['testmanagementserver']['host']." 
\"ls -l ".$archivepath.'/'.$testid.".tar.gz\""; + exec($cmd , $output, $ret); + if ($ret > 0) + return 0; + else { + $size = explode(' ', $output[0]); + $size = preg_replace('/([0-9]*) .*/','$1',$size[4]); + if (strlen($size)>0) + return intval($size); + } + return 0; } /** diff --git a/webserver/user/webdav/file.php b/webserver/user/webdav/file.php index 102f58789b7bc4fd63d7ff0489d09e26b6f75d50..b42b7e96d06700960a5dcf78617d32ac47223cae 100644 --- a/webserver/user/webdav/file.php +++ b/webserver/user/webdav/file.php @@ -1,5 +1,5 @@ <?php // $Id$ require_once "HTTP/WebDAV/Server/FilesystemFlocklab.php"; - $server = new HTTP_WebDAV_Server_Filesystem(); - $server->ServeRequest(); + $server = new HTTP_WebDAV_Server_Filesystem(); + $server->ServeRequest(); ?> \ No newline at end of file diff --git a/webserver/user/xmlvalidate.php b/webserver/user/xmlvalidate.php index 6b220c29659bc18d148612acd106182a7693a2b8..64ad4db5b2617bfc4ac6fa8e4e9126d878322fed 100755 --- a/webserver/user/xmlvalidate.php +++ b/webserver/user/xmlvalidate.php @@ -1,95 +1,95 @@ <?php - /* - * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" - * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" - * __license__ = "GPL" - * __version__ = "$Revision$" - * __date__ = "$Date$" - * __id__ = "$Id$" - * __source__ = "$URL$" - */ - - //DEBUG ini_set('display_errors', 1); - //DEBUG error_reporting(E_ALL); + /* + * __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>" + * __copyright__ = "Copyright 2010, ETH Zurich, Switzerland, Christoph Walser" + * __license__ = "GPL" + * __version__ = "$Revision$" + * __date__ = "$Date$" + * __id__ = "$Id$" + * __source__ = "$URL$" + */ + + //DEBUG ini_set('display_errors', 1); + //DEBUG error_reporting(E_ALL); ?> <?php require_once('include/layout.php');require_once('include/presets.php'); ?> <?php - $first = ((isset($_POST['first'])) && ($_POST['first'] == "no")) ? false : true; - $errors = array(); - - // If the page is called for at least the second time, validate the XML file provided by the user: - if (!$first) { - // Get the file and check if it has an XML MIME type: - $xmlfile = $_FILES['xmlfile']; - if ($xmlfile["error"] != 0) { - // There was an error during file upload: - array_push($errors, "There was an error when uploading the file."); - } - elseif (!(in_array($xmlfile["type"], array("text/xml", "application/xml")))) { - // The uploaded file is not XML: - array_push($errors, "Uploaded file is not XML."); - } else { - $cmd = "python ".$CONFIG['tests']['testvalidator']." -x " . $xmlfile['tmp_name'] . " -s ".$CONFIG['xml']['schemapath']." -u " . $_SESSION['serv_users_key']; - exec($cmd , $output, $ret); - foreach ($output as $error) { - array_push($errors, $error); - } - if (empty($errors) && $ret) { - array_push($errors, "unknown error"); - } - } - } + $first = ((isset($_POST['first'])) && ($_POST['first'] == "no")) ? false : true; + $errors = array(); + + // If the page is called for at least the second time, validate the XML file provided by the user: + if (!$first) { + // Get the file and check if it has an XML MIME type: + $xmlfile = $_FILES['xmlfile']; + if ($xmlfile["error"] != 0) { + // There was an error during file upload: + array_push($errors, "There was an error when uploading the file."); + } + elseif (!(in_array($xmlfile["type"], array("text/xml", "application/xml")))) { + // The uploaded file is not XML: + array_push($errors, "Uploaded file is not XML."); + } else { + $cmd = "python ".$CONFIG['tests']['testvalidator']." -x " . 
$xmlfile['tmp_name'] . " -s ".$CONFIG['xml']['schemapath']." -u " . $_SESSION['serv_users_key']; + exec($cmd , $output, $ret); + foreach ($output as $error) { + array_push($errors, $error); + } + if (empty($errors) && $ret) { + array_push($errors, "unknown error"); + } + } + } ?> - <script type="text/javascript"> - $(document).ready(function() { - $('.qtip_show').qtip( { - content: {text: false}, - style : 'flocklab', - }); - $("#xmluploadform").validate({ - rules: { - xmlfile: "required", - }, - errorPlacement: function(error, element) { - error.insertAfter(element); - } - }); - }); - </script> - <h1>Validate XML Test Configuration</h1> - <?php - /* If the page is called with a file associated, validate it and show the results */ - if (!$first) { - // Show validation errors: - if (!empty($errors)) { - echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; - echo "<!-- cmd --><p>Please correct the following errors:</p><ul>"; - foreach ($errors as $error) - echo "<li>" . $error . "</li>"; - echo "</ul><!-- cmd --></div><p></p>"; - } else { - echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; - echo "<!-- cmd --><p>The file validated correctly.</p><!-- cmd --></div>"; - echo "<p></p>"; - echo '<form action="newtest.php" method="post"> - <input type="hidden" name="xmlfile" id="xmlfile" value="'.htmlentities(file_get_contents($xmlfile['tmp_name'])).'"> - <input type="submit" value="Create test"></form>'; - echo '<p style="margin-top:30px">.. or validate another XML test configuration:</p>'; - } - } - ?> - <form id="xmluploadform" name="xmluploadform" method="post" action="xmlvalidate.php" enctype="multipart/form-data"> - <fieldset> - <legend>Upload XML to validate</legend> - <span class="formfield">XML File:*</span><input type="file" name="xmlfile" id="xmlfile" size="27" class="required"><br /> - <p>A template XML test configuration can be downloaded <a href="xml/flocklab_template.xml" target="_blank">here</a>, the XML schema file against which is validated can be found <a href="xml/flocklab.xsd" target="_blank">here</a>.<br> - Detailed information is available on the <a href="https://gitlab.ethz.ch/tec/public/flocklab/wikis/Man/XmlConfig">FlockLab XML Test Configuration File Help page</a>.</p> - </fieldset> - <p></p> - <input type="hidden" name="first" value="no"> - <input type="submit" value="Validate"> - </form> + <script type="text/javascript"> + $(document).ready(function() { + $('.qtip_show').qtip( { + content: {text: false}, + style : 'flocklab', + }); + $("#xmluploadform").validate({ + rules: { + xmlfile: "required", + }, + errorPlacement: function(error, element) { + error.insertAfter(element); + } + }); + }); + </script> + <h1>Validate XML Test Configuration</h1> + <?php + /* If the page is called with a file associated, validate it and show the results */ + if (!$first) { + // Show validation errors: + if (!empty($errors)) { + echo "<div class=\"warning\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/att.png\"></div>"; + echo "<!-- cmd --><p>Please correct the following errors:</p><ul>"; + foreach ($errors as $error) + echo "<li>" . $error . 
"</li>"; + echo "</ul><!-- cmd --></div><p></p>"; + } else { + echo "<div class=\"info\"><div style=\"float:left;\"><img alt=\"\" src=\"pics/icons/success.png\"></div>"; + echo "<!-- cmd --><p>The file validated correctly.</p><!-- cmd --></div>"; + echo "<p></p>"; + echo '<form action="newtest.php" method="post"> + <input type="hidden" name="xmlfile" id="xmlfile" value="'.htmlentities(file_get_contents($xmlfile['tmp_name'])).'"> + <input type="submit" value="Create test"></form>'; + echo '<p style="margin-top:30px">.. or validate another XML test configuration:</p>'; + } + } + ?> + <form id="xmluploadform" name="xmluploadform" method="post" action="xmlvalidate.php" enctype="multipart/form-data"> + <fieldset> + <legend>Upload XML to validate</legend> + <span class="formfield">XML File:*</span><input type="file" name="xmlfile" id="xmlfile" size="27" class="required"><br /> + <p>A template XML test configuration can be downloaded <a href="xml/flocklab_template.xml" target="_blank">here</a>, the XML schema file against which is validated can be found <a href="xml/flocklab.xsd" target="_blank">here</a>.<br> + Detailed information is available on the <a href="https://gitlab.ethz.ch/tec/public/flocklab/wikis/Man/XmlConfig">FlockLab XML Test Configuration File Help page</a>.</p> + </fieldset> + <p></p> + <input type="hidden" name="first" value="no"> + <input type="submit" value="Validate"> + </form> <?php do_layout('Validate XML','Validate XML Test Config'); ?>