Commit 3b58a453 authored by Reto Da Forno's avatar Reto Da Forno
Browse files

several improvements

- start time for power profiling service changed to UNIX timestamp and <profConf> removed
- don't trigger archiver when results directory doesn't exist
- support for power profiling <fileFormat> tag added
- voltage channels are now subtracted (csv format only)
- XML template adjusted (only one occurrence of <profConf> accepted, new tags added)
- missing checks added to testconfig validator for new power profiling <offset> and <duration> tags
- minor cleanup
parent fff12a47
......@@ -617,35 +617,42 @@ def start_test(testid, cur, cn, obsdict_key, obsdict_id):
profconfs = ppconf.xpath('d:profConf', namespaces=ns)
xmlblock = "<obsPowerprofConf>\n"
for profconf in profconfs:
duration = profconf.xpath('d:durationMillisecs', namespaces=ns)[0].text.strip()
xmlblock += "\t<profConf>\n\t\t<duration>%s</duration>" %duration
abs_tim = profconf.xpath('d:absoluteTime', namespaces=ns)
if abs_tim:
absdatetime = absolute2absoluteUTC_time(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()) # parse xml date
ret = abs_tim[0].xpath('d:absoluteMicrosecs', namespaces=ns)
if ret:
absmicrosec = ret[0].text.strip()
else:
absmicrosec = 0
duration = profconf.xpath('d:duration', namespaces=ns)[0].text.strip()
if not duration:
try:
duration = int(profconf.xpath('d:durationMillisecs', namespaces=ns)[0].text.strip()) / 1000
except:
duration = 0
#xmlblock += "\t<profConf>\n"
xmlblock += "\t<duration>%s</duration>" % duration
# calculate the sampling start
offset = profconf.xpath('d:offset', namespaces=ns)
rel_tim = profconf.xpath('d:relativeTime', namespaces=ns)
if rel_tim:
abs_tim = profconf.xpath('d:absoluteTime', namespaces=ns)
if offset:
offset = int(offset[0].text.strip())
tstart = datetime.datetime.timestamp(starttime + datetime.timedelta(seconds=offset))
elif abs_tim:
tstart = datetime.datetime.timestamp(flocklab.get_xml_timestamp(abs_tim[0].xpath('d:absoluteDateTime', namespaces=ns)[0].text.strip()))
elif rel_tim:
relsec = int(rel_tim[0].xpath('d:offsetSecs', namespaces=ns)[0].text.strip())
ret = rel_tim[0].xpath('d:offsetMicrosecs', namespaces=ns)
if ret:
relmicrosec = int(ret[0].text.strip())
else:
relmicrosec = 0
# Relative times need to be converted into absolute times:
absmicrosec, absdatetime = relative2absolute_time(starttime, relsec, relmicrosec)
xmlblock += "\n\t\t<absoluteTime>\n\t\t\t<absoluteDateTime>%s</absoluteDateTime>\n\t\t\t<absoluteMicrosecs>%s</absoluteMicrosecs>\n\t\t</absoluteTime>" %(absdatetime, absmicrosec)
tstart = datetime.datetime.timestamp(starttime + datetime.timedelta(seconds=relsec))
xmlblock += "\n\t<starttime>%s</starttime>" % (tstart)
# check if config contains samplingRate:
samplingrate = profconf.xpath('d:samplingRate', namespaces=ns)
samplingdivider = profconf.xpath('d:samplingDivider', namespaces=ns)
if samplingdivider:
if samplingrate:
samplingrate = samplingrate[0].text.strip()
xmlblock += "\n\t<samplingRate>%s</samplingRate>" % samplingrate
elif samplingdivider:
samplingdivider = samplingdivider[0].text.strip()
xmlblock += "\n\t<samplingDivider>%s</samplingDivider>" % samplingdivider
else:
samplingdivider = flocklab.config.get('dispatcher', 'default_sampling_divider')
xmlblock += "\n\t\t<samplingDivider>%s</samplingDivider>"%samplingdivider
xmlblock += "\n\t</profConf>\n"
xmlblock += "</obsPowerprofConf>\n\n"
samplingdivider = flocklab.config.get('dispatcher', 'default_sampling_divider')
xmlblock += "\n\t<samplingDivider>%s</samplingDivider>" % samplingdivider
#xmlblock += "\n\t</profConf>\n"
break # for now, only parse the first block
xmlblock += "\n</obsPowerprofConf>\n\n"
for obsid in obsids:
obsid = int(obsid)
obskey = obsdict_id[obsid][0]
......@@ -747,7 +754,7 @@ def start_test(testid, cur, cn, obsdict_key, obsdict_id):
p = subprocess.Popen(cmd)
rs = p.wait()
if rs != 0:
msg = "Could not start database fetcher for test ID %d. Fetcher returned error %d" % (testid, rs)
msg = "Could not start fetcher for test ID %d. Fetcher returned error %d" % (testid, rs)
errors.append(msg)
logger.error(msg)
logger.error("Tried to execute: %s" % (" ".join(cmd)))
......@@ -922,7 +929,7 @@ def stop_test(testid, cur, cn, obsdict_key, obsdict_id, abort=False):
p = subprocess.Popen(cmd)
rs = p.wait()
if rs not in (flocklab.SUCCESS, errno.ENOPKG): # flocklab.SUCCESS (0) is successful stop, ENOPKG (65) means the service was not running.
msg = "Could not stop database fetcher for test ID %d. Fetcher returned error %d" % (testid, rs)
msg = "Could not stop fetcher for test ID %d. Fetcher returned error %d" % (testid, rs)
errors.append(msg)
logger.error(msg)
logger.error("Tried to execute: %s" % (" ".join(cmd)))
......@@ -963,7 +970,13 @@ def prepare_testresults(testid, cur):
errors = []
tree = None
# Check if results directory exists
testresultsdir = "%s/%d" % (flocklab.config.get('fetcher', 'testresults_dir'), testid)
if not os.path.isdir(testresultsdir):
errors.append("Test results directory does not exist.")
return errors
logger.debug("Preparing testresults...")
# Check if user wants test results as email ---
......@@ -991,15 +1004,13 @@ def prepare_testresults(testid, cur):
# Add config XML to results directory
if flocklab.config.get('archiver', 'include_xmlconfig'):
testresultsdir = "%s/%d" % (flocklab.config.get('fetcher', 'testresults_dir'), testid)
if (os.path.isdir(testresultsdir) and (not os.path.exists("%s/testconfig.xml" % testresultsdir))):
if tree:
et = lxml.etree.ElementTree(tree)
et.write("%s/testconfig.xml" % testresultsdir, pretty_print=True)
logger.debug("XML config copied to results folder.")
else:
logger.warn("Could not copy XML config to test results directory.")
if tree:
et = lxml.etree.ElementTree(tree)
et.write("%s/testconfig.xml" % testresultsdir, pretty_print=True)
logger.debug("XML config copied to results folder.")
else:
logger.warn("Could not copy XML config to test results directory.")
# Archive test results ---
cmd = [flocklab.config.get('dispatcher', 'archiverscript'),"--testid=%d" % testid]
if emailResults:
......@@ -1138,10 +1149,10 @@ def relative2absolute_time(starttime, relative_secs, relative_microsecs):
def absolute2absoluteUTC_time(timestring):
    """Convert an XML date/time string into a formatted UTC time string.

    The input is first parsed into a UNIX timestamp via
    flocklab.get_xml_timestamp(), then rendered in UTC using the
    'timeformat' pattern from the [observer] section of the FlockLab config.
    """
    tempdatetime = flocklab.get_xml_timestamp(timestring)  # XML datetime -> UNIX timestamp
    absolute_datetime = time.strftime(flocklab.config.get("observer", "timeformat"), time.gmtime(tempdatetime))
    return absolute_datetime
### END absolute2absoluteUTC_time()
def db_register_activity(testid, cur, cn, action, obskeys):
pid = os.getpid()
register_ok = True
......
......@@ -170,7 +170,7 @@ def worker_convert_and_aggregate(queueitem=None, nodeid=None, resultfile_path=No
cur_p = multiprocessing.current_process()
(itemtype, obsid, fdir, f, workerstate) = queueitem
obsdbfile_path = "%s/%s" % (fdir,f)
loggername = "(%s.Obs%d) " % (cur_p.name, obsid)
loggername = "(%s.%d) " % (cur_p.name, obsid)
#logqueue.put_nowait((loggername, logging.DEBUG, "Import file %s"%obsdbfile_path))
# Open file:
dbfile = open(obsdbfile_path, 'rb')
......@@ -248,13 +248,13 @@ def worker_convert_and_aggregate(queueitem=None, nodeid=None, resultfile_path=No
# whole observer DB files.
#
##############################################################################
def worker_gpiotracing(queueitem=None, nodeid=None, resultfile_path=None, vizimgdir=None, viz_f=None, logqueue=None):
def worker_gpiotracing(queueitem=None, nodeid=None, resultfile_path=None, logqueue=None, arg=None):
try:
_errors = []
cur_p = multiprocessing.current_process()
(itemtype, obsid, fdir, f, workerstate) = queueitem
obsdbfile_path = "%s/%s" % (fdir, f)
loggername = "(%s.Obs%d) " % (cur_p.name, obsid)
loggername = "(%s.%d) " % (cur_p.name, obsid)
with open(resultfile_path, "a") as outfile:
infile = open(obsdbfile_path, "r")
......@@ -285,25 +285,39 @@ def worker_gpiotracing(queueitem=None, nodeid=None, resultfile_path=None, vizimg
# whole observer DB files.
#
##############################################################################
def worker_powerprof(queueitem=None, nodeid=None, resultfile_path=None, vizimgdir=None, viz_f=None, logqueue=None):
def worker_powerprof(queueitem=None, nodeid=None, resultfile_path=None, logqueue=None, arg=None):
try:
channel_names = ['I1','V1','V2']
_errors = []
cur_p = multiprocessing.current_process()
(itemtype, obsid, fdir, f, workerstate) = queueitem
obsdbfile_path = "%s/%s" % (fdir, f)
loggername = "(%s.Obs%d) " % (cur_p.name, obsid)
loggername = "(%s.%d) " % (cur_p.name, obsid)
rld_data = RocketLoggerData(obsdbfile_path).merge_channels()
# get network time and convert to UNIX timestamp (UTC)
timeidx = rld_data.get_time(absolute_time=True, time_reference='network') # TODO adjust parameters for RL 1.99+
timeidxunix = timeidx.astype('uint64') / 1e9
rld_dataframe = pd.DataFrame(rld_data.get_data(channel_names), index=timeidxunix, columns=channel_names)
rld_dataframe.insert(0, 'observer_id', obsid)
rld_dataframe.insert(1, 'node_id', nodeid)
rld_dataframe.to_csv(resultfile_path, sep=',', index_label='time', header=False, mode='a')
if arg and arg == 'rld':
# RLD file format
# simply move the file into the results directory
try:
resfilename = "%s.%s.%s.rld" % (os.path.splitext(resultfile_path)[0], obsid, nodeid)
os.rename(obsdbfile_path, resfilename)
except FileExistsError:
# TODO: properly handle case where file already exists (several rld files per observer)
msg = "File '%s' already exists, dropping test results." % (resfilename)
_errors.append((msg, errno.EEXIST, obsid))
logqueue.put_nowait((loggername, logging.ERROR, msg))
else:
# CSV file format
rld_data = RocketLoggerData(obsdbfile_path).merge_channels()
# get network time and convert to UNIX timestamp (UTC)
timeidx = rld_data.get_time(absolute_time=True, time_reference='network') # TODO adjust parameters for RL 1.99+
timeidxunix = timeidx.astype('uint64') / 1e9 # convert to s
current_ch = rld_data.get_data('I1') * 1000 # convert to mA
voltage_ch = rld_data.get_data('V2') - rld_data.get_data('V1') # voltage difference
rld_dataframe = pd.DataFrame(np.hstack((current_ch, voltage_ch)), index=timeidxunix, columns=['I', 'V'])
rld_dataframe.insert(0, 'observer_id', obsid)
rld_dataframe.insert(1, 'node_id', nodeid)
rld_dataframe.to_csv(resultfile_path, sep=',', index_label='time', header=False, mode='a')
os.remove(obsdbfile_path)
os.remove(obsdbfile_path)
except:
msg = "Error in powerprof worker process: %s: %s\n%s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc())
_errors.append((msg, errno.ECOMM, obsid))
......@@ -320,13 +334,13 @@ def worker_powerprof(queueitem=None, nodeid=None, resultfile_path=None, vizimgdi
# worker_logs
#
##############################################################################
def worker_logs(queueitem=None, nodeid=None, resultfile_path=None, vizimgdir=None, viz_f=None, logqueue=None):
def worker_logs(queueitem=None, nodeid=None, resultfile_path=None, logqueue=None, arg=None):
try:
_errors = []
cur_p = multiprocessing.current_process()
(itemtype, obsid, fdir, f, workerstate) = queueitem
obsdbfile_path = "%s/%s" % (fdir, f)
loggername = "(%s.Obs%d) " % (cur_p.name, obsid)
loggername = "(%s.%d) " % (cur_p.name, obsid)
with open(resultfile_path, "a") as outfile:
infile = open(obsdbfile_path, "r")
......@@ -424,7 +438,7 @@ class FetchObsThread(threading.Thread):
def run(self):
try:
self._loggerprefix = "(FetchObsThread.Obs%d) "%self._obsid
self._loggerprefix = "(FetchObsThread.%d) "%self._obsid
self._logger.info(self._loggerprefix + "FetchObsThread starting...")
removelast = True
......@@ -861,7 +875,7 @@ def main(argv):
ret = cur.fetchone()
teststarttime = ret[0]
teststoptime = ret[1]
FlockDAQ = False
ppFileFormat = None
# Find out which services are used to allocate working threads later on ---
# Get the XML config from the database and check which services are used in the test.
......@@ -886,6 +900,13 @@ def main(argv):
logger.debug("Usage of %s detected." % service)
else:
servicesUsed_dict[service] = False
# check which file format the user wants for the power profiling
if servicesUsed_dict['powerprofiling']:
if tree.xpath('//d:powerProfilingConf/d:fileFormat', namespaces=ns):
ppFileFormat = tree.xpath('//d:powerProfilingConf/d:fileFormat', namespaces=ns)[0].text
logger.debug("User wants file format %s for power profiling." % (ppFileFormat))
else:
logger.debug("Element <fileFormat> not detected.")
except:
msg = "XML parsing failed: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
errors.append(msg)
......@@ -927,7 +948,7 @@ def main(argv):
elif service == 'gpiotracing':
header = 'timestamp,observer_id,node_id,pin_name,value\n'
elif service == 'powerprofiling':
header = 'timestamp,observer_id,node_id,I1,V1,V2\n'
header = 'timestamp,observer_id,node_id,current[mA],voltage[V]\n'
elif service == 'serial':
header = 'timestamp,observer_id,node_id,direction,output\n'
lock.acquire()
......@@ -1042,14 +1063,14 @@ def main(argv):
# Match the filename against the patterns and schedule an appropriate worker function:
if (re.search("^gpio_monitor_[0-9]{14}\.csv$", f) != None):
pool = service_pools_dict['gpiotracing']
worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], vizimgdir, None, logqueue]
worker_args = [nextitem, nodeid, testresultsfile_dict['gpiotracing'][0], logqueue, None]
worker_f = worker_gpiotracing
logger.debug(loggerprefix + "resultfile_path: %s" % str(testresultsfile_dict['gpiotracing'][0]))
#if (enableviz == 1):
# worker_args[6] = flocklab.viz_gpio_monitor
elif (re.search("^powerprofiling_[0-9]{14}\.rld$", f) != None):
pool = service_pools_dict['powerprofiling']
worker_args = [nextitem, nodeid, testresultsfile_dict['powerprofiling'][0], vizimgdir, None, logqueue]
worker_args = [nextitem, nodeid, testresultsfile_dict['powerprofiling'][0], logqueue, ppFileFormat]
worker_f = worker_powerprof
#if (enableviz == 1):
# worker_args[6] = flocklab.viz_powerprofiling
......@@ -1058,11 +1079,11 @@ def main(argv):
worker_args = [nextitem, nodeid, testresultsfile_dict['serial'][0], testresultsfile_dict['serial'][1], commitsize, vizimgdir, parse_serial, convert_serial, None, logqueue]
elif (re.search("^error_[0-9]{14}\.log$", f) != None):
pool = service_pools_dict['logs']
worker_args = [nextitem, nodeid, testresultsfile_dict['errorlog'][0], vizimgdir, None, logqueue]
worker_args = [nextitem, nodeid, testresultsfile_dict['errorlog'][0], logqueue, None]
worker_f = worker_logs
elif (re.search("^timesync_[0-9]{14}\.log$", f) != None):
pool = service_pools_dict['logs']
worker_args = [nextitem, nodeid, testresultsfile_dict['timesynclog'][0], vizimgdir, None, logqueue]
worker_args = [nextitem, nodeid, testresultsfile_dict['timesynclog'][0], logqueue, None]
worker_f = worker_logs
else:
logger.warn(loggerprefix + "Results file %s/%s from observer %s did not match any of the known patterns" % (fdir, f, obsid))
......
......@@ -344,7 +344,7 @@ class ProxyConnections():
conn, addr = self.condoneQueue.get(False)
self.addHandler(conn, addr, is_observer = True)
except queue.Empty:
pass
pass
# assume it is a socket, do forwarding
else:
m = ''
......
......@@ -1262,3 +1262,20 @@ def get_xml_timestamp(datetimestring):
timestamp = timestamp - offset
return timestamp
### END get_xml_timestamp()
##############################################################################
#
# parse_int() parses a string to int
#
##############################################################################
def parse_int(s):
    """Parse a string to an int, returning 0 on empty input or parse failure.

    The value is parsed as a float first and then truncated towards zero,
    so inputs such as "3.7" (which int() alone would reject) are accepted.
    Falsy inputs (None, "", 0) yield 0 without a warning.
    """
    res = 0
    if s:
        try:
            res = int(float(s.strip()))  # higher success rate if first parsed to float
        except (ValueError, TypeError):
            # TypeError covers non-string input (e.g. bytes) that float() rejects;
            # the original only caught ValueError and would propagate it.
            if logger:
                logger.warning("Could not parse %s to int." % (str(s)))
    return res
### END parse_int()
......@@ -596,6 +596,24 @@ def main(argv):
if not quiet:
print("Element powerProfilingConf: Some observer IDs have been used but do not have a targetConf element associated with them.")
errcnt = errcnt + 1
# Check simple offset tag
rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:offset', namespaces=ns)
for elem in rs:
ppStart = int(elem.text)
elem2 = elem.getparent().find('d:durationMillisecs', namespaces=ns)
if elem2:
ppDuration = int(elem2.text) / 1000
else:
elem2 = elem.getparent().find('d:duration', namespaces=ns)
ppDuration = int(elem2.text)
if (ppStart > testDuration):
if not quiet:
print(("Line %d: element offset: The offset is bigger than the test duration, thus the action will never take place." % (elem.sourceline)))
errcnt = errcnt + 1
elif (ppStart + ppDuration > testDuration):
if not quiet:
print(("Line %d: element duration/durationMillisecs: Profiling lasts longer than test." % (elem2.sourceline)))
errcnt = errcnt + 1
# Check relative timings:
rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:relativeTime/d:offsetSecs', namespaces=ns)
for elem in rs:
......@@ -605,13 +623,18 @@ def main(argv):
else:
ppStart = int(elem.text)
elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns)
if elem2:
ppDuration = int(elem2.text) / 1000
else:
elem2 = elem.getparent().getparent().find('d:duration', namespaces=ns)
ppDuration = int(elem2.text)
if (ppStart > testDuration):
if not quiet:
print(("Line %d: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." %(elem.sourceline)))
print(("Line %d: element offsetSecs: The offset is bigger than the test duration, thus the action will never take place." % (elem.sourceline)))
errcnt = errcnt + 1
elif (ppStart + int(elem2.text)/1000 > testDuration):
elif (ppStart + ppDuration > testDuration):
if not quiet:
print(("Line %d: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline)))
print(("Line %d: element duration/durationMillisecs: Profiling lasts longer than test." % (elem2.sourceline)))
errcnt = errcnt + 1
# Check absolute timings:
rs = tree.xpath('//d:powerProfilingConf/d:profConf/d:absoluteTime/d:absoluteDateTime', namespaces=ns)
......@@ -628,6 +651,11 @@ def main(argv):
else:
ppStart = eventTime
elem2 = elem.getparent().getparent().find('d:durationMillisecs', namespaces=ns)
if elem2:
ppDuration = int(elem2.text) / 1000
else:
elem2 = elem.getparent().getparent().find('d:duration', namespaces=ns)
ppDuration = int(elem2.text)
if (ppStart > testEnd):
if not quiet:
print(("Line %d: element absoluteDateTime: The action is scheduled after the test ends, thus the action will never take place." %(elem.sourceline)))
......@@ -636,9 +664,9 @@ def main(argv):
if not quiet:
print(("Line %d: element absoluteDateTime: The action is scheduled before the test starts, thus the action will never take place." %(elem.sourceline)))
errcnt = errcnt + 1
elif (ppStart + int(elem2.text)/1000 > testEnd):
elif (ppStart + ppDuration > testEnd):
if not quiet:
print(("Line %d: element durationMillisecs: Profiling lasts longer than test." %(elem2.sourceline)))
print(("Line %d: element duration/durationMillisecs: Profiling lasts longer than test." % (elem2.sourceline)))
errcnt = errcnt + 1
#===========================================================================
......
......@@ -112,7 +112,7 @@ $javascript = '<link rel="stylesheet" href="css/ui-lightness/jquery-ui-1.8.20.cu
<?php
/* Get all status information about the observers from the database and display them in the table. */
$db = db_connect();
$sql = "SELECT obs.observer_id, obs.status, obs.last_changed,
$sql = "SELECT obs.observer_id, obs.status, obs.last_changed,
slot1.name AS name1, slot1.description AS desc1,
slot2.name AS name2, slot2.description AS desc2,
slot3.name AS name3, slot3.description AS desc3,
......@@ -127,8 +127,7 @@ $javascript = '<link rel="stylesheet" href="css/ui-lightness/jquery-ui-1.8.20.cu
LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_list` AS d ON obs.slot_4_tg_adapt_list_fk = d.serv_tg_adapt_list_key
LEFT JOIN `flocklab`.`tbl_serv_tg_adapt_types` AS slot4 ON d.tg_adapt_types_fk = slot4.serv_tg_adapt_types_key
WHERE obs.status!='disabled' AND obs.status!='develop'
ORDER BY obs.observer_id
;";
ORDER BY obs.observer_id;";
$rs = mysqli_query($db, $sql) or flocklab_die('Cannot get observer information from database because: ' . mysqli_error($db));
mysqli_close($db);
?>
......
......@@ -3,14 +3,8 @@
<!--
/*
* XML schema for FlockLab test configuration XMLs.
* This schema can be downloaded from http://www.flocklab.ethz.ch/user/xml/flocklab.xsd
* Help is available at http://www.flocklab.ethz.ch/wiki/wiki/Public/Man/XmlConfig
*
*
* __author__ = "Christoph Walser <walser@tik.ee.ethz.ch>"
* __copyright__ = "Copyright 2011-2014, ETH Zurich, Switzerland"
* __license__ = "GPL"
* __id__ = "$Id$"
* This schema can be downloaded from https://user.flocklab.ethz.ch/xml/flocklab.xsd
* Help is available at https://gitlab.ethz.ch/tec/public/flocklab/flocklab/-/wikis/Man/XmlConfig
*/
-->
......@@ -281,13 +275,18 @@
<xs:complexType name="powerProfilingConfType">
<xs:sequence>
<xs:element name="obsIds" type="obsIdListRestType"/>
<xs:element name="profConf" type="profConfType" minOccurs="1" maxOccurs="unbounded"/>
<xs:element name="fileFormat" type="profConfFileFormat" minOccurs="0" maxOccurs="1"/>
<xs:element name="profConf" type="profConfType" minOccurs="1" maxOccurs="1"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="profConfType">
<xs:sequence>
<xs:element name="durationMillisecs" type="powerProfDurationType"/>
<xs:choice>
<xs:element name="durationMillisecs" type="powerProfDurationMSType"/>
<xs:element name="duration" type="powerProfDurationType"/>
</xs:choice>
<xs:choice>
<xs:element name="offset" type="offsetSecsType"/>
<xs:element name="relativeTime">
<xs:complexType>
<xs:group ref="timingRelativeGroup"/>
......@@ -299,7 +298,10 @@
</xs:complexType>
</xs:element>
</xs:choice>
<xs:element name="samplingDivider" type="powerProfSamplingDividerType" minOccurs="0" maxOccurs="1"/>
<xs:choice>
<xs:element name="samplingDivider" type="powerProfSamplingDividerType" minOccurs="0" maxOccurs="1"/>
<xs:element name="samplingRate" type="powerProfSamplingRateType" minOccurs="0" maxOccurs="1"/>
</xs:choice>
</xs:sequence>
</xs:complexType>
......@@ -395,20 +397,36 @@
<xs:group name="gpioTraccallbackPowerProfAddGroup">
<xs:all>
<xs:element name="durationMillisecs" type="powerProfDurationType"/>
<xs:element name="durationMillisecs" type="powerProfDurationMSType"/>
<xs:element name="offsetSecs" type="offsetSecsType"/>
<xs:element name="offsetMicrosecs" type="microsecsType"/>
</xs:all>
</xs:group>
<!-- Type definition for power profiling file format -->
<xs:simpleType name="profConfFileFormat">
<xs:restriction base="xs:string">
<xs:enumeration value="csv"/>
<xs:enumeration value="rld"/>
</xs:restriction>
</xs:simpleType>
<!-- Type definition for duration of a power profile in milliseconds -->
<xs:simpleType name="powerProfDurationType">
<xs:simpleType name="powerProfDurationMSType">
<xs:restriction base="xs:integer">
<xs:minInclusive value="50"/>
<xs:maxInclusive value="3600000"/>
</xs:restriction>
</xs:simpleType>
<!-- Type definition for duration of a power profile in seconds -->
<xs:simpleType name="powerProfDurationType">
<xs:restriction base="xs:integer">
<xs:minInclusive value="1"/>
<xs:maxInclusive value="3600"/>
</xs:restriction>
</xs:simpleType>
<!-- Type definition for sampling divider of a power profile -->
<xs:simpleType name="powerProfSamplingDividerType">
<xs:restriction base="xs:integer">
......@@ -416,6 +434,13 @@
</xs:restriction>
</xs:simpleType>
<!-- Type definition for sampling rate of a power profile -->
<xs:simpleType name="powerProfSamplingRateType">
<xs:restriction base="xs:integer">
<xs:pattern value="1|10|100|1000|2000|4000|8000|16000|32000|64000"/>
</xs:restriction>
</xs:simpleType>
<!-- Type definition for time offset in seconds -->
<xs:simpleType name="offsetSecsType">
<xs:restriction base="xs:integer">
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment