# First get the file that contains the list of log files to get
tmp_file_path = src.get_tmp_filename(self.config, "list_log_files.txt")
+ remote_path = os.path.join(self.machine.sat_path, "list_log_files.txt")
self.machine.sftp.get(
- os.path.join(self.machine.sat_path, "list_log_files.txt"),
+ remote_path,
tmp_file_path)
# Read the file and get the result of the command and all the log files
fstream_tmp = open(tmp_file_path, "r")
file_lines = fstream_tmp.readlines()
file_lines = [line.replace("\n", "") for line in file_lines]
fstream_tmp.close()
os.remove(tmp_file_path)
- # The first line is the result of the command (0 success or 1 fail)
- self.res_job = file_lines[0]
+
+    try:
+        # The first line is the result of the command (0 success or 1 fail)
+        self.res_job = file_lines[0]
+    except Exception as e:
+        self.err += (_("Unable to get status from remote file %s: %s")
+                     % (remote_path, str(e)))
for i, job_path_remote in enumerate(file_lines[1:]):
try:
xslDir = os.path.join(runner.cfg.VARS.srcDir, 'xsl')
xslCommand = os.path.join(xslDir, "command.xsl")
xslHat = os.path.join(xslDir, "hat.xsl")
+ xsltest = os.path.join(xslDir, "test.xsl")
imgLogo = os.path.join(xslDir, "LOGO-SAT.png")
# copy the stylesheets in the log directory
shutil.copy2(xslCommand, logDir)
shutil.copy2(xslHat, logDir)
+ src.ensure_path_exists(os.path.join(logDir, "TEST"))
+ shutil.copy2(xsltest, os.path.join(logDir, "TEST"))
shutil.copy2(imgLogo, logDir)
# If the last option is invoked, just, show the last log file
good_result = 0
for __, product_info in products_infos:
# Apply the patch
- return_code, patch_res = apply_patch(runner.cfg, product_info, max_product_name_len, logger)
+ return_code, patch_res = apply_patch(runner.cfg,
+ product_info,
+ max_product_name_len,
+ logger)
logger.write(patch_res, 1, False)
if return_code:
good_result += 1
import src
-# Define all possible option for the make command : sat make <options>
+# Define all possible option for the shell command : sat shell <options>
parser = src.options.Options()
parser.add_option('c', 'command', 'string', 'command',
_('The shell command to execute.'), "")
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+# Copyright (C) 2010-2012 CEA/DEN
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os
+import sys
+import shutil
+import subprocess
+import datetime
+import gzip
+
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+import src
+import src.ElementTree as etree
+from src.xmlManager import add_simple_node
+
+# Define all possible option for the test command : sat test <options>
+parser = src.options.Options()
+parser.add_option('a', 'appli', 'string', 'appli',
+ _('Use this option to specify the path to an installed application.'))
+parser.add_option('g', 'grid', 'string', 'grid',
+ _("""Indicate the name of the grid to test.
+\tThis name has to be registered in sat. If your test base is not known by sat, use the option --dir."""))
+parser.add_option('m', 'module', 'list', 'modules',
+ _('Indicate which module(s) to test (subdirectory of the grid).'))
+parser.add_option('t', 'type', 'list', 'types',
+ _('Indicate which type(s) to test (subdirectory of the module).'))
+parser.add_option('d', 'dir', 'string', 'dir',
+ _('Indicate the directory containing the test base.'), "")
+parser.add_option('', 'mode', 'string', 'mode',
+    _("Indicate which kind of test to run. If MODE is 'batch' only python and NOGUI tests are run."), "normal")
+parser.add_option('', 'display', 'string', 'display',
+ _("""Set the display where to launch SALOME.
+\tIf value is NO then option --show-desktop=0 will be used to launch SALOME."""))
+parser.add_option('n', 'name', 'string', 'session',
+ _('Give a name to the test session (REQUIRED if no product).'))
+parser.add_option('', 'light', 'boolean', 'light',
+ _('Only run minimal tests declared in TestsLight.txt.'), False)
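+# Typical invocation (names are illustrative):
+#   sat test <application> --grid <grid name> --module <module> --type <type>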
+
+def description():
+    '''Method called when salomeTools is invoked with the --help option.
+
+ :return: The text to display for the test command description.
+ :rtype: str
+ '''
+ return _("The test command runs a test base on a SALOME installation.")
+
+def parse_option(args, config):
+ (options, args) = parser.parse_args(args)
+
+ if not options.appli:
+ options.appli = ""
+ elif not os.path.isabs(options.appli):
+ if not src.config_has_application(config):
+ raise src.SatException(_("An application is required to use a "
+ "relative path with option --appli"))
+ options.appli = os.path.join(config.APPLICATION.workdir, options.appli)
+
+ if not os.path.exists(options.appli):
+ raise src.SatException(_("Application not found: %s") %
+ options.appli)
+
+ return (options, args)
+
+def ask_a_path():
+    path = raw_input("Enter a path where the result should be saved: ")
+    if path == "":
+        result = raw_input("The result will not be saved. Are you sure you "
+                           "want to continue? [y/n] ")
+ if result == "y":
+ return path
+ else:
+ return ask_a_path()
+
+ elif os.path.exists(path):
+        result = raw_input("Warning: the content of %s will be deleted. Are "
+                           "you sure you want to continue? [y/n] " % path)
+ if result == "y":
+ return path
+ else:
+ return ask_a_path()
+ else:
+ return path
+
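+##
+# Stores the file content gzip-compressed under <base>/.objects/<sha1> and
+# returns the sha1 digest (content-addressed storage, git-style).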
+def save_file(filename, base):
+ f = open(filename, 'r')
+ content = f.read()
+ f.close()
+
+ objectname = sha1(content).hexdigest()
+
+ f = gzip.open(os.path.join(base, '.objects', objectname), 'w')
+ f.write(content)
+ f.close()
+ return objectname
+
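+##
+# Copies a test session tree to out_dir. Regular files are replaced by the
+# sha1 of their content (stored once under .objects); *result.py files are
+# copied verbatim.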
+def move_test_results(in_dir, what, out_dir, logger):
+ if out_dir == in_dir:
+ return
+
+ finalPath = out_dir
+ pathIsOk = False
+ while not pathIsOk:
+ try:
+ # create test results directory if necessary
+ #logger.write("FINAL = %s\n" % finalPath, 5)
+ if not os.access(finalPath, os.F_OK):
+ #shutil.rmtree(finalPath)
+ os.makedirs(finalPath)
+ pathIsOk = True
+ except:
+ logger.error(_("%s cannot be created.") % finalPath)
+ finalPath = ask_a_path()
+
+ if finalPath != "":
+ os.makedirs(os.path.join(finalPath, what, 'BASES'))
+
+ # check if .objects directory exists
+ if not os.access(os.path.join(finalPath, '.objects'), os.F_OK):
+ os.makedirs(os.path.join(finalPath, '.objects'))
+
+        logger.write(_('copy test results to %s ... ') % finalPath, 3)
+ logger.flush()
+ #logger.write("\n", 5)
+
+ # copy env_info.py
+ shutil.copy2(os.path.join(in_dir, what, 'env_info.py'),
+ os.path.join(finalPath, what, 'env_info.py'))
+
+        # for each subdirectory (i.e. grid) in the BASES directory
+ for grid in os.listdir(os.path.join(in_dir, what, 'BASES')):
+ outgrid = os.path.join(finalPath, what, 'BASES', grid)
+ ingrid = os.path.join(in_dir, what, 'BASES', grid)
+
+ # ignore files in root dir
+ if not os.path.isdir(ingrid):
+ continue
+
+ os.makedirs(outgrid)
+ #logger.write(" copy grid %s\n" % grid, 5)
+
+ for module_ in [m for m in os.listdir(ingrid) if os.path.isdir(
+ os.path.join(ingrid, m))]:
+ # ignore source configuration directories
+ if module_[:4] == '.git' or module_ == 'CVS':
+ continue
+
+ outmodule = os.path.join(outgrid, module_)
+ inmodule = os.path.join(ingrid, module_)
+ os.makedirs(outmodule)
+ #logger.write(" copy module %s\n" % module_, 5)
+
+ if module_ == 'RESSOURCES':
+ for file_name in os.listdir(inmodule):
+ if not os.path.isfile(os.path.join(inmodule,
+ file_name)):
+ continue
+ f = open(os.path.join(outmodule, file_name), "w")
+ f.write(save_file(os.path.join(inmodule, file_name),
+ finalPath))
+ f.close()
+ else:
+ for type_name in [t for t in os.listdir(inmodule) if
+ os.path.isdir(os.path.join(inmodule, t))]:
+ outtype = os.path.join(outmodule, type_name)
+ intype = os.path.join(inmodule, type_name)
+ os.makedirs(outtype)
+
+ for file_name in os.listdir(intype):
+ if not os.path.isfile(os.path.join(intype,
+ file_name)):
+ continue
+ if file_name.endswith('result.py'):
+ shutil.copy2(os.path.join(intype, file_name),
+ os.path.join(outtype, file_name))
+ else:
+ f = open(os.path.join(outtype, file_name), "w")
+ f.write(save_file(os.path.join(intype,
+ file_name),
+ finalPath))
+ f.close()
+
+ logger.write(src.printcolors.printc("OK"), 3, False)
+ logger.write("\n", 3, False)
+
+def check_remote_machine(machine_name, logger):
+    logger.write(_("\ncheck the display on %s\n") % machine_name, 4)
+    ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s "ls"' % machine_name
+    logger.write(_("Executing the command: %s ") % ssh_cmd, 4)
+ p = subprocess.Popen(ssh_cmd,
+ shell=True,
+ stdin =subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ p.wait()
+ if p.returncode != 0:
+ logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 1)
+ logger.write(" " + src.printcolors.printcError(p.stderr.read()), 2)
+ raise src.SatException("No ssh access to the display machine.")
+ else:
+ logger.write(src.printcolors.printcSuccess(src.OK_STATUS) + "\n\n", 4)
+
+##
+# Transform YYYYMMDD_hhmmss into YYYY-MM-DD hh:mm:ss.
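+# e.g. parse_date("20131225_143050") == "2013-12-25 14:30:50"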
+def parse_date(date):
+ if len(date) != 15:
+ return date
+ res = "%s-%s-%s %s:%s:%s" % (date[0:4], date[4:6], date[6:8], date[9:11], date[11:13], date[13:])
+ return res
+
+##
+# Writes a report file from a XML tree.
+def write_report(filename, xmlroot, stylesheet):
+ if not os.path.exists(os.path.dirname(filename)):
+ os.makedirs(os.path.dirname(filename))
+
+ f = open(filename, "w")
+ f.write("<?xml version='1.0' encoding='utf-8'?>\n")
+ if len(stylesheet) > 0:
+ f.write("<?xml-stylesheet type='text/xsl' href='%s'?>\n" % stylesheet)
+ f.write(etree.tostring(xmlroot, encoding='utf-8'))
+ f.close()
+
+##
+# Creates the XML report for a product.
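+# The tree is <salome><product> with <tests> grouped by grid/module/type,
+# plus <known_errors>, <new_errors> and <amend> sections.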
+def create_test_report(config, dest_path, stylesheet, xmlname=""):
+ application_name = config.VARS.application
+ withappli = src.config_has_application(config)
+
+ root = etree.Element("salome")
+ prod_node = etree.Element("product", name=application_name, build=xmlname)
+ root.append(prod_node)
+
+ if withappli:
+
+ add_simple_node(prod_node, "version_to_download", config.APPLICATION.name)
+
+ add_simple_node(prod_node, "out_dir", config.APPLICATION.workdir)
+
+ # add environment
+ exec_node = add_simple_node(prod_node, "exec")
+ exec_node.append(etree.Element("env", name="Host", value=config.VARS.node))
+ exec_node.append(etree.Element("env", name="Architecture", value=config.VARS.dist))
+ exec_node.append(etree.Element("env", name="Number of processors", value=str(config.VARS.nb_proc)))
+ exec_node.append(etree.Element("env", name="Begin date", value=parse_date(config.VARS.datehour)))
+ exec_node.append(etree.Element("env", name="Command", value=config.VARS.command))
+ exec_node.append(etree.Element("env", name="sat version", value=config.INTERNAL.sat_version))
+
+ if 'TESTS' in config:
+ tests = add_simple_node(prod_node, "tests")
+ known_errors = add_simple_node(prod_node, "known_errors")
+ new_errors = add_simple_node(prod_node, "new_errors")
+ amend = add_simple_node(prod_node, "amend")
+ tt = {}
+ for test in config.TESTS:
+ if not tt.has_key(test.grid):
+ tt[test.grid] = [test]
+ else:
+ tt[test.grid].append(test)
+
+ for grid in tt.keys():
+ gn = add_simple_node(tests, "grid")
+ gn.attrib['name'] = grid
+ nb, nb_pass, nb_failed, nb_timeout, nb_not_run = 0, 0, 0, 0, 0
+ modules = {}
+ types = {}
+ for test in tt[grid]:
+ #print test.module
+ if not modules.has_key(test.module):
+ mn = add_simple_node(gn, "module")
+ mn.attrib['name'] = test.module
+ modules[test.module] = mn
+
+ if not types.has_key("%s/%s" % (test.module, test.type)):
+                    tyn = add_simple_node(modules[test.module], "type")
+ tyn.attrib['name'] = test.type
+ types["%s/%s" % (test.module, test.type)] = tyn
+
+ for script in test.script:
+ tn = add_simple_node(types["%s/%s" % (test.module, test.type)], "test")
+ #tn.attrib['grid'] = test.grid
+ #tn.attrib['module'] = test.module
+ tn.attrib['type'] = test.type
+ tn.attrib['script'] = script.name
+ if 'callback' in script:
+ try:
+ cnode = add_simple_node(tn, "callback")
+ if src.architecture.is_windows():
+ import string
+ cnode.text = filter(lambda x: x in string.printable,
+ script.callback)
+ else:
+ cnode.text = script.callback.decode('string_escape')
+ except UnicodeDecodeError as exc:
+ zz = script.callback[:exc.start] + '?' + script.callback[exc.end-2:]
+ cnode = add_simple_node(tn, "callback")
+ cnode.text = zz.decode("UTF-8")
+ if 'amend' in script:
+ cnode = add_simple_node(tn, "amend")
+ cnode.text = script.amend.decode("UTF-8")
+
+ if script.time < 0:
+ tn.attrib['exec_time'] = "?"
+ else:
+ tn.attrib['exec_time'] = "%.3f" % script.time
+ tn.attrib['res'] = script.res
+
+ if "amend" in script:
+ amend_test = add_simple_node(amend, "atest")
+ amend_test.attrib['name'] = os.path.join(test.module, test.type, script.name)
+ amend_test.attrib['reason'] = script.amend.decode("UTF-8")
+
+ # calculate status
+ nb += 1
+ if script.res == src.OK_STATUS: nb_pass += 1
+ elif script.res == src.TIMEOUT_STATUS: nb_timeout += 1
+ elif script.res == src.KO_STATUS: nb_failed += 1
+ else: nb_not_run += 1
+
+ if "known_error" in script:
+ kf_script = add_simple_node(known_errors, "error")
+ kf_script.attrib['name'] = os.path.join(test.module, test.type, script.name)
+ kf_script.attrib['date'] = script.known_error.date
+ kf_script.attrib['expected'] = script.known_error.expected
+ kf_script.attrib['comment'] = script.known_error.comment.decode("UTF-8")
+ kf_script.attrib['fixed'] = str(script.known_error.fixed)
+ overdue = datetime.datetime.today().strftime("%Y-%m-%d") > script.known_error.expected
+ if overdue:
+ kf_script.attrib['overdue'] = str(overdue)
+
+ elif script.res == src.KO_STATUS:
+ new_err = add_simple_node(new_errors, "new_error")
+ script_path = os.path.join(test.module, test.type, script.name)
+ new_err.attrib['name'] = script_path
+ new_err.attrib['cmd'] = "sat testerror %s -s %s -c 'my comment' -p %s" % \
+ (application_name, script_path, config.VARS.dist)
+
+
+ gn.attrib['total'] = str(nb)
+ gn.attrib['pass'] = str(nb_pass)
+ gn.attrib['failed'] = str(nb_failed)
+ gn.attrib['timeout'] = str(nb_timeout)
+ gn.attrib['not_run'] = str(nb_not_run)
+
+ if len(xmlname) == 0:
+ xmlname = application_name
+ if not xmlname.endswith(".xml"):
+ xmlname += ".xml"
+
+ write_report(os.path.join(dest_path, xmlname), root, stylesheet)
+ return src.OK_STATUS
+
+def run(args, runner, logger):
+    '''Method called when salomeTools is invoked with the test command.
+ '''
+ (options, args) = parse_option(args, runner.cfg)
+
+ if options.grid and options.dir:
+ raise src.SatException(_("The options --grid and --dir are not "
+ "compatible!"))
+
+ with_product = False
+ if runner.cfg.VARS.application != 'None':
+ logger.write(_('Running tests on application %s\n') %
+ src.printcolors.printcLabel(
+ runner.cfg.VARS.application), 1)
+ with_product = True
+ elif options.dir:
+ logger.write(_('Running tests from directory %s\n') %
+ src.printcolors.printcLabel(options.dir), 1)
+ elif not options.grid:
+        raise src.SatException(_('A grid or a directory is required.'))
+
+ if with_product:
+ # check if environment is loaded
+ if 'KERNEL_ROOT_DIR' in os.environ:
+ logger.write(src.printcolors.printcWarning(_("WARNING: "
+ "SALOME environment already sourced")) + "\n", 1)
+
+
+ elif options.appli:
+ logger.write(src.printcolors.printcWarning(_("Running SALOME "
+ "application.")) + "\n\n", 1)
+ else:
+ logger.write(src.printcolors.printcWarning(_("WARNING running "
+ "without a product.")) + "\n\n", 1)
+
+ # name for session is required
+ if not options.session:
+ raise src.SatException(_("--name argument is required when no "
+ "product is specified."))
+
+ # check if environment is loaded
+ if not 'KERNEL_ROOT_DIR' in os.environ:
+ raise src.SatException(_("SALOME environment not found") + "\n")
+
+ # set the display
+ show_desktop = (options.display and options.display.upper() == "NO")
+    if options.display and options.display.upper() != "NO":
+ remote_name = options.display.split(':')[0]
+ if remote_name != "":
+ check_remote_machine(remote_name, logger)
+ # if explicitly set use user choice
+ os.environ['DISPLAY'] = options.display
+ elif 'DISPLAY' not in os.environ:
+ # if no display set
+ if 'display' in runner.cfg.SITE.test and len(runner.cfg.SITE.test.display) > 0:
+ # use default value for test tool
+ os.environ['DISPLAY'] = runner.cfg.SITE.test.display
+ else:
+ os.environ['DISPLAY'] = "localhost:0.0"
+
+ # initialization
+ #################
+ if with_product:
+ tmp_dir = runner.cfg.SITE.test.tmp_dir_with_product
+ else:
+ tmp_dir = runner.cfg.SITE.test.tmp_dir
+
+ # remove previous tmp dir
+ if os.access(tmp_dir, os.F_OK):
+ try:
+ shutil.rmtree(tmp_dir)
+ except:
+ logger.error(_("error removing TT_TMP_RESULT %s\n")
+ % tmp_dir)
+
+ lines = []
+ lines.append("date = '%s'" % runner.cfg.VARS.date)
+ lines.append("hour = '%s'" % runner.cfg.VARS.hour)
+ lines.append("node = '%s'" % runner.cfg.VARS.node)
+ lines.append("arch = '%s'" % runner.cfg.VARS.dist)
+
+ if 'APPLICATION' in runner.cfg:
+ lines.append("application_info = {}")
+ lines.append("application_info['name'] = '%s'" %
+ runner.cfg.APPLICATION.name)
+ lines.append("application_info['tag'] = '%s'" %
+ runner.cfg.APPLICATION.tag)
+ lines.append("application_info['products'] = %s" %
+ str(runner.cfg.APPLICATION.products))
+
+ content = "\n".join(lines)
+
+ # create hash from session information
+ dirname = sha1(content).hexdigest()
+ session_dir = os.path.join(tmp_dir, dirname)
+ os.makedirs(session_dir)
+ os.environ['TT_TMP_RESULT'] = session_dir
+
+ # create env_info file
+ f = open(os.path.join(session_dir, 'env_info.py'), "w")
+ f.write(content)
+ f.close()
+
+ # create working dir and bases dir
+ working_dir = os.path.join(session_dir, 'WORK')
+ os.makedirs(working_dir)
+ os.makedirs(os.path.join(session_dir, 'BASES'))
+ os.chdir(working_dir)
+
+ if 'PYTHONPATH' not in os.environ:
+ os.environ['PYTHONPATH'] = ''
+ else:
+ for var in os.environ['PYTHONPATH'].split(':'):
+ if var not in sys.path:
+ sys.path.append(var)
+
+ # launch of the tests
+ #####################
+ grid = ""
+ if options.grid:
+ grid = options.grid
+ elif not options.dir and with_product and "test_base" in runner.cfg.APPLICATION:
+ grid = runner.cfg.APPLICATION.test_base.name
+
+ src.printcolors.print_value(logger, _('Display'), os.environ['DISPLAY'], 2)
+ src.printcolors.print_value(logger, _('Timeout'),
+ runner.cfg.SITE.test.timeout, 2)
+ if 'timeout_app' in runner.cfg.SITE.test:
+ src.printcolors.print_value(logger, _('Timeout Salome'),
+ runner.cfg.SITE.test.timeout_app, 2)
+ src.printcolors.print_value(logger, _('Light mode'), options.light, 2)
+ src.printcolors.print_value(logger, _("Working dir"), session_dir, 3)
+
+ # create the test object
+ test_runner = src.test_module.Test(runner.cfg,
+ logger,
+ session_dir,
+ grid=grid,
+ modules=options.modules,
+ types=options.types,
+ appli=options.appli,
+ mode=options.mode,
+ dir_=options.dir,
+ show_desktop=show_desktop,
+ light=options.light)
+
+ # run the test
+ logger.allowPrintLevel = False
+ retcode = test_runner.run_all_tests(options.session)
+ logger.allowPrintLevel = True
+
+ logger.write(_("Tests finished"), 1)
+ logger.write("\n", 2, False)
+
+ logger.write(_("\nGenerate the specific test log\n"), 5)
+ out_dir = os.path.join(runner.cfg.SITE.log.log_dir, "TEST")
+ src.ensure_path_exists(out_dir)
+ name_xml_board = logger.logFileName.split(".")[0] + "board" + ".xml"
+ create_test_report(runner.cfg, out_dir, "test.xsl", xmlname = name_xml_board)
+ xml_board_path = os.path.join(out_dir, name_xml_board)
+ logger.l_logFiles.append(xml_board_path)
+ logger.add_link(os.path.join("TEST", name_xml_board),
+ "board",
+ retcode,
+ "Click on the link to get the detailed test results")
+
+ return retcode
+
log_dir : $USER.workdir + "/LOGS"
}
#base : $USER.workdir + $VARS.sep + "BASE-FROM-SITE"
+ test :{
+ tmp_dir_with_product : '/tmp' + $VARS.sep + $VARS.user + $VARS.sep + $APPLICATION.name + $VARS.sep + 'test'
+ tmp_dir : '/tmp' + $VARS.sep + $VARS.user + $VARS.sep + 'test'
+ timeout : 150
+ }
}
PROJECTS :
from . import environment
from . import fileEnviron
from . import compilation
+from . import test_module
OK_STATUS = "OK"
KO_STATUS = "KO"
NA_STATUS = "NA"
+KNOWNFAILURE_STATUS = "KF"
+TIMEOUT_STATUS = "TIMEOUT"
class SatException(Exception):
'''rename Exception Class
base_path = config.USER.base
return base_path
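+# Returns the SALOME version as an integer (e.g. "7.3.0" -> 730), taken from
+# APPLICATION.version_salome or parsed from the KERNEL VERSION file.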
+def get_salome_version(config):
+ if hasattr(config.APPLICATION, 'version_salome'):
+ Version = config.APPLICATION.version_salome
+ else:
+ KERNEL_info = product.get_product_config(config, "KERNEL")
+ VERSION = os.path.join(
+ KERNEL_info.install_dir,
+ "bin",
+ "salome",
+ "VERSION")
+ if not os.path.isfile(VERSION):
+ return None
+
+ fVERSION = open(VERSION)
+ Version = fVERSION.readline()
+ fVERSION.close()
+
+ VersionSalome = int(only_numbers(Version))
+ return VersionSalome
+
def only_numbers(str_num):
return ''.join([nb for nb in str_num if nb in '0123456789'] or '0')
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+# Copyright (C) 2010-2013 CEA/DEN
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os
+import sys
+import time
+import pickle
+import subprocess
+
+# Display progress
+# ----------------
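+# show_progress rewrites the current line as "<ss> <elapsed> / <remaining>"
+# (counted in polling ticks).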
+def show_progress(logger, top, delai, ss=""):
+ logger.write("\r%s\r%s %s / %s " % ((" " * 30), ss, top, (delai - top)), 4,
+ False)
+ logger.flush()
+
+def write_back(logger, message, level):
+ logger.write("\r%s\r%s" % ((" " * 40), message), level)
+
+# Launch command
+# --------------
+def launch_command(cmd, logger, cwd, args=[], log=None):
+ if log:
+ log = file(log, "a")
+ logger.write("launch: %s\n" % cmd, 5, screenOnly=True)
+ for arg in args:
+ cmd += " " + arg
+ prs = subprocess.Popen(cmd,
+ shell=True,
+ stdout=log,
+ stderr=subprocess.STDOUT,
+ cwd=cwd,
+ executable='/bin/bash')
+ return prs
+
+# Launch a batch
+# --------------
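+# Polls the process every <sommeil> seconds for at most <delai> iterations;
+# past the timeout the process is killed with SIGTERM.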
+def batch(cmd, logger, cwd, args=[], log=None, delai=20, sommeil=1):
+ proc = launch_command(cmd, logger, cwd, args, log)
+ top = 0
+ sys.stdout.softspace = True
+ begin = time.time()
+ while proc.poll() is None:
+ if time.time() - begin >= 1:
+ show_progress(logger, top, delai, "batch:")
+ if top == delai:
+ logger.write("batch: time out KILL\n", 3)
+ import signal
+ os.kill(proc.pid, signal.SIGTERM)
+ break
+ else:
+ begin = time.time()
+ time.sleep(sommeil)
+ top += 1
+ sys.stdout.flush()
+ else:
+ write_back(logger, "batch: exit (%s)\n" % str(proc.returncode), 5)
+ return (proc.returncode == 0), top
+
+# Launch a salome process
+# -----------------------
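+# Two phases: first wait (up to <delaiapp>) for the *pidict file that tells
+# SALOME is up, then wait (up to <delai>) for it to disappear, which means
+# the wrapper script has finished; past the timeout, <fin> kills the session.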
+def batch_salome(cmd, logger, cwd, args, getTmpDir,
+ pendant="SALOME_Session_Server", fin="killSalome.py",
+ log=None, delai=20, sommeil=1, delaiapp=0):
+
+ beginTime = time.time()
+ launch_command(cmd, logger, cwd, args, log)
+
+ if delaiapp == 0:
+ delaiapp = delai
+
+ # first launch salome (looking for .pidict file)
+ top = 0
+ found = False
+ tmp_dir = getTmpDir()
+ while (not found and top < delaiapp):
+ if os.path.exists(tmp_dir):
+ listFile = os.listdir(tmp_dir)
+ else:
+ listFile = []
+
+ for file_name in listFile:
+ if file_name.endswith("pidict"):
+                # Sometimes we get an old file that will be removed by
+                # runSalome, so we check that we can read it.
+ currentTime = None
+ try:
+ statinfo = os.stat(os.path.join(tmp_dir, file_name))
+ currentTime = statinfo.st_mtime
+ except: pass
+
+ if currentTime and currentTime > beginTime:
+ try:
+ file_ = open(os.path.join(tmp_dir, file_name), "r")
+ process_ids = pickle.load(file_)
+ file_.close()
+ for process_id in process_ids:
+ for __, cmd in process_id.items():
+ if cmd == [pendant]:
+ found = True
+ pidictFile = file_name
+ except:
+ file_.close()
+
+ time.sleep(sommeil)
+ top += 1
+ show_progress(logger, top, delaiapp, "launching salome or appli:")
+
+ # continue or not
+ if found:
+ write_back(logger, "batch_salome: started\n", 5)
+ else:
+ logger.write("batch_salome: FAILED to launch salome or appli\n", 3)
+ return False, -1
+
+    # SALOME is launched; wait for the wrapper script to finish
+ top = 0
+ code = None
+ while code is None:
+ show_progress(logger, top, delai, "running salome or appli:")
+
+ if not os.access(os.path.join(tmp_dir, pidictFile), os.F_OK):
+ write_back(logger, "batch_salome: exit\n", 5)
+ code = True
+ elif top >= delai:
+ # timeout kill the test
+ os.system(fin)
+ logger.write("batch_salome: time out KILL\n", 3)
+ code = False
+ else:
+ # still waiting
+ time.sleep(sommeil)
+ top = top + 1
+
+ return code, top
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+
+import os, sys, traceback
+import os.path
+import time as THEBIGTIME
+
+# set path
+toolsWay = r'${toolsWay}'
+resourcesWay = r'${resourcesWay}'
+outWay = r'${typeDir}'
+tmpDir = r'${tmpDir}'
+
+listTest = ${listTest}
+ignore = ${ignore}
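+# (the ${...} placeholders above are filled in by string.Template in
+# Test.generate_script)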
+
+sys.path.append(toolsWay)
+from TOOLS import TOOLS_class
+my_tools = TOOLS_class(resourcesWay, tmpDir, toolsWay)
+
+from TOOLS import SatNotApplicableError
+
+# set the environment variables
+os.environ['TT_BASE_RESSOURCES'] = resourcesWay
+sys.path.append(resourcesWay)
+
+exec_result = open(r'${resultFile}', 'w')
+exec_result.write('Open\n')
+
+__stdout__ = sys.stdout
+__stderr__ = sys.stderr
+
+for test in listTest:
+ pylog = open(os.path.join(outWay, test[:-3] + ".result.py"), "w")
+ testout = open(os.path.join(outWay, test[:-3] + ".out.py"), "w")
+ my_tools.init()
+ sys.stdout = testout
+ sys.stderr = testout
+
+ pylog.write('#-*- coding:utf-8 -*-\n')
+ exec_result.write("Run %s " % test)
+ exec_result.flush()
+
+ try:
+ timeStart = THEBIGTIME.time()
+ execfile(os.path.join(outWay, test), globals(), locals())
+ timeTest = THEBIGTIME.time() - timeStart
+ except SatNotApplicableError, ex:
+ status = "NA"
+ reason = str(ex)
+ exec_result.write("NA\n")
+ timeTest = THEBIGTIME.time() - timeStart
+ pylog.write('status = "NA"\n')
+ pylog.write('time = "' + timeTest.__str__() + '"\n')
+ pylog.write('callback = "%s"\n' % reason)
+ except Exception, ex:
+ status = "KO"
+ reason = ""
+ if ignore.has_key(test):
+ status = "KF"
+ reason = "Known Failure = %s\n\n" % ignore[test]
+ exec_result.write("%s\n" % status)
+ timeTest = THEBIGTIME.time() - timeStart
+ pylog.write('status = "%s" \n' % status)
+ pylog.write('time = "' + timeTest.__str__() + '"\n')
+ pylog.write('callback="""' + reason)
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ traceback.print_exception(exc_type,
+ exc_value,
+ exc_traceback,
+ None,
+ file=pylog)
+ pylog.write('"""\n')
+ else:
+ exec_result.write("OK\n")
+ pylog.write('status = "OK"\n')
+ pylog.write('time = "' + timeTest.__str__() + '"\n')
+
+ testout.close()
+ sys.stdout = __stdout__
+ sys.stderr = __stderr__
+ my_tools.writeInFiles(pylog)
+ pylog.close()
+
+exec_result.write('Close\n')
+exec_result.close()
+
+if 'PY' not in '${typeName}':
+ import salome_utils
+ killScript = os.path.join(os.environ['KERNEL_ROOT_DIR'],
+ 'bin',
+ 'salome',
+ 'killSalome.py')
+ cmd = '{python} {killScript} {port}'.format(python=os.environ['PYTHONBIN'],
+ killScript=killScript,
+ port=salome_utils.getPortNumber())
+ os.system(cmd)
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+
+# ToolBox for test framework
+
+import os
+import string
+import subprocess
+
+"""
+Exception class for test errors.
+"""
+class SatTestError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class SatNotApplicableError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+def ERROR(message):
+ print message
+ raise SatTestError(message)
+
+def NOT_APPLICABLE(message):
+ print message
+ raise SatNotApplicableError(message)
+
+##
+# Compares 2 numbers with tolerance tol.
+def compFloat(f1, f2, tol=10e-10):
+ diff = abs(f1 - f2)
+ print "|f1-f2| = %s (tol=%s)" % (str(diff), str(tol))
+ if diff <= tol:
+ comp = "OK"
+ else:
+ comp = "KO"
+ return comp
+
+##
+# Compares 2 files.
+def compFiles(f1, f2, tol=0):
+ assert os.path.exists(f1), "compFiles: file not found: %s" % f1
+ assert os.path.exists(f2), "compFiles: file not found: %s" % f2
+ diffLine = os.popen("diff -y --suppress-common-lines %s %s" % (f1, f2))
+ diff = len(string.split(diffLine.read(), "\n"))
+ diffLine.close()
+ print "nb of diff lines = %s (tol=%s)" % (str(diff), str(tol))
+ if diff <= tol:
+ comp = "OK"
+ else:
+ comp = "KO"
+ return comp
+
+##
+# Uses mdump to dump a med file.
+def mdump_med(med_file, dump_file, options):
+ assert isinstance(options, list), "Bad options for mdump: %s" % options
+ assert len(options) == 3, "Bad options for mdump: %s" % options
+ cmd = "mdump %s %s" % (med_file, " ".join(options))
+ #print cmd
+
+ df = open(dump_file, "w")
+ pdump = subprocess.Popen(cmd, shell=True, stdout=df)
+ st = pdump.wait()
+ df.close()
+
+ return st
+
+##
+# Compares 2 med files by using mdump.
+def compMED(file1, file2, tol=0, diff_flags=""):
+ assert os.path.exists(file1), "compMED: file not found: %s" % file1
+ assert os.path.exists(file2), "compMED: file not found: %s" % file2
+
+ print
+ print ">>>> compMED"
+ print " file1:", file1
+ print " file2:", file2
+
+ def do_dump(med):
+ dump = os.path.join(os.environ['TT_TMP_RESULT'], os.path.basename(med) + ".mdump")
+ st = mdump_med(med, dump, ["1", "NODALE", "FULL_INTERLACE"])
+ if st != 0 or not os.path.exists(dump):
+            raise Exception("Error mdump %s" % med)
+
+ # replace file name with "filename"
+ lines = open(dump, "r").readlines()
+ dumpfile = open(dump, "w")
+ for line in lines:
+ try:
+ line.index('Nom universel du maillage')
+ continue
+ except:
+ dumpfile.write(line.replace(med, 'filename'))
+ return dump
+
+ dump1 = do_dump(file1)
+ dump2 = do_dump(file2)
+
+ diff_cmd = "diff %s %s %s" % (diff_flags, dump1, dump2)
+ print " >" + diff_cmd
+ pdiff = subprocess.Popen(diff_cmd, shell=True, stdout=subprocess.PIPE)
+ status = pdiff.wait()
+ print " Diff =", status
+ if status != 0:
+ print pdiff.stdout.read()
+
+ print "<<<< compMED"
+ print
+
+ return status
+
+
+class TOOLS_class:
+ def __init__(self, base_ressources_dir, tmp_dir, test_ressources_dir):
+ self.base_ressources_dir = base_ressources_dir
+ self.tmp_dir = tmp_dir
+ self.test_ressources_dir = test_ressources_dir
+ pass
+
+ def init(self):
+ self.inFiles = []
+
+ def ERROR(self, message):
+        # Simulate a crash
+ ERROR(message)
+
+ def compMED(self, file1, file2, tol=0):
+ return compMED(file1, file2, tol, "--ignore-all-space")
+
+ def compFloat(self, f1, f2, tol=10e-10):
+ return compFloat(f1, f2, tol)
+
+ def compFiles(self, f1, f2, tol=0):
+ return compFiles(f1, f2, tol)
+
+ def get_inFile(self, name=None):
+ if not name:
+ return self.base_ressources_dir
+ self.inFiles.append(name)
+ return os.path.join(self.base_ressources_dir, name)
+
+ def get_outFile(self, name=None):
+ if not name:
+ return self.tmp_dir
+ return os.path.join(self.tmp_dir, name)
+
+ def writeInFiles(self, pylog):
+ pylog.write('inFiles=%s\n' % str(self.inFiles))
+
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+# Copyright (C) 2010-2013 CEA/DEN
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os, sys, datetime, shutil, string
+import subprocess
+import fork
+import src
+
+# directories not considered as test modules
+C_IGNORE_MODULES = ['.git', '.svn', 'RESSOURCES']
+
+C_TESTS_SOURCE_DIR = "Tests"
+C_TESTS_LIGHT_FILE = "TestsLight.txt"
+
+# Get directory to be used for the temporary files.
+#
+def getTmpDirDEFAULT():
+ if src.architecture.is_windows():
+ directory = os.getenv("TEMP")
+ else:
+ # for Linux: use /tmp/logs/{user} folder
+ directory = os.path.join( '/tmp', 'logs', os.getenv("USER", "unknown"))
+ return directory
+
+class Test:
+ def __init__(self,
+ config,
+ logger,
+ sessionDir,
+ grid="",
+ modules=None,
+ types=None,
+ appli="",
+ mode="normal",
+ dir_="",
+ show_desktop=True,
+ light=False):
+ self.modules = modules
+ self.config = config
+ self.logger = logger
+ self.sessionDir = sessionDir
+ self.dir = dir_
+ self.types = types
+ self.appli = appli
+ self.mode = mode
+ self.show_desktop = show_desktop
+ self.light = light
+
+ if len(self.dir) > 0:
+ self.logger.write("\n", 3, False)
+ self.prepare_grid_from_dir("DIR", self.dir)
+ self.currentGrid = "DIR"
+ else:
+ self.prepare_grid(grid)
+
+ self.settings = {}
+ self.known_errors = None
+
+ # create section for results
+ self.config.TESTS = src.pyconf.Sequence(self.config)
+
+ self.nb_run = 0
+ self.nb_succeed = 0
+ self.nb_timeout = 0
+ self.nb_not_run = 0
+ self.nb_acknoledge = 0
+
+ def _copy_dir(self, source, target):
+ if self.config.VARS.python >= "2.6":
+ shutil.copytree(source, target,
+ symlinks=True,
+ ignore=shutil.ignore_patterns('.git*','.svn*'))
+ else:
+ shutil.copytree(source, target,
+ symlinks=True)
+
+ def prepare_grid_from_dir(self, grid_name, grid_dir):
+ self.logger.write(_("get grid from dir: %s\n") % src.printcolors.printcLabel(grid_dir), 3)
+ if not os.access(grid_dir, os.X_OK):
+ raise src.SatException(_("testbase %(name)s (%(dir)s) does not exist ...\n") % \
+ { 'name': grid_name, 'dir': grid_dir })
+
+ self._copy_dir(grid_dir, os.path.join(self.sessionDir, 'BASES', grid_name))
+
+ def prepare_grid_from_git(self, grid_name, grid_base, grid_tag):
+ self.logger.write(
+ _("get grid '%(grid)s' with '%(tag)s' tag from git\n") % {
+ "grid" : src.printcolors.printcLabel(grid_name),
+ "tag" : src.printcolors.printcLabel(grid_tag)},
+ 3)
+ try:
+ def set_signal(): # pragma: no cover
+ """see http://bugs.python.org/issue1652"""
+ import signal
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ cmd = "git clone --depth 1 %(base)s %(dir)s"
+ cmd += " && cd %(dir)s"
+            if grid_tag == 'master':
+ cmd += " && git fetch origin %(branch)s"
+ else:
+ cmd += " && git fetch origin %(branch)s:%(branch)s"
+ cmd += " && git checkout %(branch)s"
+ cmd = cmd % { 'branch': grid_tag, 'base': grid_base, 'dir': grid_name }
+
+ self.logger.write("> %s\n" % cmd, 5)
+ if src.architecture.is_windows():
+ # preexec_fn not supported on windows platform
+ res = subprocess.call(cmd,
+ cwd=os.path.join(self.sessionDir, 'BASES'),
+ shell=True,
+ stdout=self.logger.logTxtFile,
+ stderr=subprocess.PIPE)
+ else:
+ res = subprocess.call(cmd,
+ cwd=os.path.join(self.sessionDir, 'BASES'),
+ shell=True,
+ preexec_fn=set_signal,
+ stdout=self.logger.logTxtFile,
+ stderr=subprocess.PIPE)
+ if res != 0:
+ raise src.SatException(_("Error: unable to get test base '%(name)s' from git '%(repo)s'.") % \
+ { 'name': grid_name, 'repo': grid_base })
+
+ except OSError:
+ self.logger.error(_("git is not installed. exiting...\n"))
+ sys.exit(0)
+
+ def prepare_grid_from_svn(self, user, grid_name, grid_base):
+ self.logger.write(_("get grid '%s' from svn\n") % src.printcolors.printcLabel(grid_name), 3)
+ try:
+ def set_signal(): # pragma: no cover
+ """see http://bugs.python.org/issue1652"""
+ import signal
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ cmd = "svn checkout --username %(user)s %(base)s %(dir)s"
+ cmd = cmd % { 'user': user, 'base': grid_base, 'dir': grid_name }
+
+ self.logger.write("> %s\n" % cmd, 5)
+ if src.architecture.is_windows():
+ # preexec_fn not supported on windows platform
+ res = subprocess.call(cmd,
+ cwd=os.path.join(self.sessionDir, 'BASES'),
+ shell=True,
+ stdout=self.logger.logTxtFile,
+ stderr=subprocess.PIPE)
+ else:
+ res = subprocess.call(cmd,
+ cwd=os.path.join(self.sessionDir, 'BASES'),
+ shell=True,
+ preexec_fn=set_signal,
+ stdout=self.logger.logTxtFile,
+ stderr=subprocess.PIPE)
+
+ if res != 0:
+ raise src.SatException(_("Error: unable to get test base '%(nam"
+ "e)s' from svn '%(repo)s'.") % \
+ { 'name': grid_name, 'repo': grid_base })
+
+ except OSError:
+ self.logger.error(_("svn is not installed. exiting...\n"))
+ sys.exit(0)
+
+ ##
+ # Configure tests base.
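+    # The grid is looked up in PROJECTS.projects.<name>.test_bases and can
+    # be fetched from a plain directory, a git repository or svn.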
+ def prepare_grid(self, grid_name):
+ src.printcolors.print_value(self.logger,
+ _("Testing grid"),
+ grid_name,
+ 3)
+ self.logger.write("\n", 3, False)
+
+ # search for the grid
+ test_base_info = None
+ for project_name in self.config.PROJECTS.projects:
+ project_info = self.config.PROJECTS.projects[project_name]
+ for t_b_info in project_info.test_bases:
+ if t_b_info.name == grid_name:
+ test_base_info = t_b_info
+
+ if not test_base_info:
+ message = _("########## WARNING: grid '%s' not found\n") % grid_name
+ raise src.SatException(message)
+
+ if test_base_info.get_sources == "dir":
+ self.prepare_grid_from_dir(grid_name, test_base_info.info.dir)
+ elif test_base_info.get_sources == "git":
+ self.prepare_grid_from_git(grid_name,
+ test_base_info.info.base,
+ self.config.APPLICATION.test_base.tag)
+ elif test_base_info.get_sources == "svn":
+ svn_user = src.get_cfg_param(test_base_info.svn_info,
+ "svn_user",
+ self.config.USER.svn_user)
+ self.prepare_grid_from_svn(svn_user,
+ grid_name,
+ test_base_info.info.base)
+ else:
+ raise src.SatException(_("unknown source type '%(type)s' for testb"
+ "ase '%(grid)s' ...\n") % {
+ 'type': test_base_info.get_sources,
+ 'grid': grid_name })
+
+ self.currentGrid = grid_name
+
+ ##
+ # Searches if the script is declared in known errors pyconf.
+ # Update the status if needed.
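+    # An OK result on a known error marks it as fixed; a KO result is
+    # reported as KF (known failure) until the expected fix date is past,
+    # then as KO again.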
+ def search_known_errors(self, status, test_module, test_type, test):
+ test_path = os.path.join(test_module, test_type, test)
+ if not src.config_has_application(self.config):
+ return status, []
+
+ if self.known_errors is None:
+ return status, []
+
+ platform = self.config.VARS.arch
+ application = self.config.VARS.application
+ error = self.known_errors.get_error(test_path, application, platform)
+ if error is None:
+ return status, []
+
+ if status == src.OK_STATUS:
+ if not error.fixed:
+ # the error is fixed
+ self.known_errors.fix_error(error)
+ #import testerror
+ #testerror.write_test_failures(
+ # self.config.TOOLS.testerror.file_path,
+ # self.known_errors.errors)
+ return status, [ error.date,
+ error.expected,
+ error.comment,
+ error.fixed ]
+
+ if error.fixed:
+ self.known_errors.unfix_error(error)
+ #import testerror
+ #testerror.write_test_failures(self.config.TOOLS.testerror.file_path,
+ # self.known_errors.errors)
+
+ delta = self.known_errors.get_expecting_days(error)
+ kfres = [ error.date, error.expected, error.comment, error.fixed ]
+ if delta < 0:
+ return src.KO_STATUS, kfres
+ return src.KNOWNFAILURE_STATUS, kfres
+
+ ##
+ # Read the *.result.py files.
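+    # A missing result file yields "?" (not run); otherwise the default is
+    # TIMEOUT (or KO when the run did not time out), overridden by the
+    # status set by the script itself.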
+ def read_results(self, listTest, has_timed_out):
+ results = {}
+ for test in listTest:
+ resfile = os.path.join(self.currentDir,
+ self.currentModule,
+ self.currentType,
+ test[:-3] + ".result.py")
+
+ # check if <test>.result.py file exists
+ if not os.path.exists(resfile):
+ results[test] = ["?", -1, "", []]
+ else:
+ gdic, ldic = {}, {}
+ execfile(resfile, gdic, ldic)
+
+ status = src.TIMEOUT_STATUS
+ if not has_timed_out:
+ status = src.KO_STATUS
+
+ if ldic.has_key('status'):
+ status = ldic['status']
+
+ expected = []
+ if status == src.KO_STATUS or status == src.OK_STATUS:
+ status, expected = self.search_known_errors(status,
+ self.currentModule,
+ self.currentType,
+ test)
+
+ callback = ""
+ if ldic.has_key('callback'):
+ callback = ldic['callback']
+ elif status == src.KO_STATUS:
+ callback = "CRASH"
+
+ exec_time = -1
+ if ldic.has_key('time'):
+ try:
+ exec_time = float(ldic['time'])
+ except:
+ pass
+
+ results[test] = [status, exec_time, callback, expected]
+
+ return results
+
+ ##
+ # Generates the script to be run by Salome.
+ # This python script includes init and close statements and a loop
+ # calling all the scripts of a single directory.
+ def generate_script(self, listTest, script_path, ignoreList):
+ # open template file
+ template_file = open(os.path.join(self.config.VARS.srcDir,
+ "test",
+ "scriptTemplate.py"), 'r')
+ template = string.Template(template_file.read())
+
+ # create substitution dictionary
+ d = dict()
+ d['resourcesWay'] = os.path.join(self.currentDir, 'RESSOURCES')
+ d['tmpDir'] = os.path.join(self.sessionDir, 'WORK')
+ d['toolsWay'] = os.path.join(self.config.VARS.srcDir, "test")
+ d['typeDir'] = os.path.join(self.currentDir,
+ self.currentModule,
+ self.currentType)
+ d['resultFile'] = os.path.join(self.sessionDir, 'WORK', 'exec_result')
+ d['listTest'] = listTest
+ d['typeName'] = self.currentType
+ d['ignore'] = ignoreList
+
+ # create script with template
+ script = open(script_path, 'w')
+ script.write(template.safe_substitute(d))
+ script.close()
+
+ # Find the getTmpDir function that gives access to *pidict file directory.
+ # (the *pidict file exists when SALOME is launched)
+ def get_tmp_dir(self):
+ # Rare case where there is no KERNEL in module list
+ # (for example MED_STANDALONE)
+ if ('APPLICATION' in self.config
+ and 'KERNEL' not in self.config.APPLICATION.products
+ and 'KERNEL_ROOT_DIR' not in os.environ):
+ return getTmpDirDEFAULT
+
+ # Case where "sat test" is launched in an existing SALOME environment
+ if 'KERNEL_ROOT_DIR' in os.environ:
+ root_dir = os.environ['KERNEL_ROOT_DIR']
+
+ if ('APPLICATION' in self.config
+ and 'KERNEL' in self.config.APPLICATION.products):
+ root_dir = src.product.get_product_config(self.config,
+ "KERNEL").install_dir
+
+        # Case where the appli option is used (with the path to the launcher)
+ if len(self.appli) > 0:
+ # There are two cases : The old application (runAppli)
+ # and the new one
+ launcherName = os.path.basename(self.appli)
+ launcherDir = os.path.dirname(self.appli)
+ if launcherName == 'runAppli':
+ # Old application
+                cmd = ("for i in " + launcherDir + "/env.d/*.sh; do source "
+                       "${i}; done ; echo $KERNEL_ROOT_DIR")
+ else:
+ # New application
+                cmd = ("echo -e 'import os\nprint os.environ[\"KERNEL_ROOT_DIR\""
+                       "]' > tmpscript.py; %s shell tmpscript.py" % self.appli)
+ root_dir = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ shell=True,
+ executable='/bin/bash').communicate()[0].split()[-1]
+
+ # import module salome_utils from KERNEL that gives
+ # the right getTmpDir function
+ import imp
+ (file_, pathname, description) = imp.find_module("salome_utils",
+ [os.path.join(root_dir,
+ 'bin',
+ 'salome')])
+ try:
+ module = imp.load_module("salome_utils",
+ file_,
+ pathname,
+ description)
+ return module.getLogDir
+ except:
+ module = imp.load_module("salome_utils",
+ file_,
+ pathname,
+ description)
+ return module.getTmpDir
+ finally:
+ if file_:
+ file_.close()
+
+
+ def get_test_timeout(self, test_name, default_value):
+ if (self.settings.has_key("timeout") and
+ self.settings["timeout"].has_key(test_name)):
+ return self.settings["timeout"][test_name]
+
+ return default_value
+
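+    ##
+    # Builds the (binSalome, binPython, killSalome) commands according to
+    # the current environment, the --appli option and the SALOME version.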
+ def generate_launching_commands(self, typename):
+ # Case where "sat test" is launched in an existing SALOME environment
+ if 'KERNEL_ROOT_DIR' in os.environ:
+ binSalome = "runSalome"
+ binPython = "python"
+ killSalome = "killSalome.py"
+
+ # Rare case where there is no KERNEL in module list
+ # (for example MED_STANDALONE)
+ if ('APPLICATION' in self.config and
+ 'KERNEL' not in self.config.APPLICATION.products):
+ binSalome = "runSalome"
+ binPython = "python"
+ killSalome = "killSalome.py"
+ src.environment.load_environment(self.config, False, self.logger)
+ return binSalome, binPython, killSalome
+
+        # Case where the appli option is used (with the path to the launcher)
+ if len(self.appli) > 0:
+ # There are two cases : The old application (runAppli)
+ # and the new one
+ launcherName = os.path.basename(self.appli)
+ launcherDir = os.path.dirname(self.appli)
+ if launcherName == 'runAppli':
+ # Old application
+ binSalome = self.appli
+ binPython = ("for i in " +
+ launcherDir +
+ "/env.d/*.sh; do source ${i}; done ; python")
+ killSalome = ("for i in " +
+ launcherDir +
+                              "/env.d/*.sh; do source ${i}; done ; killSalome.py")
+ return binSalome, binPython, killSalome
+ else:
+ # New application
+ binSalome = self.appli
+ binPython = self.appli + ' shell'
+ killSalome = self.appli + ' killall'
+ return binSalome, binPython, killSalome
+
+ # SALOME version detection and APPLI repository detection
+ VersionSalome = src.get_salome_version(self.config)
+ appdir = 'APPLI'
+ if "APPLI" in self.config and "application_name" in self.config.APPLI:
+ appdir = self.config.APPLI.application_name
+
+        # Case where SALOME does NOT have the launcher based on the SalomeContext API
+ if VersionSalome < 730:
+ binSalome = os.path.join(self.config.APPLI.module_appli_install_dir,
+ appdir,
+ "runAppli")
+ binPython = "python"
+ killSalome = "killSalome.py"
+ src.environment.load_environment(self.config, False, self.logger)
+ return binSalome, binPython, killSalome
+
+ # Case where SALOME has the launcher that uses the SalomeContext API
+ if VersionSalome >= 730:
+ if 'profile' not in self.config.APPLICATION:
+ # Before revision of application concept
+ launcher_name = self.config.APPLI.launch_alias_name
+ binSalome = os.path.join(self.config.APPLICATION.workdir,
+ appdir,
+ launcher_name)
+ else:
+ # After revision of application concept
+ launcher_name = self.config.APPLICATION.profile.launcher_name
+ binSalome = os.path.join(self.config.APPLICATION.workdir,
+ launcher_name)
+
+ if src.architecture.is_windows():
+ binSalome += '.bat'
+
+ binPython = binSalome + ' shell'
+ killSalome = binSalome + ' killall'
+ return binSalome, binPython, killSalome
+
+ return binSalome, binPython, killSalome
+
+
+ ##
+ # Runs tests of a type (using a single instance of Salome).
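+    # The type prefix selects the launch mode: NOGUI_* runs the launcher in
+    # batch mode (-t), PY_* runs the script with python alone, anything else
+    # starts a full SALOME session.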
+ def run_tests(self, listTest, ignoreList):
+ out_path = os.path.join(self.currentDir,
+ self.currentModule,
+ self.currentType)
+ typename = "%s/%s" % (self.currentModule, self.currentType)
+ time_out = self.get_test_timeout(typename,
+ self.config.SITE.test.timeout)
+
+ time_out_salome = src.get_cfg_param(self.config.SITE.test,
+ "timeout_app",
+ self.config.SITE.test.timeout)
+
+ # generate wrapper script
+ script_path = os.path.join(out_path, 'wrapperScript.py')
+ self.generate_script(listTest, script_path, ignoreList)
+
+ tmpDir = self.get_tmp_dir()
+
+ binSalome, binPython, killSalome = self.generate_launching_commands(
+ typename)
+ if self.settings.has_key("run_with_modules") \
+ and self.settings["run_with_modules"].has_key(typename):
+ binSalome = (binSalome +
+ " -m %s" % self.settings["run_with_modules"][typename])
+
+ logWay = os.path.join(self.sessionDir, "WORK", "log_cxx")
+
+ status = False
+ ellapsed = -1
+ if self.currentType.startswith("NOGUI_"):
+ # runSalome -t (bash)
+ status, ellapsed = fork.batch(binSalome, self.logger,
+ os.path.join(self.sessionDir, "WORK"),
+ [ "-t",
+ "--shutdown-server=1",
+ script_path ],
+ delai=time_out,
+ log=logWay)
+
+ elif self.currentType.startswith("PY_"):
+ # python script.py
+ status, ellapsed = fork.batch(binPython, self.logger,
+ os.path.join(self.sessionDir, "WORK"),
+ [script_path],
+ delai=time_out, log=logWay)
+
+ else:
+ opt = "-z 0"
+ if self.show_desktop: opt = "--show-desktop=0"
+ status, ellapsed = fork.batch_salome(binSalome,
+ self.logger,
+ os.path.join(self.sessionDir,
+ "WORK"),
+ [ opt,
+ "--shutdown-server=1",
+ script_path ],
+ getTmpDir=tmpDir,
+ fin=killSalome,
+ delai=time_out,
+ log=logWay,
+ delaiapp=time_out_salome)
+
+ self.logger.write("status = %s, ellapsed = %s\n" % (status, ellapsed),
+ 5)
+
+ # create the test result to add in the config object
+ test_info = src.pyconf.Mapping(self.config)
+ test_info.grid = self.currentGrid
+ test_info.module = self.currentModule
+ test_info.type = self.currentType
+ test_info.script = src.pyconf.Sequence(self.config)
+
+ script_results = self.read_results(listTest, ellapsed == time_out)
+ for sr in sorted(script_results.keys()):
+ self.nb_run += 1
+
+ # create script result
+ script_info = src.pyconf.Mapping(self.config)
+ script_info.name = sr
+ script_info.res = script_results[sr][0]
+ script_info.time = script_results[sr][1]
+ if script_info.res == src.TIMEOUT_STATUS:
+ script_info.time = time_out
+ if script_info.time < 1e-3: script_info.time = 0
+
+ callback = script_results[sr][2]
+ if script_info.res != src.OK_STATUS and len(callback) > 0:
+ script_info.callback = callback
+
+ kfres = script_results[sr][3]
+ if len(kfres) > 0:
+ script_info.known_error = src.pyconf.Mapping(self.config)
+ script_info.known_error.date = kfres[0]
+ script_info.known_error.expected = kfres[1]
+ script_info.known_error.comment = kfres[2]
+ script_info.known_error.fixed = kfres[3]
+
+ # add it to the list of results
+ test_info.script.append(script_info, '')
+
+ # display the results
+ if script_info.time > 0:
+ exectime = "(%7.3f s)" % script_info.time
+ else:
+ exectime = ""
+
+ sp = "." * (35 - len(script_info.name))
+ self.logger.write(self.write_test_margin(3), 3)
+ self.logger.write("script %s %s %s %s\n" % (
+ src.printcolors.printcLabel(script_info.name),
+ sp,
+ src.printcolors.printc(script_info.res),
+ exectime), 3, False)
+ if script_info and len(callback) > 0:
+ self.logger.write("Exception in %s\n%s\n" % \
+ (script_info.name,
+ src.printcolors.printcWarning(callback)), 2, False)
+
+ if script_info.res == src.OK_STATUS:
+ self.nb_succeed += 1
+ elif script_info.res == src.KNOWNFAILURE_STATUS:
+ self.nb_acknoledge += 1
+ elif script_info.res == src.TIMEOUT_STATUS:
+ self.nb_timeout += 1
+ elif script_info.res == src.NA_STATUS:
+ self.nb_run -= 1
+ elif script_info.res == "?":
+ self.nb_not_run += 1
+
+ self.config.TESTS.append(test_info, '')
+
+ ##
+ # Runs all tests of a type.
+ def run_type_tests(self, light_test):
+ if self.light:
+ if not any(map(lambda l: l.startswith(self.currentType),
+ light_test)):
+ # no test to run => skip
+ return
+
+ self.logger.write(self.write_test_margin(2), 3)
+ self.logger.write("Type = %s\n" % src.printcolors.printcLabel(
+ self.currentType), 3, False)
+
+ # prepare list of tests to run
+ tests = os.listdir(os.path.join(self.currentDir,
+ self.currentModule,
+ self.currentType))
+ tests = filter(lambda l: l.endswith(".py"), tests)
+ tests = sorted(tests, key=str.lower)
+
+ if self.light:
+ tests = filter(lambda l: os.path.join(self.currentType,
+ l) in light_test, tests)
+
+ # build list of known failures
+ cat = "%s/%s/" % (self.currentModule, self.currentType)
+ ignoreDict = {}
+ for k in self.ignore_tests.keys():
+ if k.startswith(cat):
+ ignoreDict[k[len(cat):]] = self.ignore_tests[k]
+
+ self.run_tests(tests, ignoreDict)
+
+ ##
+ # Runs all tests of a module.
+ def run_module_tests(self):
+ self.logger.write(self.write_test_margin(1), 3)
+ self.logger.write("Module = %s\n" % src.printcolors.printcLabel(
+ self.currentModule), 3, False)
+
+ module_path = os.path.join(self.currentDir, self.currentModule)
+
+ types = []
+ if self.types is not None:
+ types = self.types # user choice
+ else:
+ # use all scripts in module
+ types = filter(lambda l: l not in C_IGNORE_MODULES,
+ os.listdir(module_path))
+ types = filter(lambda l: os.path.isdir(os.path.join(module_path,
+ l)), types)
+
+ # in batch mode keep only modules with NOGUI or PY
+ if self.mode == "batch":
+ types = filter(lambda l: ("NOGUI" in l or "PY" in l), types)
+
+ light_test = []
+ if self.light:
+ light_path = os.path.join(module_path, C_TESTS_LIGHT_FILE)
+ if not os.path.exists(light_path):
+ types = []
+ msg = src.printcolors.printcWarning(_("List of light tests not"
+ " found: %s") % light_path)
+ self.logger.write(msg + "\n")
+ else:
+ # read the file
+ light_file = open(light_path, "r")
+ light_test = map(lambda l: l.strip(), light_file.readlines())
+
+ types = sorted(types, key=str.lower)
+ for type_ in types:
+ if not os.path.exists(os.path.join(module_path, type_)):
+ self.logger.write(self.write_test_margin(2), 3)
+ self.logger.write(src.printcolors.printcWarning("Type %s not "
+ "found" % type_) + "\n", 3, False)
+ else:
+ self.currentType = type_
+ self.run_type_tests(light_test)
+
+ ##
+ # Runs test grid.
+ def run_grid_tests(self):
+ res_dir = os.path.join(self.currentDir, "RESSOURCES")
+ os.environ['PYTHONPATH'] = (res_dir +
+ os.pathsep +
+ os.environ['PYTHONPATH'])
+ os.environ['TT_BASE_RESSOURCES'] = res_dir
+ src.printcolors.print_value(self.logger,
+ "TT_BASE_RESSOURCES",
+ res_dir,
+ 4)
+ self.logger.write("\n", 4, False)
+
+ self.logger.write(self.write_test_margin(0), 3)
+ grid_label = "Grid = %s\n" % src.printcolors.printcLabel(
+ self.currentGrid)
+ self.logger.write(grid_label, 3, False)
+ self.logger.write("-" * len(src.printcolors.cleancolor(grid_label)), 3)
+ self.logger.write("\n", 3, False)
+
+ # load settings
+ settings_file = os.path.join(res_dir, "test_settings.py")
+ if os.path.exists(settings_file):
+ gdic, ldic = {}, {}
+ execfile(settings_file, gdic, ldic)
+ self.logger.write(_("Load test settings\n"), 3)
+ self.settings = ldic['settings_dic']
+ self.ignore_tests = ldic['known_failures_list']
+ if isinstance(self.ignore_tests, list):
+ self.ignore_tests = {}
+                self.logger.write(src.printcolors.printcWarning(
+                    "known_failures_list must be a dictionary (not a list)")
+                    + "\n", 1, False)
+ else:
+ self.ignore_tests = {}
+ self.settings.clear()
+
+        # read known failures pyconf; default to None so self.known_errors is
+        # always defined
+        self.known_errors = None
+        if "testerror" in self.config.SITE:
+            # reading the known failures file is currently disabled
+            #import testerror
+            #self.known_errors = testerror.read_test_failures(
+            #    self.config.TOOLS.testerror.file_path,
+            #    do_error=False)
+            pass
+
+ if self.modules is not None:
+ modules = self.modules # given by user
+ else:
+ # select all the modules (i.e. directories) in the directory
+ modules = filter(lambda l: l not in C_IGNORE_MODULES,
+ os.listdir(self.currentDir))
+ modules = filter(lambda l: os.path.isdir(
+ os.path.join(self.currentDir, l)),
+ modules)
+
+ modules = sorted(modules, key=str.lower)
+ for module in modules:
+ if not os.path.exists(os.path.join(self.currentDir, module)):
+ self.logger.write(self.write_test_margin(1), 3)
+ self.logger.write(src.printcolors.printcWarning(
+ "Module %s does not exist\n" % module), 3, False)
+ else:
+ self.currentModule = module
+ self.run_module_tests()
+
+    ##
+    # Runs the setup or cleanup script registered under the given name in the
+    # application configuration, if any.
+    def run_script(self, script_name):
+        if ('APPLICATION' not in self.config or
+                script_name not in self.config.APPLICATION):
+            return
+        script = self.config.APPLICATION[script_name]
+        if len(script) == 0:
+            return
+
+        self.logger.write("\n", 2, False)
+        if not os.path.exists(script):
+            self.logger.write(src.printcolors.printcWarning(
+                "WARNING: script not found: %s" % script) + "\n", 2)
+        else:
+            self.logger.write(src.printcolors.printcHeader(
+                "----------- start %s" % script_name) + "\n", 2)
+            self.logger.write("Run script: %s\n" % script, 2)
+            # run the script through the shell and wait for it to finish
+            subprocess.Popen(script, shell=True).wait()
+            self.logger.write(src.printcolors.printcHeader(
+                "----------- end %s" % script_name) + "\n", 2)
+
+ def run_all_tests(self, session_name=""):
+ initTime = datetime.datetime.now()
+
+ self.run_script('test_setup')
+ self.logger.write("\n", 2, False)
+
+ self.logger.write(src.printcolors.printcHeader(
+ _("=== STARTING TESTS")) + "\n", 2)
+ self.logger.write("\n", 2, False)
+ self.currentDir = os.path.join(self.sessionDir,
+ 'BASES',
+ self.currentGrid)
+ self.run_grid_tests()
+
+ # calculate total execution time
+ totalTime = datetime.datetime.now() - initTime
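+        # drop sub-second precision so that str(totalTime) prints as H:MM:SS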
+ totalTime -= datetime.timedelta(microseconds=totalTime.microseconds)
+ self.logger.write("\n", 2, False)
+ self.logger.write(src.printcolors.printcHeader(_("=== END TESTS")), 2)
+ self.logger.write(" %s\n" % src.printcolors.printcInfo(str(totalTime)),
+ 2,
+ False)
+
+        # run the cleanup script, if any
+        self.run_script('test_cleanup')
+ self.logger.write("\n", 2, False)
+
+ # evaluate results
+ res_count = "%d / %d" % (self.nb_succeed,
+ self.nb_run - self.nb_acknoledge)
+
+ res_out = _("Tests Results: %(succeed)d / %(total)d\n") % \
+ { 'succeed': self.nb_succeed, 'total': self.nb_run }
+ if self.nb_succeed == self.nb_run:
+ res_out = src.printcolors.printcSuccess(res_out)
+ else:
+ res_out = src.printcolors.printcError(res_out)
+ self.logger.write(res_out, 1)
+
+ if self.nb_timeout > 0:
+ self.logger.write(_("%d tests TIMEOUT\n") % self.nb_timeout, 1)
+ res_count += " TO: %d" % self.nb_timeout
+ if self.nb_not_run > 0:
+ self.logger.write(_("%d tests not executed\n") % self.nb_not_run, 1)
+ res_count += " NR: %d" % self.nb_not_run
+
+ status = src.OK_STATUS
+ if self.nb_run - self.nb_succeed - self.nb_acknoledge > 0:
+ status = src.KO_STATUS
+ elif self.nb_acknoledge:
+ status = src.KNOWNFAILURE_STATUS
+
+ self.logger.write(_("Status: %s\n" % status), 3)
+
+ if session_name is not None and len(session_name) > 0:
+ self.config.RESULTS.test["session"] = session_name
+
+ return self.nb_run - self.nb_succeed - self.nb_acknoledge
+
+ ##
+    # Returns the margin string used to indent nested test output.
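+    # e.g. write_test_margin(1) returns "+ " and write_test_margin(3)
+    # returns "| | + ".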
+ def write_test_margin(self, tab):
+ if tab == 0:
+ return ""
+ return "| " * (tab - 1) + "+ "
+
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
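+<!-- Renders the XML log of the test command as an HTML page: one summary
+     table per grid, plus a collapsible detail table per module. -->
+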
+<xsl:template match="/">
+<html>
+<head>
+<title>Tests of <xsl:value-of select="//product/@name" /></title>
+<style type="text/css">
+ .def { font-family: Arial, Verdana, "Times New Roman", Times, serif;}
+ .OK { background-color:#00FF00; }
+ .KO { background-color:#FF0000; }
+ .KF { background-color:#FFA500; }
+ .NA { background-color:#BBBBBB; }
+ .PASS { background-color:#00FF00; }
+ .FAILED { background-color:#F22000; }
+ .TIMEOUT { background-color:#DFFF00; }
+ .OK2 { color:#00FF00; }
+ .KO2 { color:#F22000; font-weight: bold; }
+ .NA2 { color:#BBBBBB; }
+ .CHECK2 { color:#FFA500; }
+ .ok { color:#00AA00; }
+ .ko { color:#AA0000; }
+ .new { background-color:#FF5500; }
+ .undercontrol { background-color:#AA0000; }
+ .ignored { color:#A0A0A0; }
+ div.pqlist { -moz-column-count: 5;
+ overflow: auto;
+ max-height: 250px;
+ }
+ table.pq { width: 100%;
+ margin:0px;
+ padding:0px;
+ border-collapse: collapse;
+ empty-cells: show;
+ border-style: hidden;
+ }
+ table { margin:0px;
+ padding:0px;
+ border-collapse:collapse;
+ empty-cells: show;
+ border: solid 1px;
+ }
+ td.KO2 { border: solid 1px black; padding: 0px; }
+ td.OK2 { border: solid 1px black; padding: 0px; }
+ td { border: solid 1px black; padding: 1px; }
+ h2 { text-align: center; }
+ .legend { font-weight: bold;
+ text-align: center;
+ }
+ span.covered { display:-moz-inline-box; display: inline-block;
+ height:18px;
+ vertical-align:top;
+ background: #00df00;
+ }
+ span.uncovered { display:-moz-inline-box; display: inline-block;
+ height:18px;
+ vertical-align:top;
+ background: #df0000;
+ }
+ span.ignored { display:-moz-inline-box; display: inline-block;
+ height:18px;
+ vertical-align:top;
+ background: #dfff00;
+ }
+ span.knownfailure { display:-moz-inline-box; display: inline-block;
+ height:18px;
+ vertical-align:top;
+ background: #ffa500;
+ }
+ span.notApplicable { display:-moz-inline-box; display: inline-block;
+ height:18px;
+ vertical-align:top;
+ background: #bbbbbb;
+ }
+ span.zero { color: #A0A0A0; }
+    a.node { color:#0000FF; text-decoration: none; }
+    a.node:visited { color:#FF0000; }
+
+</style>
+<script language="JavaScript"><![CDATA[
+ function Toggle(id) {
+ var element = document.getElementById(id);
+
+ if ( element.style.display == "none" )
+ element.style.display = "block";
+ else
+ element.style.display = "none";
+ }
+
+ function collapseall() {
+ var divlist = document.getElementsByName("mod");
+    for (var i = 0; i < divlist.length; i++)
+ {
+ divlist[i].style.display = "none";
+ }
+ }
+
+ ]]></script>
+</head>
+
+<body class="def">
+
+ <xsl:apply-templates select="//product" mode="test" />
+
+ <br/>
+ <br/>
+
+ </body>
+ </html>
+</xsl:template>
+
+<xsl:template match="product" mode="test">
+
+ <h3>Tests</h3>
+
+ <xsl:for-each select="tests/grid">
+ <b>grid <xsl:value-of select="@name" /></b><br/><br/>
+ <a name="test"/>
+ <xsl:apply-templates select="." mode="test-grid" />
+ </xsl:for-each>
+
+</xsl:template>
+
+<xsl:template match="grid" mode="test-grid">
+ <table>
+ <!-- Header -->
+ <tr bgcolor="#9acd32">
+ <th width="150">module</th>
+ <th width="100">success</th>
+ <th width="200"></th>
+ <th width="100">total</th>
+ <th width="100">pass</th>
+ <th width="100">failed</th>
+ <th width="100">timeout</th>
+ <th width="100">known failures</th>
+ <th width="100">not run</th>
+ <th width="100">N/A</th>
+ <th width="100">Time</th>
+ </tr>
+
+ <xsl:for-each select="./module">
+ <xsl:variable name="total" select="count(.//test)"/>
+ <xsl:variable name="failureCount" select="count(.//test[@res='KO'])"/>
+ <xsl:variable name="successCount" select="count(.//test[@res='OK'])"/>
+ <xsl:variable name="timeoutCount" select="count(.//test[@res='TIMEOUT'])"/>
+ <xsl:variable name="knownFailures" select="count(.//test[@res='KF'])"/>
+ <xsl:variable name="notApplicable" select="count(.//test[@res='NA'])"/>
+ <xsl:variable name="ignoreCount" select="$total - $successCount - $failureCount - $knownFailures - $notApplicable"/>
+ <xsl:variable name="successRate" select="$successCount div $total"/>
+
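+      <!-- clicking the module name collapses every detail block and then
+           toggles the <div> whose id is "mod_<grid>.<module>" -->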
+ <tr>
+ <td><a href="#test" class="node" title="voir">
+ <xsl:attribute name="onclick">javascript:collapseall();Toggle('mod_<xsl:value-of select="../@name"/>.<xsl:value-of select="@name"/>');</xsl:attribute>
+ <xsl:attribute name="id">img_<xsl:value-of select="@name"/></xsl:attribute><xsl:value-of select="@name"/> </a>
+ </td>
+
+ <td align="right">
+ <xsl:call-template name="display-percent">
+ <xsl:with-param name="value" select="$successRate"/>
+ </xsl:call-template>
+ </td>
+ <td width="210px" align="center">
+ <!-- Progress bar -->
+ <xsl:if test="round($successCount * 200 div $total) != 0">
+ <span class="covered">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($successCount * 200 div $total)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($failureCount * 200 div $total) != 0">
+ <span class="uncovered">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($failureCount * 200 div $total)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($knownFailures * 200 div $total) != 0">
+ <span class="knownfailure">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($knownFailures * 200 div $total)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($notApplicable * 200 div $total) != 0">
+ <span class="notApplicable">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($notApplicable * 200 div $total)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($ignoreCount * 200 div $total) != 0">
+ <span class="ignored">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($ignoreCount * 200 div $total)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ </td>
+ <td align="right"><xsl:value-of select="$total" /></td>
+ <td align="right"><xsl:value-of select="$successCount" /></td>
+ <xsl:call-template name="display-count"><xsl:with-param name="value" select="$failureCount"/></xsl:call-template>
+
+ <xsl:call-template name="display-count"><xsl:with-param name="value" select="$timeoutCount"/></xsl:call-template>
+ <xsl:call-template name="display-count"><xsl:with-param name="value" select="$knownFailures"/></xsl:call-template>
+ <xsl:call-template name="display-count"><xsl:with-param name="value" select="$ignoreCount"/></xsl:call-template>
+ <xsl:call-template name="display-count"><xsl:with-param name="value" select="$notApplicable"/></xsl:call-template>
+ <td align="right"><xsl:value-of select="format-number(sum(.//test/@exec_time), '0.0')" /></td>
+ </tr>
+ </xsl:for-each>
+
+ <!-- Summary Row -->
+ <xsl:variable name="GrandTotal" select="count(//test)"/>
+ <xsl:variable name="TotalFailure" select="count(//test[@res='KO'])"/>
+ <xsl:variable name="TotalSuccess" select="count(//test[@res='OK'])"/>
+ <xsl:variable name="TotalTimeout" select="count(//test[@res='TIMEOUT'])"/>
+ <xsl:variable name="TotalKnownFailures" select="count(//test[@res='KF'])"/>
+ <xsl:variable name="TotalNA" select="count(//test[@res='NA'])"/>
+ <xsl:variable name="TotalIgnore" select="$GrandTotal - $TotalSuccess - $TotalFailure - $TotalKnownFailures - $TotalNA"/>
+ <xsl:variable name="TotalSuccessRate" select="$TotalSuccess div $GrandTotal"/>
+
+ <tr bgcolor="#EF9C9C">
+ <td>Total</td>
+ <td align="right">
+ <xsl:call-template name="display-percent">
+ <xsl:with-param name="value" select="$TotalSuccessRate"/>
+ </xsl:call-template>
+ </td>
+ <td width="210px" align="center">
+ <xsl:if test="round($TotalSuccess * 200 div $GrandTotal) != 0">
+ <span class="covered">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($TotalSuccess * 200 div $GrandTotal)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($TotalFailure * 200 div $GrandTotal) != 0">
+ <span class="uncovered">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($TotalFailure * 200 div $GrandTotal)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($TotalKnownFailures * 200 div $GrandTotal) != 0">
+ <span class="knownfailure">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($TotalKnownFailures * 200 div $GrandTotal)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ <xsl:if test="round($TotalIgnore * 200 div $GrandTotal) != 0">
+ <span class="ignored">
+ <xsl:attribute name="style">width:<xsl:value-of select="round($TotalIgnore * 200 div $GrandTotal)"/>px</xsl:attribute> 
+ </span>
+ </xsl:if>
+ </td>
+ <td align="right"><xsl:value-of select="$GrandTotal" /></td>
+ <td align="right"><xsl:value-of select="$TotalSuccess" /></td>
+ <td align="right"><xsl:value-of select="$TotalFailure" /></td>
+ <td align="right"><xsl:value-of select="$TotalTimeout" /></td>
+ <td align="right"><xsl:value-of select="$TotalKnownFailures" /></td>
+ <td align="right"><xsl:value-of select="$TotalIgnore" /></td>
+ <td align="right"><xsl:value-of select="$TotalNA" /></td>
+ <td align="right"><xsl:value-of select="format-number(sum(//test/@exec_time), '0.0')" /></td>
+ </tr>
+ </table>
+
+ <br/>
+ <!-- Show details -->
+ <xsl:for-each select="./module">
+ <xsl:sort select="@name" />
+ <xsl:sort select="@type" />
+
+ <div style="display:none" name="mod"><xsl:attribute name="id">mod_<xsl:value-of select="../@name"/>.<xsl:value-of select="@name"/></xsl:attribute>
+ Tests of module <b><xsl:value-of select="@name"/></b>
+ <table width="100%">
+ <tr bgcolor="#9acd32">
+ <th width="100">type</th>
+ <th>script</th>
+ <th width="100">result</th>
+ <th width="100">time</th>
+ </tr>
+
+ <xsl:for-each select="./type">
+ <xsl:sort select="@name" />
+
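+        <!-- the type cell spans its own row plus one row per test and one
+             extra row for each callback output, hence the rowspan below -->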
+ <tr>
+ <td align="center"><xsl:attribute name="rowspan"><xsl:value-of select="count(./test)+count(.//callback)+1" /></xsl:attribute>
+ <xsl:value-of select="@name" />
+ <br/>(<xsl:value-of select="format-number(sum(./test/@exec_time), '0')" /> s)
+ </td>
+ </tr>
+
+ <xsl:for-each select="./test">
+ <xsl:sort select="@script" />
+
+ <xsl:choose>
+ <xsl:when test="count(./callback) != 0">
+ <tr>
+ <td align="left"><xsl:attribute name="class"><xsl:value-of select="@res" /></xsl:attribute><xsl:value-of select="@script" /></td>
+ <td align="center"><xsl:attribute name="class"><xsl:value-of select="@res" /></xsl:attribute><xsl:value-of select="@res" /></td>
+ <td align="right"><xsl:value-of select="format-number(@exec_time, '0.0')" /></td>
+ </tr>
+ <tr>
+ <td align="left" colspan="3" class="linkification-disabled"><xsl:value-of select="./callback" /></td>
+ </tr>
+ </xsl:when>
+ <xsl:otherwise>
+ <tr>
+ <td align="left"><xsl:value-of select="@script" /></td>
+ <td align="center"><xsl:attribute name="class"><xsl:value-of select="@res" /></xsl:attribute><xsl:value-of select="@res" /></td>
+ <td align="right"><xsl:value-of select="format-number(@exec_time, '0.0')" /></td>
+ </tr>
+ </xsl:otherwise>
+ </xsl:choose>
+ <xsl:if test="count(./amend) != 0">
+ <tr>
+ <td class="ko"><b>Amended</b></td>
+ <td align="left" colspan="3"><xsl:value-of select="./amend" /></td>
+ </tr>
+ </xsl:if>
+ </xsl:for-each>
+ </xsl:for-each>
+
+ </table>
+ </div>
+ </xsl:for-each>
+
+</xsl:template>
+
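+<!-- note: with format-number in XSLT 1.0 the '%' in the picture multiplies
+     the value by 100, so a success rate in [0,1] prints as a percentage -->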
+<xsl:template name="display-percent">
+ <xsl:param name="value"/>
+ <xsl:value-of select="format-number($value, '00.00 %')"/>
+</xsl:template>
+
+<xsl:template name="display-count">
+ <xsl:param name="value"/>
+ <td align="right">
+ <xsl:if test="$value > 0">
+ <xsl:value-of select="$value"/>
+ </xsl:if>
+ <xsl:if test="$value = 0"><span class="zero">0</span></xsl:if>
+ </td>
+</xsl:template>
+
+</xsl:stylesheet>