#!/usr/bin/env python
#-*- coding:utf-8 -*-
-# Copyright (C) 2010-2012 CEA/DEN
+# Copyright (C) 2010-2018 CEA/DEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# if section APPLICATION.virtual_app does not exist, create one
if "virtual_app" not in runner.cfg.APPLICATION:
msg = _("The section APPLICATION.virtual_app is not defined in the product.")
- logger.error(UTS.red(msg)
+ logger.error(UTS.red(msg))
return RCO.ReturnCode("KO", msg)
# get application dir
# remove previous application
if os.path.exists(appli_dir):
- write_step(logger, _("Removing previous application directory"))
- rres = src.KO_STATUS
+ logger.info(get_step(_("Removing previous application directory")))
+ rres = "<KO>"
try:
shutil.rmtree(appli_dir)
- rres = src.OK_STATUS
+ rres = "<OK>"
finally:
- logger.write(src.printcolors.printc(rres) + "\n", 3, False)
+ logger.info(rres + "\n")
# generate the application
try:
details.append(str(exc))
raise
finally:
- logger.write("\n", 3, False)
+ logger.info("\n")
return RCO.ReturnCode("OK")
else: # find relative path
os.symlink(appli_path, alias_path)
-##
-# add the definition of a module to out stream.
def add_module_to_appli(out, module, has_gui, module_path, logger, flagline):
+ """add the definition of a module to out stream."""
if not os.path.exists(module_path):
if not flagline:
- logger.write("\n", 3, False)
+ logger.info("\n")
flagline = True
- logger.write(" " + src.printcolors.printcWarning(_(
- "WARNING: module %s not installed") % module) + "\n", 3)
+        logger.warning(" " + _("module %s not installed") % module)
- out.write(' <module name="%s" gui="%s" path="%s"/>\n' % (module,
- has_gui,
- module_path))
+ out.write(' <module name="%s" gui="%s" path="%s"/>\n' % \
+ (module, has_gui, module_path))
return flagline
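
# Illustrative sketch (not part of the patch): the kind of XML line that
# add_module_to_appli() writes into the application file; the module name,
# gui flag and install path below are hypothetical examples.
import io

_demo_out = io.StringIO()
_demo_out.write(u'  <module name="%s" gui="%s" path="%s"/>\n'
                % ("GEOM", "yes", "/opt/salome/modules/GEOM"))
print(_demo_out.getvalue())
# ->   <module name="GEOM" gui="yes" path="/opt/salome/modules/GEOM"/>
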
##
return config_file
-##
-# Customizes the application by editing SalomeApp.xml.
+
def customize_app(config, appli_dir, logger):
+ """Customizes the application by editing SalomeApp.xml."""
if 'configure' not in config.APPLICATION.virtual_app \
or len(config.APPLICATION.virtual_app.configure) == 0:
return
- # shortcut to get an element (section or parameter) from parent.
def get_element(parent, name, strtype):
+ """shortcut to get an element (section or parameter) from parent."""
for c in parent.getchildren():
if c.attrib['name'] == name:
return c
elt.attrib['name'] = name
return elt
- # shortcut method to create a node
def add_simple_node(parent, node_name, text=None):
+ """shortcut method to create a node"""
n = etree.Element(node_name)
if text is not None:
try:
n.text = text.strip("\n\t").decode("UTF-8")
except:
- sys.stderr.write("################ %s %s\n" % (node_name, text))
+            logger.error("problem decoding UTF-8 %s:\n%s\n" % \
+ (node_name, UTS.toHex(text)))
n.text = "?"
parent.append(n)
return n
document = tree.getroot()
assert document is not None, "document tag not found"
- logger.write("\n", 4)
+ logger.info("\n")
for section_name in config.APPLICATION.virtual_app.configure:
for parameter_name in config.APPLICATION.virtual_app.configure[section_name]:
parameter_value = config.APPLICATION.virtual_app.configure[section_name][parameter_name]
- logger.write(" configure: %s/%s = %s\n" % (section_name,
+ logger.info(" configure: %s/%s = %s\n" % (section_name,
parameter_name,
- parameter_value), 4)
+ parameter_value))
section = get_element(document, section_name, "section")
parameter = get_element(section, parameter_name, "parameter")
parameter.attrib['value'] = parameter_value
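
# Illustrative sketch (not part of the patch): the section/parameter structure
# that customize_app() edits in SalomeApp.xml, rebuilt here with the standard
# xml.etree API; the "launch"/"gui" names are hypothetical examples.
import xml.etree.ElementTree as etree

_doc = etree.Element("document")
_section = etree.SubElement(_doc, "section", name="launch")
etree.SubElement(_section, "parameter", name="gui", value="yes")
print(etree.tostring(_doc).decode())
# -> <document><section name="launch"><parameter name="gui" value="yes" /></section></document>
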
command = "python %s --prefix=%s --config=%s" % (script,
appli_dir,
config_file)
- logger.write("\n>" + command + "\n", 5, False)
+ logger.debug("\n>" + command + "\n")
res = subprocess.call(command,
shell=True,
cwd=target_dir,
return res
-##
-#
-def write_step(logger, message, level=3, pad=50):
- logger.write("%s %s " % (message, '.' * (pad - len(message.decode("UTF-8")))), level)
- logger.flush()
+def get_step(message, pad=50):
+ """
+ returns 'message ........ ' with pad 50 by default
+ avoid colors '<color>' for now in message
+ """
+    return "%s %s " % (message, '.' * (pad - len(message.decode("UTF-8"))))
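
# Illustrative sketch (not part of the patch): what get_step() returns, using a
# plain str message so the .decode("UTF-8") of the Python 2 code is not needed.
def _get_step_demo(message, pad=50):
    return "%s %s " % (message, '.' * (pad - len(message)))

print(_get_step_demo("Building application"))
# -> Building application ..............................
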
##
# Creates a SALOME application.
if display:
for w in warn:
if w not in SALOME_modules:
- msg = _("WARNING: module %s is required to create application\n") % w
- logger.write(src.printcolors.printcWarning(msg), 2)
+ msg = _("module %s is required to create application\n") % w
+ logger.warning(msg)
# generate the launch file
retcode = generate_launch_file(config,
SALOME_modules)
if retcode == 0:
- cmd = src.printcolors.printcLabel("%s/salome" % appli_dir)
+ cmd = UTS.label("%s/salome" % appli_dir)
if display:
- logger.write("\n", 3, False)
- logger.write(_("To launch the application, type:\n"), 3, False)
- logger.write(" %s" % (cmd), 3, False)
- logger.write("\n", 3, False)
+        msg = _("To launch the application, type:")
+ logger.info("\n%s\n %s\n" % (msg, cmd))
return retcode
def get_SALOME_modules(config):
if len(catalog) > 0 and not os.path.exists(catalog):
raise IOError(_("Catalog not found: %s") % catalog)
- write_step(logger, _("Creating environment files"))
- status = src.KO_STATUS
+ logger.info(get_step(_("Creating environment files")))
+ status = "<KO>"
VersionSalome = src.get_salome_version(config)
- if VersionSalome>=820:
+ if VersionSalome >= 820:
# for salome 8+ we use a salome context file for the virtual app
app_shell="cfg"
env_ext="cfg"
logger,
shells=[app_shell],
silent=True)
- status = src.OK_STATUS
+ status = "<OK>"
finally:
- logger.write(src.printcolors.printc(status) + "\n", 2, False)
+ logger.info(status + "\n")
# build the application (the name depends upon salome version
env_file = os.path.join(config.APPLICATION.workdir, "env_launch." + env_ext)
- write_step(logger, _("Building application"), level=2)
+    logger.info(get_step(_("Building application")))
cf = create_config_file(config, l_SALOME_modules, env_file, logger)
# create the application directory
os.makedirs(appli_dir)
# generate the application
- status = src.KO_STATUS
+ status = "<KO>"
try:
retcode = generate_application(config, appli_dir, cf, logger)
customize_app(config, appli_dir, logger)
- status = src.OK_STATUS
+ status = "<OK>"
finally:
- logger.write(src.printcolors.printc(status) + "\n", 2, False)
+ logger.info(status + "\n")
# copy the catalog if one
if len(catalog) > 0:
catfile = src.get_tmp_filename(config, "CatalogResources.xml")
catalog = file(catfile, "w")
- catalog.write("<!DOCTYPE ResourcesCatalog>\n<resources>\n")
+ catalog.write("""\
+<!DOCTYPE ResourcesCatalog>
+<resources>
+""")
+
for k in machines:
- logger.write(" ssh %s " % (k + " ").ljust(20, '.'), 4)
- logger.flush()
+        logger.info(" ssh %s " % (k + " ").ljust(20, '.'))
ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s %s' % (k, cmd)
p = subprocess.Popen(ssh_cmd, shell=True,
p.wait()
if p.returncode != 0:
- logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 4)
- logger.write(" " + src.printcolors.printcWarning(p.stderr.read()),
- 2)
+ logger.error("<KO>\n%s\n" % UTS.red(p.stderr.read()))
else:
- logger.write(src.printcolors.printc(src.OK_STATUS) + "\n", 4)
+ logger.info("<OK>\n")
lines = p.stdout.readlines()
freq = lines[0][:-1].split(':')[-1].split('.')[0].strip()
nb_proc = len(lines) -1
memory = lines[-1].split(':')[-1].split()[0].strip()
memory = int(memory) / 1000
-
- catalog.write(" <machine\n")
- catalog.write(" protocol=\"ssh\"\n")
- catalog.write(" nbOfNodes=\"1\"\n")
- catalog.write(" mode=\"interactif\"\n")
- catalog.write(" OS=\"LINUX\"\n")
- catalog.write(" CPUFreqMHz=\"%s\"\n" % freq)
- catalog.write(" nbOfProcPerNode=\"%s\"\n" % nb_proc)
- catalog.write(" memInMB=\"%s\"\n" % memory)
- catalog.write(" userName=\"%s\"\n" % user)
- catalog.write(" name=\"%s\"\n" % k)
- catalog.write(" hostname=\"%s\"\n" % k)
- catalog.write(" >\n")
- catalog.write(" </machine>\n")
-
+
+ msg = """\
+ <machine
+ protocol="ssh"
+ nbOfNodes="1"
+ mode="interactif"
+ OS="LINUX"
+ CPUFreqMHz="%s"
+ nbOfProcPerNode="%s"
+ memInMB="%s"
+ userName="%s"
+ name="%s"
+ hostname="%s"
+ >
+ </machine>
+"""
+ msg = msg % (freq, nb_proc, memory, user, k, k)
+ catalog.write(msg)
+
catalog.write("</resources>\n")
catalog.close()
return catfile
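
# Illustrative sketch (not part of the patch): how the ssh probe output is parsed
# above into freq / nb_proc / memory; the sample lines are made up, the real text
# depends on the command sent to the remote machine.
_lines = ["cpu MHz         : 2400.000\n",
          "cpu MHz         : 2400.000\n",
          "MemTotal:       16384000 kB\n"]
_freq = _lines[0][:-1].split(':')[-1].split('.')[0].strip()          # "2400"
_nb_proc = len(_lines) - 1                                           # 2
_memory = int(_lines[-1].split(':')[-1].split()[0].strip()) / 1000   # ~16384 (MB)
print(_freq, _nb_proc, _memory)
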
products_infos = get_products_list(options, runner.cfg, logger)
# Print some informations
- logger.write(_('Executing the check command in the build '
- 'directories of the application %s\n') % \
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ msg = _('Executing the check command in the build directories of the application')
+ logger.info("%s %s\n" % (msg, UTS.label(runner.cfg.VARS.application)))
info = [(_("BUILD directory"),
os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Call the function that will loop over all the products and execute
# the right command(s)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nCheck: %(status)s (%(1)d/%(2)d)\n") % \
- { 'status': src.printcolors.printc(final_status),
+ logger.info(_("\nCheck: %(status)s (%(1)d/%(2)d)\n") % \
+ { 'status': final_status,
'1': nb_products - res,
- '2': nb_products }, 1)
+ '2': nb_products })
return res
return products_infos
def log_step(logger, header, step):
- logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
- logger.flush()
+ logger.info("\r%s%s" % (header, " " * 20))
+ logger.info("\r%s%s" % (header, step))
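
# Illustrative sketch (not part of the patch): the "\r" trick used by log_step()
# to overwrite the current step name in place on a terminal; the header and the
# step names are hypothetical.
import sys
import time

_header = "Check of GEOM .............. "
for _step in ("cmake", "make", "install"):
    sys.stdout.write("\r%s%s" % (_header, " " * 20))   # blank the previous step
    sys.stdout.write("\r%s%s" % (_header, _step))      # write the new one
    sys.stdout.flush()
    time.sleep(0.2)
sys.stdout.write("\n")
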
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>\n")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>\n")
def check_all_products(config, products_infos, logger):
'''Execute the proper configuration commands
'''
p_name, p_info = p_name_info
-
- # Logging
- logger.write("\n", 4, False)
- logger.write("################ ", 4)
- header = _("Check of %s") % src.printcolors.printcLabel(p_name)
+
+ header = _("Check of %s") % UTS.label(p_name)
header += " %s " % ("." * (20 - len(p_name)))
- logger.write(header, 3)
- logger.write("\n", 4, False)
- logger.flush()
+ logger.info(header)
# Verify if the command has to be launched or not
ignored = False
+    msg = ""
if not src.get_property_in_product_cfg(p_info, CHECK_PROPERTY):
- msg = _("The product %s is defined as not having tests. product ignored.") % p_name
- logger.write("%s\n" % msg, 4)
+ msg += _("The product %s is defined as not having tests: product ignored.\n") % p_name
ignored = True
if "build_dir" not in p_info:
- msg = _("No build_dir key defined in the config file of %s: product ignored.") % p_name
- logger.write("%s\n" % msg, 4)
+        msg += _("The product %s has no 'build_dir' key: product ignored.\n") % p_name
ignored = True
if not src.product.product_compiles(p_info):
- msg = _("The product %s is defined as not compiling. "
- "product ignored." % p_name)
- logger.write("%s\n" % msg, 4)
+ msg += _("The product %s is defined as not compiling: product ignored.\n") % p_name
ignored = True
-
+
+ logger.info("%s\n" % msg)
# Get the command to execute for script products
cmd_found = True
command = ""
if command == "Not found":
cmd_found = False
msg = _("""\
-WARNING: The product %(name)s is defined as having tests.
- But it is compiled using a script and the key 'test_build'
- is not defined in the definition of %(name)""") % {"name": p_name}
- logger.write("%s\n" % msg, 4)
+The product %(name)s is defined as having tests.
+But it is compiled using a script and the key 'test_build'
+is not defined in the definition of %(name)s.\n""") % {"name": p_name}
+ logger.warning(msg)
if ignored or not cmd_found:
log_step(logger, header, "ignored")
- logger.write("==== %(name)s %(IGNORED)s\n" % \
- { "name" : p_name ,
- "IGNORED" : src.printcolors.printcInfo("IGNORED")},
- 4)
- logger.write("\n", 3, False)
- logger.flush()
+        logger.debug("==== %s %s\n" % (p_name, "IGNORED"))
if not cmd_found:
return 1
return 0
# Log the result
if res > 0:
- logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO"))
- logger.write("==== %(KO)s in check of %(name)s \n" % \
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
- logger.flush()
+ logger.info("\r%s%s" % (header, " " * len_end_line))
+ logger.info("\r" + header + "<KO>\n")
+ logger.debug("==== <KO> in check of %s\n" % p_name)
else:
- logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("==== Check of %(name)s %(OK)s \n" % \
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
- logger.write("\n", 3, False)
+ logger.info("\r%s%s" % (header, " " * len_end_line))
+ logger.info("\r" + header + "<OK>\n")
+ logger.debug("==== <OK> in check of %s\n" % p_name)
+ logger.info("\n")
return res
#!/usr/bin/env python
#-*- coding:utf-8 -*-
-# Copyright (C) 2010-2012 CEA/DEN
+# Copyright (C) 2010-2018 CEA/DEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
if options.properties:
oExpr = re.compile(PROPERTY_EXPRESSION)
if not oExpr.search(options.properties):
- msg = _('WARNING: the "--properties" options must have the '
- 'following syntax:\n--properties <property>:<value>')
- logger.write(src.printcolors.printcWarning(msg), 1)
- logger.write("\n", 1)
+ msg = _("""\
+The '--properties' options must have the following syntax:
+ --properties <property>:<value>\n""")
+ logger.error(msg)
options.properties = None
options.sources_without_dev)
if len(l_dir_to_suppress) == 0:
- logger.write(src.printcolors.printcWarning(_("Nothing to suppress\n")))
- sat_command = (config.VARS.salometoolsway +
- config.VARS.sep +
- "sat -h clean")
- logger.write(_("Please specify what you want to suppress: tap '%s'\n") % sat_command)
+        sat_command = "sat -h clean"
+        msg = _("Nothing to suppress. Please specify what you want to suppress.")
+        logger.error(msg + "\nsee: '%s'\n" % sat_command)
return RCO.ReturnCode("KO", "specify what you want to suppress")
# Check with the user if he really wants to suppress the directories
if not runner.options.batch:
- logger.write(_("Remove the following directories ?\n"), 1)
+ msg = _("Remove the following directories ?\n")
for directory in l_dir_to_suppress:
- logger.write(" %s\n" % directory, 1)
+ msg += " %s\n" % directory
+ logger.info(msg)
rep = input(_("Are you sure you want to continue? [Yes/No] "))
if rep.upper() != _("YES"):
return RCO.ReturnCode("OK", "user does not want to continue")
logging
'''
for path in l_paths:
+ strpath = str(path)
if not path.isdir():
- msg = _("WARNING: the path %s does not "
- "exists (or is not a directory)\n") % path.__str__()
- logger.write(src.printcolors.printcWarning(msg), 1)
+            msg = _("The path %s does not exist (or is not a directory)\n") % strpath
+ logger.warning(msg)
else:
- logger.write(_("Removing %s ...") % path.__str__())
+ logger.info(_("Removing %s ...") % strpath )
path.rm()
- logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+ logger.info('<OK>\n')
#!/usr/bin/env python
#-*- coding:utf-8 -*-
-# Copyright (C) 2010-2012 CEA/DEN
+# Copyright (C) 2010-2018 CEA/DEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
src.check_config_has_application( runner.cfg )
# Print some informations
- logger.write(_('Executing the compile commands in the build '
- 'directories of the products of the application %s\n') %
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ nameApp = str(runner.cfg.VARS.application)
+ srcDir = os.path.join(runner.cfg.APPLICATION.workdir, 'SOURCES')
+ buildDir = os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD')
- info = [
- (_("SOURCE directory"),
- os.path.join(runner.cfg.APPLICATION.workdir, 'SOURCES')),
- (_("BUILD directory"),
- os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))
- ]
- src.print_info(logger, info)
+    msg = _("Application %s, executing compile commands in build directories of products.\n")
+ logger.info(msg % UTS.label(nameApp))
+
+ info = [ (_("SOURCE directory"), srcDir),
+ (_("BUILD directory"),buildDir) ]
+ UTS.logger_info_tuples(logger, info)
# Get the list of products to treat
products_infos = get_products_list(options, runner.cfg, logger)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nCompilation: %(status)s (%(1)d/%(2)d)\n") %
- { 'status': src.printcolors.printc(final_status),
+ logger.info(_("\nCompilation: %(status)s (%(1)d/%(2)d)\n") % \
+ { 'status': final_status,
'1': nb_products - res,
- '2': nb_products }, 1)
+ '2': nb_products })
code = res
if code != 0:
return l_depends_not_installed
def log_step(logger, header, step):
- logger.write("\r%s%s" % (header, " " * 30), 3)
- logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
- logger.flush()
+ logger.info("\r%s%s" % (header, " " * 30))
+ logger.info("\r%s%s" % (header, step))
+ logger.debug("\n==== %s \n" % step)
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>\n")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>\n")
+
def compile_all_products(sat, config, options, products_infos, logger):
- '''Execute the proper configuration commands
- in each product build directory.
+ """\
+ Execute the proper configuration commands
+ in each product build directory.
:param config Config: The global configuration
:param products_info list: List of
:param logger Logger: The logger instance to use for the display and logging
:return: the number of failing commands.
:rtype: int
- '''
+ """
res = 0
for p_name_info in products_infos:
# Logging
len_end_line = 30
- logger.write("\n", 4, False)
- logger.write("################ ", 4)
- header = _("Compilation of %s") % src.printcolors.printcLabel(p_name)
- header += " %s " % ("." * (len_end_line - len(p_name)))
- logger.write(header, 3)
- logger.write("\n", 4, False)
- logger.flush()
-
+ header = _("Compilation of %s") % UTS.label(p_name)
+ header += " %s \n" % ("." * (len_end_line - len(p_name)))
+ logger.info(header)
+
# Do nothing if the product is not compilable
- if ("properties" in p_info and "compilation" in p_info.properties and
- p_info.properties.compilation == "no"):
+ if ("properties" in p_info and \
+ "compilation" in p_info.properties and \
+ p_info.properties.compilation == "no"):
+
log_step(logger, header, "ignored")
- logger.write("\n", 3, False)
+ logger.info("\n")
continue
# Do nothing if the product is native
if src.product.product_is_native(p_info):
log_step(logger, header, "native")
- logger.write("\n", 3, False)
+ logger.info("\n")
continue
# Clean the build and the install directories
# Check if it was already successfully installed
if src.product.check_installation(p_info):
- logger.write(_("Already installed\n"))
+ logger.info(_("Already installed\n"))
continue
# If the show option was called, do not launch the compilation
if options.no_compile:
- logger.write(_("Not installed\n"))
+ logger.info(_("Not installed\n"))
continue
# Check if the dependencies are installed
l_depends_not_installed = check_dependencies(config, p_name_info)
if len(l_depends_not_installed) > 0:
log_step(logger, header, "")
- logger.write(src.printcolors.printcError(
- _("ERROR : the following product(s) is(are) mandatory: ")))
+ msg = _("the following products are mandatory:\n")
for prod_name in l_depends_not_installed:
- logger.write(src.printcolors.printcError(prod_name + " "))
- logger.write("\n")
+ msg += "%s\n" % prod_name
+ logger.error(msg)
continue
# Call the function to compile the product
if error_step != "CHECK":
# Clean the install directory if there is any
- logger.write(
- _("Cleaning the install directory if there is any\n"), 5)
+ logger.debug(_("Cleaning the install directory if there is any\n"))
sat.clean(config.VARS.application +
" --products " + p_name +
" --install",
# Log the result
if res_prod > 0:
- logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO ") + error_step)
- logger.write("\n==== %(KO)s in compile of %(name)s \n" %
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+ logger.info("\r%s%s" % (header, " " * len_end_line))
+        logger.info("\r" + header + "<KO> " + error_step)
+        logger.debug("\n==== <KO> in compile of %s\n" % p_name)
if error_step == "CHECK":
- logger.write(_("\nINSTALL directory = %s") %
- src.printcolors.printcInfo(p_info.install_dir), 3)
- logger.flush()
+ logger.info(_("\nINSTALL directory = %s") % p_info.install_dir)
else:
- logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write(_("\nINSTALL directory = %s") %
- src.printcolors.printcInfo(p_info.install_dir), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("\n==== Compilation of %(name)s %(OK)s \n" %
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
- logger.write("\n", 3, False)
+        logger.info("\r%s%s" % (header, " " * len_end_line))
+ logger.info("\r" + header + "<OK>")
+ logger.info(_("\nINSTALL directory = %s") % p_info.install_dir)
+ logger.debug("\n==== <OK> in compile of %s\n" % p_name)
+ logger.info("\n")
if res_prod != 0 and options.stop_first_fail:
if res==0 and not(os.path.exists(p_info.install_dir)):
res = 1
error_step = "NO INSTALL DIR"
- msg = _("Error: despite the fact that all the steps ended successfully,"
- " no install directory was found !")
- logger.write(src.printcolors.printcError(msg), 4)
- logger.write("\n", 4)
+        msg = _("although all the steps ended successfully, no install directory was found\n")
+ logger.error(msg)
return res, len_end, error_step
# Add the config file corresponding to the dependencies/versions of the
# product that have been successfully compiled
if res==0:
- logger.write(_("Add the config file in installation directory\n"), 5)
+ logger.debug(_("Add the config file in installation directory\n"))
add_compile_config_file(p_info, config)
if options.check:
if src.product.product_has_script(p_info):
# if the product has a compilation script,
# it is executed during make step
- scrit_path_display = src.printcolors.printcLabel(
+ scrit_path_display = UTS.label(
p_info.compil_script)
log_step(logger, header, "SCRIPT " + scrit_path_display)
len_end_line = len(scrit_path_display)
error_step = ""
# Logging and sat command call for the script step
- scrit_path_display = src.printcolors.printcLabel(p_info.compil_script)
+ scrit_path_display = UTS.label(p_info.compil_script)
log_step(logger, header, "SCRIPT " + scrit_path_display)
len_end_line = len_end + len(scrit_path_display)
res = sat.script(config.VARS.application + " --products " + p_name,
if ('APPLICATION' not in config and
'open_application' not in config): # edit user pyconf
usercfg = os.path.join(config.VARS.personalDir, 'SAT.pyconf')
- logger.write(_("Opening %s\n") % usercfg, 3)
+ logger.info(_("Opening %s\n") % usercfg)
src.system.show_in_editor(editor, usercfg, logger)
else:
# search for file <application>.pyconf and open it
for path in config.PATHS.APPLICATIONPATH:
pyconf_path = os.path.join(path, config.VARS.application + ".pyconf")
if os.path.exists(pyconf_path):
- logger.write(_("Opening %s\n") % pyconf_path, 3)
+ logger.info(_("Opening %s\n") % pyconf_path)
src.system.show_in_editor(editor, pyconf_path, logger)
break
# perform the copy
shutil.copyfile(source_full_path, dest_file)
- logger.write(_("%s has been created.\n") % dest_file)
+ logger.info(_("%s has been created.\n") % dest_file)
# case : display all the available pyconf applications
elif options.list:
elif options.show_patchs:
src.check_config_has_application(config)
# Print some informations
- logger.write(_('Show the patchs of application %s\n') % \
- src.printcolors.printcLabel(config.VARS.application), 3)
- logger.write("\n", 2, False)
+        logger.info(_('Show the patches of application %s\n') % \
+ UTS.label(config.VARS.application))
show_patchs(config, logger)
# case: print all the products name of the application (internal use for completion)
elif options.completion:
for product_name in config.APPLICATION.products.keys():
- logger.write("%s\n" % product_name)
+ logger.info("%s\n" % product_name)
return RCO.ReturnCode("OK")
products_infos = get_products_list(options, runner.cfg, logger)
# Print some informations
- logger.write(_('Configuring the sources of the application %s\n') %
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ logger.info(_('Configuring the sources of the application %s\n') %
+ UTS.label(runner.cfg.VARS.application))
info = [(_("BUILD directory"),
os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Call the function that will loop over all the products and execute
# the right command(s)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nConfiguration: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
- { 'status': src.printcolors.printc(final_status),
- 'valid_result': nb_products - res,
- 'nb_products': nb_products }, 1)
+ logger.info(_("\nConfiguration: %(status)s (%(1)d/%(2)d)\n") % \
+ { 'status': final_status,
+ '1': nb_products - res,
+                  '2': nb_products })
return res
return products_infos
def log_step(logger, header, step):
- logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
+ logger.info("\r%s%s" % (header, " " * 20))
+ logger.info("\r%s%s" % (header, step))
+ logger.debug("\n==== %s \n" % UTS.info(step))
logger.flush()
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>")
def configure_all_products(config, products_infos, conf_option, logger):
'''Execute the proper configuration commands
p_name, p_info = p_name_info
# Logging
- logger.write("\n", 4, False)
- logger.write("################ ", 4)
- header = _("Configuration of %s") % src.printcolors.printcLabel(p_name)
+ header = _("Configuration of %s") % UTS.label(p_name)
header += " %s " % ("." * (20 - len(p_name)))
- logger.write(header, 3)
- logger.write("\n", 4, False)
- logger.flush()
-
+ logger.info(header)
+
# Do nothing if the product is not compilable
- if ("properties" in p_info and "compilation" in p_info.properties and
- p_info.properties.compilation == "no"):
+ if ("properties" in p_info and \
+ "compilation" in p_info.properties and \
+ p_info.properties.compilation == "no"):
+
log_step(logger, header, "ignored")
- logger.write("\n", 3, False)
+ logger.info("\n")
return 0
# Instantiate the class that manages all the construction commands
# Log the result
if res > 0:
- logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO"))
- logger.write("==== %(KO)s in configuration of %(name)s \n" %
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
- logger.flush()
+ logger.info("\r%s%s" % (header, " " * 20))
+ logger.info("\r" + header + "<KO>")
+ logger.debug("==== <KO> in configuration of %s\n" % p_name)
else:
- logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("==== Configuration of %(name)s %(OK)s \n" %
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
- logger.write("\n", 3, False)
+ logger.info("\r%s%s" % (header, " " * 20))
+ logger.info("\r" + header + "<OK>")
+ logger.debug("==== <OK> in configuration of %s\n" % p_name)
+ logger.info("\n")
return res
write_all_source_files(runner.cfg, logger, out_dir=out_dir, shells=shell,
prefix=options.prefix, env_info=environ_info)
- logger.write("\n", 3, False)
+ logger.info("\n")
#TODO return code
def write_all_source_files(config,
for shell in shells:
if shell not in C_SHELLS:
- logger.write(_("Unknown shell: %s\n") % shell, 2)
+ logger.warning(_("Unknown shell: %s\n") % shell)
else:
shells_list.append(src.environment.Shell(shell, C_SHELLS[shell]))
if not(os.path.isdir(dir_path)):
msg = _("%s does not exist or is not a directory path: it will be ignored" %
dir_path)
- logger.write("%s\n" % src.printcolors.printcWarning(msg), 3)
+ logger.warning("%s\n" % msg)
continue
l_dir_path.append(dir_path)
(_("Ignored extensions"), extension_ignored),
(_("Ignored directories"), directories_ignored)
]
- print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Get all the files and paths
- logger.write(_("Store all file paths ... "), 3)
- logger.flush()
+    logger.info(_("Store all file paths ... "))
dic, fic = list_directory(l_dir_path,
extension_ignored,
files_ignored,
directories_ignored)
- logger.write(src.printcolors.printcSuccess('OK\n'), 3)
+ logger.info("<OK>\n")
# Eliminate all the singletons
len_fic = len(fic)
dic.remove(dic[i])
# Format the resulting variable to get a dictionary
- logger.write(_("\n\nCompute the dict {files : [list of pathes]} ... "), 3)
+ logger.info(_("\n\nCompute the dict for file -> list of paths ... "))
fic.sort()
len_fic = len(fic)
rg_fic = range(0,len_fic)
l_path.append(fic_path[1])
dic_fic_paths[the_file] = l_path
- logger.write(src.printcolors.printcSuccess('OK\n'), 3)
+    logger.info("<OK>\n")
# End the execution if no duplicates were found
if len(dic_fic_paths) == 0:
- logger.write(_("No duplicate files found.\n"), 3)
+ logger.info(_("No duplicate files found.\n"))
return 0
# Check that there are no singletons in the result (it would be a bug)
for elem in dic_fic_paths:
if len(dic_fic_paths[elem])<2:
- logger.write(
- _("WARNING : element %s has not more than two paths.\n") % elem,
- 3 )
+            logger.warning(_("Element %s has fewer than two paths.\n") % elem)
# Display the results
- logger.write(src.printcolors.printcInfo(_('\nResults:\n\n')), 3)
+ logger.info(_('\nResults:\n\n'))
max_file_name_lenght = max(map(lambda l: len(l), dic_fic_paths.keys()))
for fich in dic_fic_paths:
- logger.write(src.printcolors.printcLabel(fich), 1)
sp = " " * (max_file_name_lenght - len(fich))
- logger.write(sp, 1)
+ msg = UTS.label(fich) + sp
for rep in dic_fic_paths[fich]:
- logger.write(rep, 1)
- logger.write(" ", 1)
- logger.write("\n", 1)
+ msg += rep + " "
+ logger.info(msg + "\n")
return 0
:param val float: val must be between valMin and valMax.
'''
if val < self.valMin or val > self.valMax:
- self.logger.write(src.printcolors.printcWarning(_(
- 'WARNING : wrong value for the progress bar.\n')), 3)
+            self.logger.error(_("Wrong value for the progress bar.\n"))
else:
perc = (float(val-self.valMin) / (self.valMax - self.valMin)) * 100.
nb_equals = int(perc * self.length / 100)
out = '\r %s : %3d %% [%s%s]' % (self.name, perc, nb_equals*'=',
(self.length - nb_equals)*' ' )
- self.logger.write(out, 3)
- self.logger.flush()
+ self.logger.info(out)
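
# Illustrative sketch (not part of the patch): the same bar arithmetic as the
# update above, with made-up bounds; valMin, valMax, length and name mirror the
# attributes of the progress bar class.
_valMin, _valMax, _length, _name = 0.0, 100.0, 30, "copy"
_val = 42.0
_perc = (float(_val - _valMin) / (_valMax - _valMin)) * 100.
_nb_equals = int(_perc * _length / 100)
print('\r %s : %3d %% [%s%s]' % (_name, _perc,
                                 _nb_equals * '=', (_length - _nb_equals) * ' '))
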
src.check_config_has_application(runner.cfg)
logger.write(_('Generation of SALOME modules for application %s\n') % \
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ UTS.label(runner.cfg.VARS.application), 1)
status = src.KO_STATUS
if isinstance(yacsgen_dir, tuple):
# The check failed
__, error = yacsgen_dir
- msg = _("Error: %s") % error
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("check yacsgen: %s\n") % error
+ logger.error(msg)
return 1
# Make the generator module visible by python
context = build_context(runner.cfg, logger)
for product in products:
- header = _("Generating %s") % src.printcolors.printcLabel(product)
+ header = _("Generating %s") % UTS.label(product)
header += " %s " % ("." * (20 - len(product)))
logger.write(header, 3)
logger.flush()
def generate_component_list(config, product_info, context, logger):
res = "?"
- logger.write("\n", 3)
+ logger.info("\n")
for compo in src.product.get_product_components(product_info):
- header = " %s %s " % (src.printcolors.printcLabel(compo),
- "." * (20 - len(compo)))
+ header = " %s %s " % (UTS.label(compo), "." * (20 - len(compo)))
res = generate_component(config,
compo,
product_info,
header,
logger)
if config.USER.output_verbose_level == 3:
- logger.write("\r%s%s\r%s" % (header, " " * 20, header), 3)
- logger.write(src.printcolors.printc(res), 3, False)
- logger.write("\n", 3, False)
+ logger.info("\r%s%s\r%s" % (header, " " * 20, header))
+ logger.info(res + "\n")
return res
def generate_component(config, compo, product_info, context, header, logger):
warn = _("product %(product)s is not defined. Include it in the"
" application or define $%(env)s.") % \
{ "product": p, "env": prod_env}
- logger.write(src.printcolors.printcWarning(warn), 1)
+ logger.write(UTS.red(warn), 1)
logger.write("\n", 3, False)
val = ""
val = ctxenv.environ.environ[prod_env]
# Print some informations
logger.write(_('Local Settings of SAT %s\n\n') % \
- src.printcolors.printcLabel(runner.cfg.VARS.salometoolsway), 1)
+ UTS.label(runner.cfg.VARS.salometoolsway), 1)
res = 0
("archive_dir", config.LOCAL.archive_dir),
("VCS", config.LOCAL.VCS),
("tag", config.LOCAL.tag)]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
return 0
# If it is a file, do nothing and return error
if path.isfile():
- msg = _("ERROR: The given path is a file. Please provide a path to a directory")
- logger.write(src.printcolors.printcError(msg), 1)
+ msg = _("""\
+The given path is a file: %s
+Please provide a path to a directory\n""") % UTS.blue(path_to_check)
+ logger.error(msg)
return 1
# Try to create the given path
try:
src.ensure_path_exists(str(path))
except Exception as e:
- err = src.printcolors.printcError(str(e))
- msg = _("Unable to create the directory '%(1)s': %(2)s\n") % \
- {"1": str(path), "2": err}
- logger.write(msg, 1)
+ msg = _("""\
+Unable to create the directory %s:
+
+%s\n""") % (UTS.blue(str(path)), UTS.yellow(e))
+ logger.error(msg)
return 1
return 0
# Make sure the jobs_config option has been called
if not options.jobs_cfg:
message = _("The option --jobs_config is required\n")
- logger.write(src.printcolors.printcError(message))
+ logger.error(message)
return 1
# Make sure the name option has been called
if not options.job:
message = _("The option --name is required\n")
- logger.write(src.printcolors.printcError(message))
+ logger.error(message)
return 1
# Find the file in the directories
- found = False
- for cfg_dir in l_cfg_dir:
- file_jobs_cfg = os.path.join(cfg_dir, options.jobs_cfg)
- if not file_jobs_cfg.endswith('.pyconf'):
- file_jobs_cfg += '.pyconf'
+    found = False
+    fPyconf = options.jobs_cfg
+    if not fPyconf.endswith('.pyconf'):
+        fPyconf += '.pyconf'
- if not os.path.exists(file_jobs_cfg):
- continue
- else:
+ for cfg_dir in l_cfg_dir:
+ file_jobs_cfg = os.path.join(cfg_dir, fPyconf)
+ if os.path.exists(file_jobs_cfg):
found = True
break
-
+
if not found:
- msg = _("The file configuration %(name_file)s was not found.\n"
- "Use the --list option to get the possible files.")
- src.printcolors.printcError(msg)
+ msg = _("""\
+The job file configuration %s was not found.
+Use the --list option to get the possible files.""") % UTS.blue(fPyconf)
+ logger.error(msg)
return 1
- info = [
- (_("Platform"), runner.cfg.VARS.dist),
- (_("File containing the jobs configuration"), file_jobs_cfg)
- ]
- src.print_info(logger, info)
+ info = [ (_("Platform"), runner.cfg.VARS.dist),
+ (_("File containing the jobs configuration"), file_jobs_cfg) ]
+ UTS.logger_info_tuples(logger, info)
# Read the config that is in the file
config_jobs = src.read_config_from_a_file(file_jobs_cfg)
found = True
break
if not found:
- msg = _("Impossible to find the job '%(job_name)s' in %(jobs_config_file)s" % \
- {"job_name" : options.job, "jobs_config_file" : file_jobs_cfg})
- logger.write(src.printcolors.printcError(msg) + "\n")
+        msg = _("Impossible to find the job %s in %s\n") % \
+              (options.job, file_jobs_cfg)
+ logger.error(msg)
return 1
# Find the maximum length of the commands in order to format the display
# Get dynamically the command function to call
sat_command = runner.__getattr__(sat_command_name)
- logger.write("Executing " +
- src.printcolors.printcLabel(command) + " ", 3)
- logger.write("." * (len_max_command - len(command)) + " ", 3)
- logger.flush()
+ logger.info("Executing " + UTS.label(command) + " " +
+ "." * (len_max_command - len(command)) + " ")
error = ""
stack = ""
# Print the status of the command
if code == 0:
nb_pass += 1
- logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+ logger.info("<OK>\n")
else:
if sat_command_name != "test":
res = 1
- logger.write('%s %s\n' % (src.printcolors.printc(src.KO_STATUS), error), 3)
+ logger.write('<KO>: %s\n' % error)
if len(stack) > 0:
logger.write('stack: %s\n' % stack, 3)
# Print the final state
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nCommands: %(status)s (%(1)d/%(2)d)\n") % \
- { 'status': src.printcolors.printc(final_status),
- '1': nb_pass,
- '2': len(commands) }, 3)
+ logger.info(_("\nCommands: %s (%d/%d)\n") % \
+ (final_status, nb_pass, len(commands)))
return res
if options.list:
for cfg_dir in l_cfg_dir:
if not options.no_label:
- logger.write("------ %s\n" %
- src.printcolors.printcHeader(cfg_dir))
+ logger.info("------ %s\n" % UTS.blue(cfg_dir))
if not os.path.exists(cfg_dir):
continue
for f in sorted(os.listdir(cfg_dir)):
# Make sure the jobs_config option has been called
if not options.jobs_cfg:
message = _("The option --jobs_config is required\n")
- src.printcolors.printcError(message)
+ logger.error(message)
return 1
# Find the file in the directories, unless it is a full path
if not found:
msg = _("""\
The file configuration %s was not found.
-Use the --list option to get the possible files.""") % config_file
- logger.write("%s\n" % src.printcolors.printcError(msg), 1)
+Use the --list option to get the possible files.\n""") % config_file
+ logger.error(msg)
return 1
l_conf_files_path.append(file_jobs_cfg)
# Read the config that is in the file
one_config_jobs = src.read_config_from_a_file(file_jobs_cfg)
merger.merge(config_jobs, one_config_jobs)
- info = [
- (_("Platform"), runner.cfg.VARS.dist),
- (_("Files containing the jobs configuration"), l_conf_files_path)
- ]
- src.print_info(logger, info)
+ info = [(_("Platform"), runner.cfg.VARS.dist),
+ (_("Files containing the jobs configuration"), l_conf_files_path)]
+ UTS.logger_info_tuples(logger, info)
if options.only_jobs:
l_jb = PYCONF.Sequence()
gui = None
if options.publish:
- logger.write(src.printcolors.printcInfo(
+ logger.write(UTS.info(
_("Initialize the xml boards : ")), 5)
logger.flush()
logger,
file_boards = options.input_boards)
- logger.write(src.printcolors.printcSuccess("OK"), 5)
- logger.write("\n\n", 5)
- logger.flush()
+ logger.debug("<OK>\n\n")
# Display the list of the xml files
- logger.write(src.printcolors.printcInfo(("Here is the list of published"
+ logger.write(UTS.info(("Here is the list of published"
" files :\n")), 4)
logger.write("%s\n" % gui.xml_global_file.logFile, 4)
for board in gui.d_xml_board_files.keys():
today_jobs.run_jobs()
except KeyboardInterrupt:
interruped = True
- logger.write("\n\n%s\n\n" %
- (src.printcolors.printcWarning(_("Forced interruption"))), 1)
+        logger.critical(UTS.red(_("KeyboardInterrupt forced interruption\n")))
except Exception as e:
- msg = _("CRITICAL ERROR: The jobs loop has been interrupted\n")
- logger.write("\n\n%s\n" % src.printcolors.printcError(msg) )
- logger.write("%s\n" % str(e))
- # get stack
- __, __, exc_traceback = sys.exc_info()
- fp = tempfile.TemporaryFile()
- traceback.print_tb(exc_traceback, file=fp)
- fp.seek(0)
- stack = fp.read()
- logger.write("\nTRACEBACK:\n%s\n" % stack.replace('"',"'"), 1)
+ # verbose debug message with traceback
+ msg = _("Exception raised, the jobs loop has been interrupted:\n\n%s\n")
+ import traceback
+ logger.critical( msg % UTS.yellow(traceback.format_exc()))
finally:
res = 0
if interruped:
res = 1
msg = _("Killing the running jobs and trying to get the corresponding logs\n")
- logger.write(src.printcolors.printcWarning(msg))
+ logger.write(UTS.red(msg))
# find the potential not finished jobs and kill them
for jb in today_jobs.ljobs:
jb.kill_remote_process()
except Exception as e:
msg = _("Failed to kill job %(1)s: %(2)s\n") % {"1": jb.name, "2": e}
- logger.write(src.printcolors.printcWarning(msg))
+ logger.write(UTS.red(msg))
if jb.res_job != "0":
res = 1
if interruped:
(name: %(1)s host: %(2)s, port: %(3)s, user: %(4)s) is OK
whereas there were no connection request""" %
{"1": self.name, "2": self.host, "3": self.port, "4": self.user} )
- logger.write( src.printcolors.printcWarning(message))
+ logger.write( UTS.red(message))
return self._connection_successful
def copy_sat(self, sat_local_path, job_file):
:rtype: (paramiko.channel.ChannelFile, paramiko.channel.ChannelFile,
paramiko.channel.ChannelFile)
'''
+ import traceback
try:
# Does not wait the end of the command
(stdin, stdout, stderr) = self.ssh.exec_command(command)
except self.paramiko.SSHException:
- message = src.KO_STATUS + _(
- ": the server failed to execute the command\n")
- logger.write( src.printcolors.printcError(message))
+ msg = _("<KO>: the paramiko server failed to execute the command\n")
+ msg += "command: '%s'\n" % command
+ msg += "\n%s\n" % UTS.yellow(traceback.format_exc())
+ logger.critical(msg)
return (None, None, None)
- except:
- logger.write( src.printcolors.printcError(src.KO_STATUS + '\n'))
+ except Exception as e:
+ msg = _("<KO>: an exception raised on ssh.exec_command:\n")
+ msg += "command: '%s'\n" % command
+ msg += "\n%s\n" % UTS.yellow(traceback.format_exc())
+ logger.critical(msg)
return (None, None, None)
- else:
- return (stdin, stdout, stderr)
+ return (stdin, stdout, stderr)
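
# Illustrative sketch (not part of the patch): the minimal paramiko pattern wrapped
# by exec_ssh_command() above; the host, user and command are hypothetical.
import paramiko

_client = paramiko.SSHClient()
_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
_client.connect("machine.example.org", username="sat_user")
_stdin, _stdout, _stderr = _client.exec_command("uname -a")
print(_stdout.read().decode())
_client.close()
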
def close(self):
'''Close the ssh connection
# Do not get the files if the command is not finished
if not self.has_finished():
msg = _("Trying to get log files whereas the job is not finished.")
- self.logger.write(src.printcolors.printcWarning(msg))
+ self.logger.write(UTS.red(msg))
return
# First get the file that contains the list of log files to get
msg2 = _("Trying to launch the job \"%s\" whereas it has "
"already been launched.") % self.name
self.logger.write(
- src.printcolors.printcWarning("%s\n%s\n" % (msg,msg2)) )
+ UTS.red("%s\n%s\n" % (msg,msg2)) )
return
# Do not execute the command if the machine could not be reached
machine_head = "Informations about connection :\n"
underline = (len(machine_head) - 2) * "-"
- self.logger.write(src.printcolors.printcInfo(
+ self.logger.write(UTS.info(
machine_head+underline+"\n"))
self.machine.write_info(self.logger)
- self.logger.write(src.printcolors.printcInfo("out : \n"))
+ self.logger.write(UTS.info("out : \n"))
if self.out == "":
self.logger.write("Unable to get output\n")
else:
self.logger.write(self.out + "\n")
- self.logger.write(src.printcolors.printcInfo("err : \n"))
+ self.logger.write(UTS.info("err : \n"))
self.logger.write(self.err + "\n")
def get_status(self):
msg = _("""\
WARNING: The job '%s' does not have the key 'machine'.
This job is ignored.\n""") % job_def.name
- self.logger.write(src.printcolors.printcWarning(msg))
+ self.logger.write(UTS.red(msg))
continue
name_machine = job_def.machine
This machine is not defined in the configuration file.
The job will not be launched.
""") % {"job" : job_def.name, "machine" : name_machine}
- self.logger.write(src.printcolors.printcWarning(msg))
+ self.logger.write(UTS.red(msg))
continue
a_job = self.define_job(job_def, a_machine)
:return: Nothing
:rtype: N\A
'''
- self.logger.write(src.printcolors.printcInfo((
+ self.logger.write(UTS.info((
"Establishing connection with all the machines :\n")))
for machine in self.lmachines:
# little algorithm in order to display traces
# Print the status of the copy
if res_copy == 0:
- self.logger.write('\r%s' %
+ self.logger.write('\r%s' % \
((len(begin_line)+len(endline)+20) * " "), 3)
- self.logger.write('\r%s%s%s' %
- (begin_line,
- endline,
- src.printcolors.printc(src.OK_STATUS)), 3)
+ self.logger.info('\r%s%s%s' % (begin_line, endline, "<OK>"))
else:
- self.logger.write('\r%s' %
+ self.logger.write('\r%s' % \
((len(begin_line)+len(endline)+20) * " "), 3)
- self.logger.write('\r%s%s%s %s' %
- (begin_line,
- endline,
- src.printcolors.printc(src.KO_STATUS),
- _("Copy of SAT failed: %s") % res_copy), 3)
+ self.logger.info('\r%s%s%s %s' % \
+ (begin_line, endline, "<KO>",
+ _("Copy of SAT failed: %s") % res_copy))
else:
self.logger.write('\r%s' %
((len(begin_line)+len(endline)+20) * " "), 3)
- self.logger.write('\r%s%s%s %s' %
- (begin_line,
- endline,
- src.printcolors.printc(src.KO_STATUS),
- msg), 3)
- self.logger.write("\n", 3)
+ self.logger.write('\r%s%s%s %s' % (begin_line, endline, "<KO>", msg))
+ self.logger.info("\n")
- self.logger.write("\n")
+ self.logger.info("\n")
def is_occupied(self, hostname):
empty = self.str_of_length("empty", len_col)
display_line += "|" + empty
else:
- display_line += "|" + src.printcolors.printcInfo(
+ display_line += "|" + UTS.info(
self.str_of_length(jb.name, len_col))
self.logger.write("\r" + display_line + "|")
# Print header
self.logger.write(
- src.printcolors.printcInfo(_('Executing the jobs :\n')) )
+ UTS.info(_('Executing the jobs :\n')) )
text_line = ""
for host_port in self.lhosts:
host = host_port[0]
'''
for jb in self.ljobs:
- self.logger.write(src.printcolors.printcLabel(
+ self.logger.write(UTS.label(
"#------- Results for job %s -------#\n" % jb.name))
jb.write_results()
self.logger.write("\n\n")
except Exception as e:
msg = _("WARNING: the file '%(1)s' can not be read, it will be "
"ignored\n%(2)s") % {"1": file_path, "2": e}
- self.logger.write("%s\n" % src.printcolors.printcWarning(
+ self.logger.write("%s\n" % UTS.red(
msg), 5)
# Construct the dictionnary self.history
src.xmlManager.add_simple_node(xmlj, "state", job.get_status())
src.xmlManager.add_simple_node(xmlj, "begin", T0)
src.xmlManager.add_simple_node(xmlj, "end", Tf)
- src.xmlManager.add_simple_node(xmlj, "out",
- src.printcolors.cleancolor(job.out))
- src.xmlManager.add_simple_node(xmlj, "err",
- src.printcolors.cleancolor(job.err))
+ src.xmlManager.add_simple_node(xmlj, "out", UTS.cleancolor(job.out))
+ src.xmlManager.add_simple_node(xmlj, "err", UTS.cleancolor(job.err))
src.xmlManager.add_simple_node(xmlj, "res", str(job.res_job))
if len(job.remote_log_files) > 0:
src.xmlManager.add_simple_node(xmlj,
if display:
# Write the launcher file
logger.write(_("Generating launcher for %s :\n") %
- src.printcolors.printcLabel(config.VARS.application), 1)
- logger.write(" %s\n" % src.printcolors.printcLabel(filepath), 1)
+ UTS.label(config.VARS.application), 1)
+ logger.write(" %s\n" % UTS.label(filepath), 1)
# open the file and write into it
launch_file = open(filepath, "w")
# Write into it
catalog.write("<!DOCTYPE ResourcesCatalog>\n<resources>\n")
for k in machines:
- logger.write(" ssh %s " % (k + " ").ljust(20, '.'), 4)
- logger.flush()
+ logger.debug(" ssh %s " % (k + " ").ljust(20, '.'))
# Verify that the machine is accessible
ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s %s' % (k, cmd)
p.wait()
if p.returncode != 0: # The machine is not accessible
- logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 4)
- logger.write(" " +
- src.printcolors.printcWarning(p.stderr.read()), 2)
+        logger.error("<KO>: The machine %s is not accessible:\n%s\n" % \
+                     (k, UTS.red(p.stderr.read())))
else:
# The machine is accessible, write the corresponding section on
# the xml file
- logger.write(src.printcolors.printc(src.OK_STATUS) + "\n", 4)
+ logger.debug("<OK>: The machine %s is accessible:\n" % k)
lines = p.stdout.readlines()
freq = lines[0][:-1].split(':')[-1].split('.')[0].strip()
nb_proc = len(lines) -1
nb_files_log_dir = len(glob.glob(os.path.join(logDir, "*")))
info = [("log directory", logDir),
("number of log files", nb_files_log_dir)]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# If the clean options is invoked,
# do nothing but deleting the concerned files.
remove_log_file(pyconfFilePath, logger)
- logger.write(src.printcolors.printcSuccess("OK\n"))
- logger.write("%i logs deleted.\n" % nbClean)
+ logger.write("<OK>\n%i logs deleted.\n" % nbClean)
return 0
# determine the commands to show in the hat log
index = 0
# loop on all files and print it with date, time and command name
for __, date, hour, cmd, cmdAppli in lLogsFiltered:
- num = src.printcolors.printcLabel("%2d" % (nb_logs - index))
+ num = UTS.label("%2d" % (nb_logs - index))
logger.write("%s: %13s %s %s %s\n" %
(num, cmd, date, hour, cmdAppli), 1, False)
index += 1
src.logger.update_hat_xml(logDir,
application = runner.cfg.VARS.application,
notShownCommands = notShownCommands)
- logger.write(src.printcolors.printc("OK"), 3)
- logger.write("\n", 3)
+    logger.info("<OK>\n")
# open the hat xml in the user editor
if not options.no_browser:
:param logger Logger: the logger instance to use for the print
'''
if os.path.exists(filePath):
- logger.write(src.printcolors.printcWarning("Removing ")
+ logger.write(UTS.red("Removing ")
+ filePath + "\n", 5)
os.remove(filePath)
context and traces
:param logger Logger: the logging instance to use in order to print.
'''
- logger.write(_("Reading ") + src.printcolors.printcHeader(filePath) + "\n", 5)
+ logger.debug(_("Reading %s\n") % filePath)
# Instantiate the ReadXmlFile class that reads xml files
xmlRead = src.xmlManager.ReadXmlFile(filePath)
# Get the attributes containing the context (user, OS, time, etc..)
for attrib in dAttrText:
lAttrText.append((attrib, dAttrText[attrib]))
logger.write("\n", 1)
- src.print_info(logger, lAttrText)
+ UTS.logger_info_tuples(logger, lAttrText)
# Get the traces
command_traces = xmlRead.get_node_text('Log')
# Print it if there is any
if command_traces:
- logger.write(src.printcolors.printcHeader(
- _("Here are the command traces :\n")), 1)
- logger.write(command_traces, 1)
- logger.write("\n", 1)
+ logger.info(UTS.header(_("Here are the command traces :\n")))
+ logger.info(command_traces + "\n" )
def getMaxFormat(aListOfStr, offset=1):
"""returns format for columns width as '%-30s"' for example"""
k = index + i * col_size
if k < nb:
l = log_dirs[k]
- str_indice = src.printcolors.printcLabel("%2d" % (k+1))
+ str_indice = UTS.label("%2d" % (k+1))
log_name = l
logger.write(fmt2 % (str_indice, log_name), 1, False)
logger.write("\n", 1, False)
# display the available logs
for i, (__, file_name) in enumerate(sorted(l_time_file)):
- str_indice = src.printcolors.printcLabel("%2d" % (i+1))
+ str_indice = UTS.label("%2d" % (i+1))
opt = []
my_stat = os.stat(os.path.join(product_log_dir, file_name))
opt.append(str(datetime.datetime.fromtimestamp(my_stat[stat.ST_MTIME])))
# Print some informations
logger.write(
_('Executing the make command in the build directories of the application %s\n') %
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ UTS.label(runner.cfg.VARS.application), 1)
info = [(_("BUILD directory"),
os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Call the function that will loop over all the products and execute
# the right command(s)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nMake: %(status)s (%(1)d/%(2)d)\n") %
- { 'status': src.printcolors.printc(final_status),
- '1': nb_products - res,
- '2': nb_products }, 1)
+ logger.info(_("\nMake: %s (%d/%d)\n") % \
+ (final_status, nb_products - res, nb_products))
return res
def log_step(logger, header, step):
logger.write("\r%s%s" % (header, " " * 20), 3)
logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
+ logger.write("\n==== %s \n" % UTS.info(step), 4)
logger.flush()
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>\n")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>\n")
+
def make_all_products(config, products_infos, make_option, logger):
'''Execute the proper configuration commands
# Logging
logger.write("\n", 4, False)
logger.write("################ ", 4)
- header = _("Make of %s") % src.printcolors.printcLabel(p_name)
+ header = _("Make of %s") % UTS.label(p_name)
header += " %s " % ("." * (20 - len(p_name)))
logger.write(header, 3)
logger.write("\n", 4, False)
# Log the result
if res > 0:
logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO"))
- logger.write("==== %(KO)s in make of %(name)s \n" %
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
- logger.flush()
+ logger.write("\r" + header + "<KO>")
+ logger.debug("==== <KO> in make of %s\n" % p_name)
else:
logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("==== Make of %(name)s %(OK)s \n" %
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
- logger.write("\n", 3, False)
+ logger.write("\r" + header + "<OK>")
+ logger.debug("==== <OK> in make of %s\n" % p_name)
+ logger.write("\n")
return res
# Print some informations
logger.write(_('Executing the make install command in the build directories of the application %s\n') %
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ UTS.label(runner.cfg.VARS.application), 1)
info = [(_("BUILD directory"),
os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Call the function that will loop over all the products and execute
# the right command(s)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nMake install: %(status)s (%(1)d/%(2)d)\n") % \
- { 'status': src.printcolors.printc(final_status),
- '1': nb_products - res,
- '2': nb_products }, 1)
+ logger.info(_("\nMake install: %s (%d/%d)\n") % \
+ (final_status, nb_products - res, nb_products))
return res
def log_step(logger, header, step):
logger.write("\r%s%s" % (header, " " * 20), 3)
logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
+ logger.write("\n==== %s \n" % UTS.info(step), 4)
logger.flush()
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>\n")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>\n")
def makeinstall_all_products(config, products_infos, logger):
'''Execute the proper configuration commands
# Logging
logger.write("\n", 4, False)
logger.write("################ ", 4)
- header = _("Make install of %s") % src.printcolors.printcLabel(p_name)
+ header = _("Make install of %s") % UTS.label(p_name)
header += " %s " % ("." * (20 - len(p_name)))
logger.write(header, 3)
logger.write("\n", 4, False)
# Log the result
if res > 0:
logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO"))
- logger.write("==== %(KO)s in make install of %(name)s \n" %
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
- logger.flush()
+ logger.write("\r" + header + "<KO>")
+        logger.error("==== <KO> in make install of %s\n" % p_name)
else:
logger.write("\r%s%s" % (header, " " * 20), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("==== Make install of %(name)s %(OK)s \n" %
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
+ logger.write("\r" + header + "<OK>")
+ logger.write("==== <OK> in make install of %s\n" % p_name)
logger.write("\n", 3, False)
return res
import tarfile
import codecs
import string
+import traceback
from commands.application import get_SALOME_modules
# Check if no option for package type
if all_option_types.count(True) == 0:
- msg = _("ERROR: needs a type for the package\n"
- " Use one of the following options:\n"
- " --binaries, --sources, --project or --salometools")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("""\
+A package type is required.
+Use one of the following options:
+ '--binaries' '--sources' '--project' or '--salometools'\n""")
+ logger.error(msg)
return 1
# The repository where to put the package if not Binary or Source
# Display information
logger.write(_("Packaging application %s\n") % \
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ UTS.label(runner.cfg.VARS.application), 1)
# Get the default directory where to put the packages
package_default_path = os.path.join(runner.cfg.APPLICATION.workdir, "PACKAGE")
if options.project:
# check that the project is visible by SAT
if options.project not in runner.cfg.PROJECTS.project_file_paths:
- local_path = os.path.join(runner.cfg.VARS.salometoolsway,
- "data",
- "local.pyconf")
- msg = _("ERROR: the project %(proj)s is not visible by salomeTools."
- "\nPlease add it in the %(local)s file.") % \
- {"proj" : options.project, "local" : local_path}
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ local_path = os.path.join(
+ runner.cfg.VARS.salometoolsway, "data", "local.pyconf")
+ msg = _("""\
+The project %s is not visible to salomeTools.
+Please add it in the %s file.\n""") % (options.project, local_path)
+ logger.error(msg)
return 1
# Remove the products that are filtered by the --without_property option
if options.sat:
archive_name += ("salomeTools_" + runner.cfg.INTERNAL.sat_version)
if len(archive_name)==0: # no option worked
- msg = _("Error: Cannot name the archive\n"
- " check if at least one of the following options was "
- "selected: --binaries, --sources, --project or"
- " --salometools")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("""\
+Cannot name the archive.
+Check that at least one of the following options was selected:
+ '--binaries' '--sources' '--project' or '--salometools'\n""")
+ logger.error(msg)
return 1
path_targz = os.path.join(dir_name, archive_name + ".tgz")
logger.write("\n", 3)
msg = _("Preparation of files to add to the archive")
- logger.write(src.printcolors.printcLabel(msg), 2)
+ logger.write(UTS.label(msg), 2)
logger.write("\n", 2)
d_files_to_add={} # content of the archive
d_files_to_add.update(project_package(options.project, tmp_working_dir))
if not(d_files_to_add):
- msg = _("Error: Empty dictionnary to build the archive!\n")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("Empty dictionnary to build the archive.\n")
+ logger.error(msg)
return 1
# Add the README file in the package
logger.write("\n", 2)
- logger.write(src.printcolors.printcLabel(_("Actually do the package")), 2)
+ logger.write(UTS.label(_("Actually do the package")), 2)
logger.write("\n", 2)
try:
# Add the files to the tarfile object
res = add_files(tar, archive_name, d_files_to_add, logger, f_exclude=filter_function)
tar.close()
+
except KeyboardInterrupt:
- logger.write(src.printcolors.printcError("\nERROR: forced interruption\n"), 1)
- logger.write(_("Removing the temporary working directory ... "), 1)
+        logger.critical(UTS.red(_("KeyboardInterrupt forced interruption\n")))
+ logger.info(_("Removing the temporary working directory ... "))
# remove the working directory
shutil.rmtree(tmp_working_dir)
- logger.write(_("OK"), 1)
- logger.write(_("\n"), 1)
+ logger.info("<OK>")
return 1
# remove the working directory
# Add it in the archive
try:
tar.add(local_path, arcname=in_archive, exclude=f_exclude)
- logger.write(src.printcolors.printcSuccess(_("OK")), 3)
+ logger.info("<OK>\n")
except Exception as e:
- logger.write(src.printcolors.printcError(_("KO ")), 3)
- logger.write(str(e), 3)
+ logger.info("<KO> %s\n" str(e))
success = 1
- logger.write("\n", 3)
return success
def exclude_VCS_and_extensions(filename):
text_missing_prods = ""
for p_name in l_not_installed:
text_missing_prods += "-" + p_name + "\n"
+
+ msg = _("There are missing products installations:\n")
+ logger.warning(msg + text_missing_prods))
if not options.force_creation:
- msg = _("ERROR: there are missing products installations:")
- logger.write("%s\n%s" % (src.printcolors.printcError(msg),
- text_missing_prods),
- 1)
return None
- else:
- msg = _("WARNING: there are missing products installations:")
- logger.write("%s\n%s" % (src.printcolors.printcWarning(msg),
- text_missing_prods),
- 1)
# Do the same for sources
if len(l_sources_not_present) > 0:
text_missing_prods = ""
for p_name in l_sources_not_present:
text_missing_prods += "-" + p_name + "\n"
+
+ msg = _("There are missing products sources:\n")
+ logger.warning(msg + text_missing_prods)
if not options.force_creation:
- msg = _("ERROR: there are missing products sources:")
- logger.write("%s\n%s" % (src.printcolors.printcError(msg),
- text_missing_prods),
- 1)
return None
- else:
- msg = _("WARNING: there are missing products sources:")
- logger.write("%s\n%s" % (src.printcolors.printcWarning(msg),
- text_missing_prods),
- 1)
# construct the name of the directory that will contain the binaries
binaries_dir_name = "BINARIES-" + config.VARS.dist
logger.write("\n", 2, False)
if good_result == len(products_infos):
- status = src.OK_STATUS
- res_count = "%d / %d" % (good_result, good_result)
+ status = "<OK>"
else:
- status = src.KO_STATUS
- res_count = "%d / %d" % (good_result, len(products_infos))
+ status = "<KO>"
# write results
- logger.write("Patching sources of the application:", 1)
- logger.write(" " + src.printcolors.printc(status), 1, False)
- logger.write(" (%s)\n" % res_count, 1, False)
-
+ logger.info(_("\nPatching sources of the application: %s (%d/%d)\n") % \
+ (status, good_result, len(products_infos)))
+
return len(products_infos) - good_result
# if the product is native, do not apply patch
if src.product.product_is_native(product_info):
# display and log
- logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 4)
+ logger.write('%s: ' % UTS.label(product_info.name), 4)
logger.write(' ' * (max_product_name_len - len(product_info.name)), 4, False)
logger.write("\n", 4, False)
msg = _("The %s product is native. Do not apply any patch.") % product_info.name
if not "patches" in product_info or len(product_info.patches) == 0:
# display and log
- logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 4)
+ logger.write('%s: ' % UTS.label(product_info.name), 4)
logger.write(' ' * (max_product_name_len - len(product_info.name)), 4, False)
logger.write("\n", 4, False)
msg = _("No patch for the %s product") % product_info.name
return True, ""
else:
# display and log
- logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 3)
+ logger.write('%s: ' % UTS.label(product_info.name), 3)
logger.write(' ' * (max_product_name_len - len(product_info.name)), 3, False)
logger.write("\n", 4, False)
if not os.path.exists(product_info.source_dir):
msg = _("No sources found for the %s product\n") % product_info.name
- logger.write(src.printcolors.printcWarning(msg), 1)
+ logger.write(UTS.red(msg), 1)
return False, ""
# At this point, there one or more patches and the source directory exists
logger.logTxtFile.flush()
# Call the command
- res_cmd = (subprocess.call(patch_cmd,
- shell=True,
- cwd=product_info.source_dir,
- stdout=logger.logTxtFile,
- stderr=subprocess.STDOUT) == 0)
+ res_cmd = subprocess.call(
+ patch_cmd,
+ shell=True,
+ cwd=product_info.source_dir,
+ stdout=logger.logTxtFile,
+ stderr=subprocess.STDOUT )
+
+ res_cmd = (res_cmd == 0)
else:
res_cmd = False
- details.append(" " +
- src.printcolors.printcError(_("Not a valid patch: %s") % patch))
+ details.append(" " + UTS.red(_("Not a valid patch: %s\n")) % patch)
res.append(res_cmd)
if res_cmd:
- message = (_("Apply patch %s") %
- src.printcolors.printcHighlight(patch))
+ message = _("Apply patch %s") % UTS.blue(patch)
else:
- message = src.printcolors.printcWarning(
- _("Failed to apply patch %s") % patch)
+ message = _("Failed to apply patch %s") % UTS.red(patch)
if config.USER.output_verbose_level >= 3:
retcode.append(" %s" % message)
if len(l_products_not_getted) > 0:
msg = _("Do not get the source of the following products in development mode\n"
" Use the --force option to overwrite it.\n")
- logger.write(src.printcolors.printcWarning(msg), 1)
+ logger.write(UTS.red(msg), 1)
args_product_opt_clean = remove_products(args_product_opt_clean,
l_products_not_getted,
logger)
if len(l_products_with_patchs) > 0:
msg = _("do not patch the following products in development mode\n"
" Use the --force_patch option to overwrite it.\n")
- logger.write(src.printcolors.printcWarning(msg), 1)
+ logger.write(UTS.red(msg), 1)
args_product_opt_patch = remove_products(args_product_opt_patch,
l_products_with_patchs,
logger)
if options.prefix is None:
msg = _("The --%s argument is required\n") % "prefix"
- logger.write(src.printcolors.printcWarning(msg), 1)
+ logger.write(UTS.red(msg), 1)
return 1
retcode = generate_profile_sources( runner.cfg, options, logger )
# Display information : how to get the logs
messageFirstPart = _("\nEnd of execution. To see the traces, "
"please tap the following command :\n")
- messageSecondPart = src.printcolors.printcLabel(
+ messageSecondPart = UTS.label(
runner.cfg.VARS.salometoolsway +
os.sep +
"sat log " +
# Print some informations
msg = ('Executing the script in the build directories of the application %s\n') % \
- src.printcolors.printcLabel(runner.cfg.VARS.application)
+ UTS.label(runner.cfg.VARS.application)
logger.write(msg, 1)
info = [(_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
- src.print_info(logger, info)
+ UTS.logger_info_tuples(logger, info)
# Call the function that will loop over all the products and execute
# the right command(s)
# Print the final state
nb_products = len(products_infos)
if res == 0:
- final_status = "OK"
+ final_status = "<OK>"
else:
- final_status = "KO"
+ final_status = "<KO>"
- logger.write(_("\nScript: %(status)s (%(1)d/%(2)d)\n") % \
- { 'status': src.printcolors.printc(final_status),
- '1': nb_products - res,
- '2': nb_products }, 1)
+ logger.info( _("\nScript: %(s (%d/%d)\n") % \
+ (final_status, nb_products - res, nb_products) )
return res
def log_step(logger, header, step):
logger.write("\r%s%s" % (header, " " * 20), 3)
logger.write("\r%s%s" % (header, step), 3)
- logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
+ logger.write("\n==== %s \n" % UTS.info(step), 4)
logger.flush()
def log_res_step(logger, res):
if res == 0:
- logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
- logger.flush()
+ logger.debug("<OK>\n")
else:
- logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
- logger.flush()
+ logger.debug("<KO>\n")
def run_script_all_products(config, products_infos, nb_proc, logger):
'''Execute the script in each product build directory.
# Logging
logger.write("\n", 4, False)
logger.write("################ ", 4)
- header = _("Running script of %s") % src.printcolors.printcLabel(p_name)
+ header = _("Running script of %s") % UTS.label(p_name)
header += " %s " % ("." * (20 - len(p_name)))
logger.write(header, 3)
logger.write("\n", 4, False)
# Execute the script
len_end_line = 20
- script_path_display = src.printcolors.printcLabel(p_info.compil_script)
+ script_path_display = UTS.label(p_info.compil_script)
log_step(logger, header, "SCRIPT " + script_path_display)
len_end_line += len(script_path_display)
res = builder.do_script_build(p_info.compil_script, number_of_proc=nb_proc)
# Log the result
if res > 0:
logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcError("KO"))
- logger.write("==== %(KO)s in script execution of %(name)s \n" % \
- { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
- logger.flush()
+ logger.write("\r" + header + "<KO>")
+ logger.debug("==== <KO> in script execution of %s\n" % p_name)
else:
logger.write("\r%s%s" % (header, " " * len_end_line), 3)
- logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
- logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
- logger.write("==== Script execution of %(name)s %(OK)s \n" % \
- { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
- logger.flush()
- logger.write("\n", 3, False)
+ logger.write("\r" + header + "<OK>"))
+ logger.debug("==== <OK> in script execution of %s\n" % p_name)
+ logger.write("\n")
return res
# Make sure the command option has been called
if not options.command:
- message = _("The option --command is required\n")
- logger.write(src.printcolors.printcError(message))
+ msg = _("The option --command is required\n")
+ logger.error(msg)
return 1
# Print the input command
# Format the result to be 0 (success) or 1 (fail)
if res != 0:
res = 1
- logger.write(src.printcolors.printc("KO"), 3)
+ logger.info("<KO>\n")
else:
- logger.write(src.printcolors.printc("OK"), 3)
-
- logger.write("\n",3)
+ logger.info("<OK>\n")
return res
# Print some informations
logger.write(_('Getting sources of the application %s\n') % \
- src.printcolors.printcLabel(config.VARS.application), 1)
+ UTS.label(config.VARS.application), 1)
logger.info(" workdir = %s\n" % config.APPLICATION.workdir)
# Get the products list with products informations regarding the options
# Display the results (how much passed, how much failed, etc...)
details = []
-
- logger.write("\n", 2, False)
- if good_result == len(products_infos):
- res_count = "%d / %d" % (good_result, good_result)
- returnCode = RCO.ReturnCode("OK", "source "+res_count)
+ nbExpected = len(products_infos)
+ msgCount = "(%d/%d)" % (good_result, nbExpected)
+ if good_result == nbExpected:
+ status = "OK"
+ msg = _("Getting sources of the application")
+ logger.info("\n%s %s: <%s>.\n" % (msg, msgCount, status))
else:
- res_count = "%d / %d" % (good_result, len(products_infos))
- returnCode = RCO.ReturnCode("KO", "source "+res_count)
- for product in results:
- if results[product] == 0 or results[product] is None:
- details.append(product)
-
- result = len(products_infos) - good_result
-
- # write results
- logger.write(_("Getting sources of the application:"), 1)
- logger.write(" " + src.printcolors.printc(status), 1, False)
- logger.write(" (%s)\n" % res_count, 1, False)
-
- if len(details) > 0:
- logger.write(_("Following sources haven't been get:\n"), 2)
- logger.write(" ".join(details), 2)
- logger.write("\n", 2, False)
+ status = "KO"
+ msg = _("Some sources haven't been get")
+ details = [p for p in results if (results[product] == 0 or results[product] is None)]
+ details = " ".join(details)
+ logger.info("\n%s %s: <%s>.\n%s\n" % (msg, msgCount, status, details))
- return returnCode
+    return RCO.ReturnCode(status, "%s %s" % (msg, msgCount))
def get_source_for_dev(config, product_info, source_dir, logger, pad):
logger.write(" " * (pad+2), 3, False)
logger.write('dev: %s ... ' % \
- src.printcolors.printcInfo(product_info.source_dir), 3, False)
+ UTS.info(product_info.source_dir), 3, False)
logger.flush()
return retcode
# Get the repository address. (from repo_dev key if the product is
# in dev mode.
if is_dev and 'repo_dev' in product_info.git_info:
- coflag = src.printcolors.printcHighlight(coflag.upper())
+ coflag = coflag.upper()
repo_git = product_info.git_info.repo_dev
else:
repo_git = product_info.git_info.repo
# Display informations
- logger.write('%s:%s' % (coflag, src.printcolors.printcInfo(repo_git)), 3,
- False)
- logger.write(' ' * (pad + 50 - len(repo_git)), 3, False)
- logger.write(' tag:%s' % src.printcolors.printcInfo(
- product_info.git_info.tag),
- 3,
- False)
- logger.write(' %s. ' % ('.' * (10 - len(product_info.git_info.tag))), 3,
- False)
- logger.flush()
- logger.write('\n', 5, False)
+ msg = "'%s:%s" % (coflag, repo_git)
+ msg += " " * (pad + 50 - len(repo_git))
+ msg += " tag:%s" % product_info.git_info.tag
+ msg += "%s. " % "." * (10 - len(product_info.git_info.tag))
+ logger.write("\n" + msg)
+
# Call the system function that do the extraction in git mode
retcode = src.system.git_extract(repo_git,
product_info.git_info.tag,
product_info.archive_info.archive_name)
logger.write('arc:%s ... ' % \
- src.printcolors.printcInfo(product_info.archive_info.archive_name),
+ UTS.info(product_info.archive_info.archive_name),
3, False)
logger.flush()
# Call the system function that do the extraction in archive mode
def get_source_from_dir(product_info, source_dir, logger):
if "dir_info" not in product_info:
- msg = _("Error: you must put a dir_info section in the file %s.pyconf") % \
+ msg = _("You must put a dir_info section in the file %s.pyconf") % \
product_info.name
- logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
+ logger.error(msg)
return False
if "dir" not in product_info.dir_info:
msg = _("Error: you must put a dir in the dir_info section in the file %s.pyconf") % \
product_info.name
- logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
+ logger.error(msg)
return False
# check that source exists
if not os.path.exists(product_info.dir_info.dir):
- msg = _("ERROR: the dir '%(1)s' defined in the file %(2)s.pyconf does not exists") % \
- {"1": product_info.dir_info.dir, "2": product_info.name}
- logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
+ msg = _("The dir %s defined in the file %s.pyconf does not exists") % \
+ (product_info.dir_info.dir, product_info.name)
+ logger.error(msg)
return False
- logger.write('DIR: %s ... ' % src.printcolors.printcInfo(
+ logger.write('DIR: %s ... ' % UTS.info(
product_info.dir_info.dir), 3)
- logger.flush()
retcode = src.Path(product_info.dir_info.dir).copy(source_dir)
product_info.cvs_info.product_base)
coflag = 'cvs'
- if checkout: coflag = src.printcolors.printcHighlight(coflag.upper())
-
- logger.write('%s:%s' % (coflag, src.printcolors.printcInfo(cvs_line)),
- 3, False)
- logger.write(' ' * (pad + 50 - len(cvs_line)), 3, False)
- logger.write(' src:%s' % src.printcolors.printcInfo(product_info.cvs_info.source),
- 3, False)
- logger.write(' ' * (pad + 1 - len(product_info.cvs_info.source)), 3, False)
- logger.write(' tag:%s' % src.printcolors.printcInfo(product_info.cvs_info.tag),
- 3, False)
+ if checkout: coflag = coflag.upper()
+
+ msg = '%s:%s' % (coflag, cvs_line)
+ msg += " " * (pad + 50 - len(cvs_line))
+ msg += " src:%s" % product_info.cvs_info.source
+ msg += " " * (pad + 1 - len(product_info.cvs_info.source))
+ msg += " tag:%s" % product_info.cvs_info.tag
+
# at least one '.' is visible
- logger.write(' %s. ' % ('.' * (10 - len(product_info.cvs_info.tag))),
- 3, False)
- logger.flush()
- logger.write('\n', 5, False)
+ msg += " %s. " % ("." * (10 - len(product_info.cvs_info.tag)))
+
+ logger.write(msg)
# Call the system function that do the extraction in cvs mode
retcode = src.system.cvs_extract(protocol, user,
:rtype: boolean
'''
coflag = 'svn'
- if checkout: coflag = src.printcolors.printcHighlight(coflag.upper())
+ if checkout: coflag = coflag.upper()
+
+    logger.write('%s:%s ... ' % (coflag, product_info.svn_info.repo))
- logger.write('%s:%s ... ' % (coflag, src.printcolors.printcInfo(product_info.svn_info.repo)),
- 3, False)
- logger.flush()
- logger.write('\n', 5, False)
# Call the system function that do the extraction in svn mode
retcode = src.system.svn_extract(user,
product_info.svn_info.repo,
'''
# Get the application environment
- logger.write(_("Set the application environment\n"), 5)
+ logger.info(_("Set the application environment\n"))
env_appli = src.environment.SalomeEnviron(config,
src.environment.Environ(dict(os.environ)))
env_appli.set_application_env(logger)
if product_info.get_source == "native":
# skip
- logger.write('%s ' % src.printcolors.printc(RCO.OK_STATUS),
- 3,
- False)
- msg = _("INFORMATION : do nothing because the product is of type 'native'.\n")
- logger.write(msg, 3)
+ msg = "<OK>" + _("\ndo nothing because the product is of type 'native'.\n")
+ logger.write(msg)
return True
if product_info.get_source == "fixed":
# skip
- logger.write('%s ' % src.printcolors.printc(RCO.OK_STATUS),
- 3,
- False)
- msg = _("INFORMATION : do nothing because the product is of type 'fixed'.\n")
- logger.write(msg, 3)
+ msg = "<OK>" + _("\ndo nothing because the product is of type 'fixed'.\n")
+ logger.write(msg)
return True
# if the get_source is not in [git, archive, cvs, svn, fixed, native]
- logger.write(_("Unknown get source method '%(get)s' for product %(product)s") % \
- {'get': product_info.get_source, 'product': product_info.name},
- 3, False)
- logger.write(" ... ", 3, False)
- logger.flush()
+ msg = _("Unknown get source method '%s' for product %s") % \
+ ( product_info.get_source, product_info.name)
+ logger.write("%s ... " % msg)
return False
def get_all_product_sources(config, products, logger):
source_dir = src.Path('')
# display and log
- logger.write('%s: ' % src.printcolors.printcLabel(product_name), 3)
+ logger.write('%s: ' % UTS.label(product_name), 3)
logger.write(' ' * (max_product_name_len - len(product_name)), 3, False)
logger.write("\n", 4, False)
# the product is not in development mode
is_dev = src.product.product_is_dev(product_info)
if source_dir.exists():
- logger.write('%s ' % src.printcolors.printc(RCO.OK_STATUS),
- 3,
- False)
- msg = _("INFORMATION : Not doing anything because the source"
- " directory already exists.\n")
- logger.write(msg, 3)
+ logger.info("<OK>\n")
+ msg = _("Nothing done because source directory existing yet.\n")
+ logger.info(msg)
good_result = good_result + 1
# Do not get the sources and go to next product
continue
check_OK, wrong_path = check_sources(product_info, logger)
if not check_OK:
# Print the missing file path
- msg = _("The required file %s does not exists. ") % wrong_path
- logger.write(src.printcolors.printcError("\nERROR: ") + msg, 3)
+ msg = _("The required file %s does not exists.\n") % wrong_path
+ logger.error(msg)
retcode = False
# show results
results[product_name] = retcode
if retcode:
# The case where it succeed
- res = RCO.OK_STATUS
+ res = "<OK>"
good_result = good_result + 1
else:
# The case where it failed
- res = RCO.KO_STATUS
+ res = "<KO>"
# print the result
if not(src.product.product_is_fixed(product_info) or
src.product.product_is_native(product_info)):
- logger.write('%s\n' % src.printcolors.printc(res), 3, False)
+ logger.info('%s\n' % res)
return good_result, results
if ("present_files" in product_info and
"source" in product_info.present_files):
l_files_to_be_tested = product_info.present_files.source
+ res = True # all ok a priori
+ filesKo = "" # None
for file_path in l_files_to_be_tested:
- # The path to test is the source directory
- # of the product joined the file path provided
+ # add source directory of the product
path_to_test = os.path.join(product_info.source_dir, file_path)
- logger.write(_("\nTesting existence of file: \n"), 5)
- logger.write(path_to_test, 5)
+ msg = _("File %s testing existence:" % path_to_test)
if not os.path.exists(path_to_test):
- return False, path_to_test
- logger.write(src.printcolors.printcSuccess(" OK\n"), 5)
- return True, ""
+ logger.debug("%s <KO>\n" % msg)
+ res = False
+ # return False, path_to_test #break at first
+ filesKo += path_to_test + "\n" # check all
+ else:
+ logger.debug("%s <OK>\n" % msg)
+ return res, filesKo
logger = self.getLogger()
options = self.getOptions()
+ msg_miss = _("The --%s argument is required\n")
if options.template is None:
- msg = _("Error: the --%s argument is required\n") % "template"
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ logger.error(msg_miss % "template")
return 1
if options.target is None and options.info is None:
- msg = _("Error: the --%s argument is required\n") % "target"
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ logger.error(msg_miss % "target")
return 1
if "APPLICATION" in runner.cfg:
- msg = _("Error: this command does not use a product.")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("This command does not use a product.\n")
+ logger.error(msg)
return 1
if options.info:
return get_template_info(runner.cfg, options.template, logger)
if options.name is None:
- msg = _("Error: the --%s argument is required\n") % "name"
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ logger.error(msg_miss % "name")
return 1
if not options.name.replace('_', '').isalnum():
- msg = _("Error: component name must contains only alphanumeric "
- "characters and no spaces\n")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("""\
+Component name must contain only alphanumeric characters and no spaces\n""")
+ logger.error(msg)
return 1
    # CNC: unnecessary
# Ask user confirmation if a module of the same name already exists
#if options.name in runner.cfg.PRODUCTS and not runner.options.batch:
- # logger.write(src.printcolors.printcWarning(
+ # logger.write(UTS.red(
# _("A module named '%s' already exists." % options.name)), 1)
# logger.write("\n", 1)
# rep = input(_("Are you sure you want to continue? [Yes/No] "))
# return 1
if options.target is None:
- msg = _("Error: the --%s argument is required\n") % "target"
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ logger.error(msg_miss % "target")
return 1
target_dir = os.path.join(options.target, options.name)
if os.path.exists(target_dir):
- msg = _("Error: the target already exists: %s") % target_dir
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("The target already exists: %s\n") % target_dir
+ logger.error(msg)
return 1
    # CNC: unnecessary
# if "_APPLI" not in options.name and not runner.options.batch:
# msg = _("An Application module named '..._APPLI' "
# "is usually recommended.")
- # logger.write(src.printcolors.printcWarning(msg), 1)
+ # logger.write(UTS.red(msg), 1)
# logger.write("\n", 1)
# rep = input(_("Are you sure you want to continue? [Yes/No] "))
# if rep.upper() != _("YES"):
msg += ' destination = %s\n' % target_dir
    msg += ' name = %s\n' % options.name
msg += ' template = %s\n' % options.template
- logger.write(msg)
+ logger.info(msg)
conf_values = None
if options.param is not None:
for elt in options.param.split(","):
param_def = elt.strip().split('=')
if len(param_def) != 2:
- msg = _("Error: bad parameter definition")
- logger.write(src.printcolors.printcError(msg), 1)
- logger.write("\n", 1)
+ msg = _("Bad parameter definition: '%s'\n") % elt
+ logger.error(msg)
return 1
conf_values[param_def[0].strip()] = param_def[1].strip()
target_dir, conf_values, logger)
if retcode == 0:
- logger.write(_(
- "The sources were created in %s") % src.printcolors.printcInfo(
- target_dir), 3)
- logger.write(src.printcolors.printcWarning(_("\nDo not forget to put "
- "them in your version control system.")), 3)
-
- logger.write("\n", 3)
+ logger.info(_("The sources were created in %s\n") % UTS.info(target_dir))
+ msg = _("Do not forget to put them in your version control system.\n")
+ logger.info("\n" + UTS.red(msg))
+ else:
+ logger.info("\n")
return retcode
# copy the template
if os.path.isfile(template_src_dir):
logger.write(" " + _(
- "Extract template %s\n") % src.printcolors.printcInfo(
+ "Extract template %s\n") % UTS.info(
template), 4)
src.system.archive_extract(template_src_dir, target_dir)
else:
logger.write(" " + _(
- "Copy template %s\n") % src.printcolors.printcInfo(
+ "Copy template %s\n") % UTS.info(
template), 4)
shutil.copytree(template_src_dir, target_dir)
logger.write("\n", 5)
tsettings = TemplateSettings(compo_name, settings_file, target_dir)
# first rename the files
- logger.write(" " + src.printcolors.printcLabel(_("Rename files\n")), 4)
+ logger.write(" " + UTS.label(_("Rename files\n")), 4)
for root, dirs, files in os.walk(target_dir):
for fic in files:
ff = fic.replace(tsettings.file_subst, compo_name)
# rename the directories
logger.write("\n", 5)
- logger.write(" " + src.printcolors.printcLabel(_("Rename directories\n")),
+ logger.write(" " + UTS.label(_("Rename directories\n")),
4)
for root, dirs, files in os.walk(target_dir, topdown=False):
for rep in dirs:
# ask for missing parameters
logger.write("\n", 5)
- logger.write(" " + src.printcolors.printcLabel(
+ logger.write(" " + UTS.label(
_("Make substitution in files\n")), 4)
logger.write(" " + _("Delimiter =") + " %s\n" % tsettings.delimiter_char,
5)
logger.write(" %s %s\n" % (changed, fpath[pathlen:]), 5)
if not tsettings.has_pyconf:
- logger.write(src.printcolors.printcWarning(_(
+ logger.write(UTS.red(_(
"Definition for sat not found in settings file.")) + "\n", 2)
else:
definition = tsettings.pyconf % dico
f.write(definition)
            f.close()
logger.write(_(
- "Create configuration file: ") + src.printcolors.printcInfo(
+ "Create configuration file: ") + UTS.info(
pyconf_file) + "\n", 2)
if len(tsettings.post_command) > 0:
cmd = tsettings.post_command % dico
logger.write("\n", 5, True)
logger.write(_(
- "Run post command: ") + src.printcolors.printcInfo(cmd) + "\n", 3)
+ "Run post command: ") + UTS.info(cmd) + "\n", 3)
p = subprocess.Popen(cmd, shell=True, cwd=target_dir)
p.wait()
if runner.cfg.VARS.application != 'None':
logger.write(
_('Running tests on application %s\n') %
- src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+ UTS.label(runner.cfg.VARS.application), 1)
with_application = True
elif not options.base:
raise Exception(
if with_application:
# check if environment is loaded
if 'KERNEL_ROOT_DIR' in os.environ:
- logger.write( src.printcolors.printcWarning(
+ logger.write( UTS.red(
_("WARNING: SALOME environment already sourced")) + "\n", 1 )
elif options.launcher:
- logger.write(src.printcolors.printcWarning(
- _("Running SALOME application.")) + "\n\n", 1)
+ logger.write(UTS.red(_("Running SALOME application.")) + "\n\n", 1)
else:
- msg = _("Impossible to find any launcher.\n"
- "Please specify an application or a launcher")
- logger.write(src.printcolors.printcError(msg))
- logger.write("\n")
+ msg = _("""\
+Impossible to find any launcher.
+Please specify an application or a launcher\n""")
+ logger.error(msg)
return 1
# set the display
finalPath))
f.close()
- logger.write(src.printcolors.printc("OK"), 3, False)
- logger.write("\n", 3, False)
+ logger.info("<OK>\n")
def check_remote_machine(machine_name, logger):
- logger.write(_("\ncheck the display on %s\n") % machine_name, 4)
+ logger.debug(_("Check the display on %s\n") % machine_name)
ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s "ls"' % machine_name
- logger.write(_("Executing the command : %s ") % ssh_cmd, 4)
+ logger.debug(_("Executing the command : %s\n") % ssh_cmd)
p = subprocess.Popen(ssh_cmd,
shell=True,
stdin =subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
if p.returncode != 0:
- logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 1)
- logger.write(" " + src.printcolors.printcError(p.stderr.read()), 2)
- logger.write(src.printcolors.printcWarning(
- _("No ssh access to the display machine.")), 1)
+ msg = "<KO> on '%s'" % ssh_cmd
+ logger.critical(msg)
+ logger.error(UTS.red(p.stderr.read()))
+ logger.error(UTS.red(_("No ssh access to the display machine %s.") % machine_name))
else:
- logger.write(src.printcolors.printcSuccess(src.OK_STATUS) + "\n\n", 4)
+ logger.debug("<OK>\n")
##
# Creates the XML report for a product.
("<bright>", ST.BRIGHT),
("<normal>", ST.NORMAL),
("<reset>", ST.RESET_ALL),
+ ("<info>", ST.RESET_ALL),
("<header>", FG.BLUE),
+ ("<warning>", FG.RED),
+ ("<error>", FG.RED + ST.BRIGHT),
+ ("<critical>", FG.RED + ST.BRIGHT),
("<OK>", FG.GREEN + ST.BRIGHT + "OK" + ST.RESET_ALL),
("<KO>", FG.RED + ST.BRIGHT + "KO" + ST.RESET_ALL),
)
("<bright>", ""),
("<normal>", ""),
("<reset>", ""),
+ ("<info>", ""),
("<header>", ""),
+ ("<warning>", ""),
+ ("<error>", ""),
+ ("<critical>", ""),
("<OK>", "OK"),
("<KO>", "KO"),
) )
return self.logs
def toColor(msg):
+ """
+ automatically clean the message of color tags '<red> ...
+ if the terminal output stdout is redirected by user
+ if not, replace tags with ansi color codes
+ example:
+ >> sat compile SALOME > log.txt
+ """
if not ('isatty' in dir(sys.stdout) and sys.stdout.isatty()):
- # clean the message color if the terminal is redirected by user
- # ex: sat compile appli > log.txt
+ # clean the message color (if the terminal is redirected by user)
return replace(msg, _tagsNone)
else:
return replace(msg, _tags)
+
+def cleanColor(msg):
+ """clean the message of color tags '<red> ... """
+ return replace(msg, _tagsNone)
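+
+# A minimal usage sketch (assuming the module-level replace() helper and the
+# _tags / _tagsNone tables defined above):
+#   toColor("make <OK>")     # -> ANSI-colored "make OK" when stdout is a tty
+#   cleanColor("make <OK>")  # -> plain "make OK", suitable for log files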
def toColor_AnsiToWin32(msg):
"""for test debug no wrapping"""
# Shortcut method to log in log file.
def log(self, text, level, showInfo=True):
self.logger.write(text, level, showInfo)
- self.logger.logTxtFile.write(src.printcolors.cleancolor(text))
+ self.logger.logTxtFile.write(UTS.cleancolor(text))
self.logger.flush()
##
hh = 'MSBUILD /m:%s' % str(nb_proc)
if self.debug_mode:
- hh += " " + src.printcolors.printcWarning("DEBUG")
+ hh += " " + UTS.red("DEBUG")
# make
command = 'msbuild'
command = command + " /maxcpucount:" + str(nb_proc)
# script found
self.logger.write(_("Compile %(product)s using script %(script)s\n") %
{ 'product': self.product_info.name,
- 'script': src.printcolors.printcLabel(script) }, 4)
+ 'script': UTS.label(script) }, 4)
try:
import imp
product = self.product_info.name
import src.pyconf as PYCONF
class ConfigOpener:
- '''Class that helps to find an application pyconf
- in all the possible directories (pathList)
- '''
+ """
+ Class that helps to find an application pyconf
+ in all the possible directories (pathList)
+ """
def __init__(self, pathList):
'''Initialization
_("Error in configuration file: (1)s.pyconf\n %(2)s") % \
{ 'application': application, 'error': str(e) } )
else:
- sys.stdout.write(src.printcolors.printcWarning(
+ sys.stdout.write(UTS.red(
"There is an error in the file %s.pyconf.\n" % \
cfg.VARS.application))
do_merge = False
if ( not('-e' in parser.parse_args()[1]) or
('--edit' in parser.parse_args()[1]) and
command == 'config' ):
- sys.stdout.write(src.printcolors.printcWarning("%s\n" % str(e)))
+ sys.stdout.write(UTS.red("%s\n" % str(e)))
raise Exception(
_("Error in configuration file: %s.pyconf\n") % application )
else:
- sys.stdout.write(src.printcolors.printcWarning(
+ sys.stdout.write(UTS.red(
"ERROR: in file %s.pyconf. Opening the file with the default viewer\n" % \
cfg.VARS.application))
- sys.stdout.write("\n%s\n" % src.printcolors.printcWarning(str(e)))
+ sys.stdout.write("\n%s\n" % UTS.red(str(e)))
do_merge = False
else:
'''
# check if file exists
if not os.path.exists(path):
- return "'%s' %s" % (path, src.printcolors.printcError(_("** not found")))
+ return "path '%s' ** not found" % path
# check extension
if len(ext) > 0:
fe = os.path.splitext(path)[1].lower()
if fe not in ext:
- return "'%s' %s" % (path, src.printcolors.printcError(_("** bad extension")))
+ return "path '%s' ** bad extension" % path
return path
def msgAdd(label, value):
"""
- local short named macro
+        local helper for convenience,
appending show_product_info.msg variable
"""
msg += " %s = %s\n" % (label, value)
msg = _("WARNING: the logs_paths_in_file option will "
"not be taken into account.\nHere is the error:")
logger_command.write("%s\n%s\n\n" % (
- src.printcolors.printcWarning(msg),
+ UTS.red(msg),
str(e)))
self.options.logs_paths_in_file = None
def write(title, var="", force=None, fmt="\n#### DEBUG: %s:\n%s\n"):
"""write sys.stderr a message if _debug[-1]==True or optionaly force=True"""
if _debug[-1] or force:
- if 'pyconf.Config' in str(type(var)):
+ if '.Config' in str(type(var)):
sys.stderr.write(fmt % (title, indent(getStrConfigDbg(var))))
if 'loggingSat.UnittestStream' in str(type(var)):
sys.stderr.write(fmt % (title, indent(var.getLogs())))
"""
if not self.silent:
self.logger.write(_("Create environment file %s\n") %
- src.printcolors.printcLabel(filename), 3)
+ UTS.label(filename), 3)
# create then env object
env_file = open(os.path.join(self.out_dir, filename), "w")
"""
if not self.silent:
self.logger.write(_("Create configuration file %s\n") %
- src.printcolors.printcLabel(filename.name), 3)
+ UTS.label(filename.name), 3)
# create then env object
tmp = src.fileEnviron.get_file_environ(filename,
--- /dev/null
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+
+__doc__="""
+Utility to print environment variables
+
+examples:
+ - split all or specific environment variables $XXX(s)...
+ >> environs.py -> all
+ >> environs.py SHELL PATH -> specific $SHELL $PATH
+
+ - split all or specific environment variables on pattern $*XXX*(s)...
+ >> environs.py --pat ROOT -> specific $*ROOT*
+
+  - search for specific substrings in the contents of environment variables $XXX(s)...
+ >> environs.py --grep usr -> all specific environment variables containing usr
+
+tips:
+ - create unix alias as shortcut for bash console
+ >> alias envs=".../environs.py"
+"""
+
+import sys
+import os
+
+def _test_var_args(args):
+ for arg in args:
+ print "another arg:", arg
+
+def _printOneLineOrNot(i, env):
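+    # prints the variable 'i' from 'env' split on ':', one element per line;
+    # the variable name appears only on the first line, e.g. (hypothetical):
+    #   PATH                           = /usr/bin
+    #                                    /usr/local/bin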
+ splitenv = env[i].split(":")
+ done = False
+ nb = 20
+ for j in splitenv:
+ if j!="":
+ if not done:
+ print "{:<30} = {}".format(i, j)
+ done=True
+ else:
+ print "{:<30} {}".format(" ", j)
+
+def print_split_environs(args=[]):
+ env=os.environ
+ for i in sorted(env):
+ if (len(args)==0) or (i in args):
+ _printOneLineOrNot(i, env)
+
+def print_split_pattern_environs(args=[]):
+ env=os.environ
+ for i in sorted(env):
+ ok = False
+ for j in args:
+ if j in i:
+ ok = True
+ #print "i %s j %s %s" % (i,j,ok)
+ if (len(args)==0) or (ok):
+ _printOneLineOrNot(i, env)
+
+def print_grep_environs(args=[]):
+ env=os.environ
+ for i in sorted(env):
+ for j in env[i].split(":"):
+ for a in args:
+ if a in j:
+ #print i+" contains "+j
+ print '{:<20} contains {}'.format(i,j)
+
+
+if __name__ == '__main__':
+ import sys
+ args=sys.argv[1:]
+ if len(args)<1:
+ print_split_environs()
+ elif args[0] in ["-h","--help"]:
+ print __doc__
+ elif args[0] in ["-g","--grep"]:
+ print_grep_environs(args[1:])
+ elif args[0] in ["-p","--pat"]:
+ print_split_pattern_environs(args[1:])
+ else:
+ print_split_environs(args)
+
def write(self, command, name, value, sign="="):
import src
self.output.write(" %s%s %s %s %s\n" % \
- (src.printcolors.printcLabel(command),
+ (UTS.label(command),
" " * (12 - len(command)),
- src.printcolors.printcInfo(name), sign, value))
+ UTS.info(name), sign, value))
def is_defined(self, name):
return self.defined.has_key(name)
def prepare_testbase_from_dir(self, testbase_name, testbase_dir):
self.logger.write(_("get test base from dir: %s\n") % \
- src.printcolors.printcLabel(testbase_dir), 3)
+ UTS.label(testbase_dir), 3)
if not os.access(testbase_dir, os.X_OK):
raise Exception(
_("testbase %(name)s (%(dir)s) does not exist ...\n") % \
testbase_tag):
self.logger.write(
_("get test base '%(testbase)s' with '%(tag)s' tag from git\n") % \
- { "testbase" : src.printcolors.printcLabel(testbase_name),
- "tag" : src.printcolors.printcLabel(testbase_tag) },
+ { "testbase" : UTS.label(testbase_name),
+ "tag" : UTS.label(testbase_tag) },
3)
try:
def set_signal(): # pragma: no cover
def prepare_testbase_from_svn(self, user, testbase_name, testbase_base):
self.logger.write(_("get test base '%s' from svn\n") %
- src.printcolors.printcLabel(testbase_name), 3)
+ UTS.label(testbase_name), 3)
try:
def set_signal(): # pragma: no cover
"""see http://bugs.python.org/issue1652"""
exectime = ""
sp = "." * (35 - len(script_info.name))
- self.logger.write(self.write_test_margin(3), 3)
- self.logger.write("script %s %s %s %s\n" % (
- src.printcolors.printcLabel(script_info.name),
- sp,
- src.printcolors.printc(script_info.res),
- exectime), 3, False)
+ self.logger.info(self.write_test_margin(3))
+ self.logger.info("script %s %s %s %s\n" % \
+            (UTS.label(script_info.name), sp, script_info.res, exectime))
if script_info and len(callback) > 0:
- self.logger.write("Exception in %s\n%s\n" % \
- (script_info.name,
- src.printcolors.printcWarning(callback)), 2, False)
+ self.logger.error("Exception in %s\n%s\n" % \
+ (script_info.name, UTS.red(callback)))
if script_info.res == src.OK_STATUS:
self.nb_succeed += 1
self.logger.write(self.write_test_margin(2), 3)
self.logger.write("Session = %s\n" % \
- src.printcolors.printcLabel(self.currentsession), 3, False)
+ UTS.label(self.currentsession), 3, False)
# prepare list of tests to run
tests = os.listdir(os.path.join(self.currentDir,
def run_grid_tests(self):
self.logger.write(self.write_test_margin(1), 3)
self.logger.write("grid = %s\n" % \
- src.printcolors.printcLabel(self.currentgrid), 3, False)
+ UTS.label(self.currentgrid), 3, False)
grid_path = os.path.join(self.currentDir, self.currentgrid)
if not os.path.exists(os.path.join(grid_path, session_)):
self.logger.write(self.write_test_margin(2), 3)
self.logger.write(
- src.printcolors.printcWarning("Session %s not found" % session_) + "\n", 3, False)
+ UTS.red("Session %s not found" % session_) + "\n", 3, False)
else:
self.currentsession = session_
self.run_session_tests()
- ##
- # Runs test testbase.
+
def run_testbase_tests(self):
+ """Runs test testbase"""
logger = self.logger
res_dir = os.path.join(self.currentDir, "RESSOURCES")
+ DBG.write("fix what the pythonpath ?", )
os.environ['PYTHONPATH'] = (res_dir +
os.pathsep +
os.environ['PYTHONPATH'])
+
+ DBG.tofix("fix what the hell is the pythonpath ?", os.environ['PYTHONPATH'], True)
+
os.environ['TT_BASE_RESSOURCES'] = res_dir
logger.debug(" %s = %s\n" % ("TT_BASE_RESSOURCES", res_dir)
self.logger.write("\n", 4, False)
self.logger.write(self.write_test_margin(0), 3)
- testbase_label = "Test base = %s\n" % \
- src.printcolors.printcLabel(self.currentTestBase)
+ testbase_label = "Test base = %s\n" % UTS.label(self.currentTestBase)
self.logger.write(testbase_label, 3, False)
- self.logger.write("-" * len(src.printcolors.cleancolor(testbase_label)), 3)
+ self.logger.write("-" * len(UTS.cleancolor(testbase_label)), 3)
self.logger.write("\n", 3, False)
# load settings
self.ignore_tests = ldic['known_failures_list']
if isinstance(self.ignore_tests, list):
self.ignore_tests = {}
- self.logger.write(src.printcolors.printcWarning("known_failur"
+ self.logger.write(UTS.red("known_failur"
"es_list must be a dictionary (not a list)") + "\n", 1, False)
else:
self.ignore_tests = {}
for grid in grids:
if not os.path.exists(os.path.join(self.currentDir, grid)):
self.logger.write(self.write_test_margin(1), 3)
- self.logger.write(src.printcolors.printcWarning(
+ self.logger.write(UTS.red(
"grid %s does not exist\n" % grid), 3, False)
else:
self.currentgrid = grid
self.logger.write("\n", 2, False)
if not os.path.exists(script):
- self.logger.write(src.printcolors.printcWarning(
- "WARNING: script not found: %s" % script) + "\n", 2)
+ self.logger.warning("script not found: %s" % script)
else:
- self.logger.write(src.printcolors.printcHeader(
- "----------- start %s" % script_name) + "\n", 2)
- self.logger.write("Run script: %s\n" % script, 2)
+ self.logger.info("----------- start %s\n" % script_name))
+ self.logger.info("Run script: %s\n" % script)
subprocess.Popen(script, shell=True).wait()
- self.logger.write(src.printcolors.printcHeader(
- "----------- end %s" % script_name) + "\n", 2)
+ self.logger.info("----------- end %s\n" % script_name))
def run_all_tests(self):
initTime = datetime.datetime.now()
self.run_script('test_setup')
- self.logger.write("\n", 2, False)
-
- self.logger.write(src.printcolors.printcHeader(
- _("=== STARTING TESTS")) + "\n", 2)
- self.logger.write("\n", 2, False)
- self.currentDir = os.path.join(self.tmp_working_dir,
- 'BASES',
- self.currentTestBase)
+
+ self.logger.info("\n\n" + _("=== STARTING TESTS\n"))
+ self.currentDir = os.path.join(
+ self.tmp_working_dir, 'BASES', self.currentTestBase)
self.run_testbase_tests()
# calculate total execution time
totalTime = datetime.datetime.now() - initTime
totalTime -= datetime.timedelta(microseconds=totalTime.microseconds)
- self.logger.write("\n", 2, False)
- self.logger.write(src.printcolors.printcHeader(_("=== END TESTS")), 2)
- self.logger.write(" %s\n" % src.printcolors.printcInfo(str(totalTime)),
- 2,
- False)
+ self.logger.write("\n\n" + _("=== END TESTS %s\n") % str(totalTime))
- #
# Start the tests
- #
self.run_script('test_cleanup')
self.logger.write("\n", 2, False)
# evaluate results
- res_count = "%d / %d" % \
+ res_count = "(%d/%d)" % \
(self.nb_succeed, self.nb_run - self.nb_acknoledge)
- res_out = _("Tests Results: %(1)d/%(2)d\n") % \
- { '1': self.nb_succeed, '2': self.nb_run }
+ res_out = _("Tests Results: (%d/%d)\n") % (self.nb_succeed, self.nb_run)
if self.nb_succeed == self.nb_run:
- res_out = src.printcolors.printcSuccess(res_out)
+
+ res_out = UTS.green(res_out)
else:
- res_out = src.printcolors.printcError(res_out)
- self.logger.write(res_out, 1)
+ res_out = UTS.red(res_out)
+ self.logger.info(res_out)
if self.nb_timeout > 0:
- self.logger.write(_("%d tests TIMEOUT\n") % self.nb_timeout, 1)
+ self.logger.info(_("%d tests TIMEOUT\n") % self.nb_timeout)
res_count += " TO: %d" % self.nb_timeout
if self.nb_not_run > 0:
- self.logger.write(_("%d tests not executed\n") % self.nb_not_run, 1)
+ self.logger.info(_("%d tests not executed\n") % self.nb_not_run)
res_count += " NR: %d" % self.nb_not_run
- status = src.OK_STATUS
+ status = "<OK>"
if self.nb_run - self.nb_succeed - self.nb_acknoledge > 0:
- status = src.KO_STATUS
+ status = "<KO>"
elif self.nb_acknoledge:
- status = src.KNOWNFAILURE_STATUS
+ status = UTS.green("KNOWN FAILURE")
- self.logger.write(_("Status: %s\n") % status, 3)
+ self.logger.info(_("Status: %s\n") % status)
return self.nb_run - self.nb_succeed - self.nb_acknoledge
- ##
# Write margin to show test results.
def write_test_margin(self, tab):
+ """indent with '| ... +' to show test results."""
if tab == 0:
return ""
return "| " * (tab - 1) + "+ "
import errno
import stat
+from src.coloringSat import cleanColor as cleancolor # as shortcut
##############################################################################
# file system utilities
##############################################################################
def ensure_path_exists(p):
- '''Create a path if not existing
+ """Create a path if not existing
:param p str: The path.
- '''
+ """
if not os.path.exists(p):
os.makedirs(p)
def replace_in_file(filein, strin, strout):
- '''Replace <strin> by <strout> in file <filein>
- '''
+ """Replace <strin> by <strout> in file <filein>"""
+ with open(filein, "r") as f:
+ contents = f.read()
shutil.move(filein, filein + "_old")
- fileout= filein
- filein = filein + "_old"
- fin = open(filein, "r")
- fout = open(fileout, "w")
- for line in fin:
- fout.write(line.replace(strin, strout))
-
+ with open(filein, "r") as f:
+ f.write(contents.replace(strin, strout))
+
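+# A minimal usage sketch (path and strings are illustrative only); the original
+# file is kept alongside the rewritten one with an "_old" suffix:
+#   replace_in_file("/tmp/hello.txt", "world", "SALOME")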
##############################################################################
# Utils class to simplify path manipulations.
##############################################################################
# logger and color utilities
##############################################################################
def formatTuples(tuples):
- '''format the tuples variable in a tabulated way.
+ """
+ format 'label = value' the tuples in a tabulated way.
:param tuples list: The list of tuples to format
- :return: The tabulated text. (lines ' label = value')
- '''
+    :return: The tabulated text (multiple lines).
+ """
# find the maximum length of the first value of the tuples in info
smax = max(map(lambda l: len(l[0]), tuples))
# Print each item of tuples with good indentation
return msg
def formatValue(label, value, suffix=""):
- """format 'label = value' with the info color
+ """
+ format 'label = value' with the info color
:param label int: the label to print.
:param value str: the value to print.
- :param suffix str: the suffix to add at the end.
+    :param suffix str: the optional suffix to add at the end.
"""
msg = " %s = %s %s" % (label, value, suffix)
return msg
-def print_info(logger, tuples):
- '''format the tuples variable in a tabulated way.
-
- :param logger Logger: The logging instance to use for the prints.
- :param tuples list: The list of tuples to display
- '''
+def logger_info_tuples(logger, tuples):
+ """
+ for convenience
+ format as formatTuples() and call logger.info()
+ """
msg = formatTuples(tuples)
logger.info(msg)
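+
+# A minimal usage sketch (the logger is any object with an info() method;
+# the labels and values are illustrative only):
+#   logger_info_tuples(logger, [("BUILD directory", "/tmp/BUILD"),
+#                               ("workdir", "/tmp")])
+#   # -> one '  label = value' line per tuple, labels aligned on the
+#   #    longest label found by formatTuples()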
-
-_colors = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".lower().split(" ")
+# for convenience
+_colors = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".lower().split(" ")
+
def black(msg):
return "<black>"+msg+"<reset>"
def red(msg):
return "<red>"+msg+"<reset>"
-def greem(msg):
+def green(msg):
return "<green>"+msg+"<reset>"
def yellow(msg):
def white(msg):
return "<white>"+msg+"<reset>"
+def normal(msg):
+ return "<normal>"+msg+"<reset>"
+
+def reset(msg):
+ return "<reset>"+msg
+
+def info(msg):
+ return "<info>"+msg+"<reset>"
+
+def header(msg):
+ return "<info>"+msg+"<reset>"
+
+def warning(msg):
+ return "<warning>"+msg+"<reset>"
+
+def error(msg):
+ return "<error>"+msg+"<reset>"
+
+def critical(msg):
+ return "<critical>"+msg+"<reset>"
+
##############################################################################
# list and dict utilities