p_name, p_info = p_name_info
header = _("Check of %s") % UTS.label(p_name)
- UTS.init_log_step(logger,header)
+ logger.logStep_begin(header) # needs logStep_end
# Verify if the command has to be launched or not
ignored = False
logger.warning(msg)
if ignored or not cmd_found:
- UTS.log_step(logger, "ignored")
+ logger.logStep("ignored")
if not cmd_found:
return RCO.ReturnCode("KO", "command not found product %s" % p_name)
return RCO.ReturnCode("OK", "ignored product %s" % p_name)
builder = COMP.Builder(config, logger, p_info)
# Prepare the environment
- UTS.log_step(logger, "PREPARE ENV")
+ logger.logStep("PREPARE ENV")
res_prepare = builder.prepare()
- UTS.log_step(logger, res_prepare)
+ logger.logStep(res_prepare)
# Launch the check
- UTS.log_step(logger, "CHECK")
+ logger.logStep("CHECK")
res = builder.check(command=command)
- UTS.log_step(logger, res)
+ logger.logStep(res)
return res
p_name, p_info = p_name_info
if header is not None: # close previous step in for loop
- UTS.end_log_step(logger, res[-1])
+ logger.logStep_end(res[-1])
# Logging
header = _("Compilation of %s ...") % UTS.label(p_name)
- UTS.init_log_step(logger, header)
+ logger.logStep_begin(header) # needs logStep_end
# Do nothing if the product is not compilable
if ("properties" in p_info and \
"compilation" in p_info.properties and \
p_info.properties.compilation == "no"):
- UTS.log_step(logger, "ignored")
+ logger.logStep("ignored")
res.append(RCO.ReturnCode("OK", "compile %s ignored" % p_name))
continue
# Do nothing if the product is native
if PROD.product_is_native(p_info):
- UTS.log_step(logger, "native")
+ logger.logStep("native")
res.append(RCO.ReturnCode("OK", "no compile %s as native" % p_name))
continue
# Clean the build and the install directories
# if the corresponding options was called
if options.clean_all:
- UTS.log_step(logger, "CLEAN BUILD AND INSTALL")
+ logger.logStep("CLEAN BUILD AND INSTALL")
# import time
# time.sleep(5)
- # UTS.log_step(logger, header, "IIYOO")
+ # logger.logStep(header, "IIYOO")
# raise Exception('YOO')
cmd_args = "--products %s --build --install" % p_name
rc = self.executeMicroCommand("clean", nameAppli, cmd_args)
# Clean the the install directory
# if the corresponding option was called
if options.clean_install and not options.clean_all:
- UTS.log_step(logger, "CLEAN INSTALL")
+ logger.logStep("CLEAN INSTALL")
cmd_args = "--products %s --install" % p_name
rc = self.executeMicroCommand("clean", nameAppli, cmd_args)
if not rc.isOk():
# Check if it was already successfully installed
if PROD.check_installation(p_info):
- UTS.log_step(logger, _("already installed"))
+ logger.logStep(_("already installed"))
res.append(RCO.ReturnCode("OK", "no compile %s as already installed" % p_name))
continue
# If the show option was called, do not launch the compilation
if options.no_compile:
- UTS.log_step(logger, _("No compile and install as show option"))
+ logger.logStep(_("No compile and install as show option"))
res.append(RCO.ReturnCode("OK", "no compile %s as show option" % p_name))
continue
# Check if the dependencies are installed
l_depends_not_installed = check_dependencies(config, p_name_info)
if len(l_depends_not_installed) > 0:
- UTS.log_step(logger, "<KO>")
+ logger.logStep("<KO>")
msg = _("the following products are mandatory:\n")
for prod_name in sorted(l_depends_not_installed):
msg += "%s\n" % prod_name
else:
# Ok Clean the build directory if the compilation and tests succeed
if options.clean_build_after:
- UTS.log_step(logger, "CLEAN BUILD")
+ logger.logStep("CLEAN BUILD")
cmd_args = "--products %s --build" % p_name
rc0 = self.executeMicroCommand("clean", nameAppli, cmd_args)
if header is not None: # close last step in for loop
- UTS.end_log_step(logger, res[-1])
+ logger.logStep_end(res[-1])
resAll = RCO.ReturnCodeFromList(res)
nbOk = sum(1 for r in res if r.isOk())
if options.check:
# Do the unit tests (call the check command)
- UTS.log_step(logger, "CHECK")
+ logger.logStep("CHECK")
cmd_args = "--products %s" % p_name
rc0 = self.executeMicroCommand("check", nameAppli, cmd_args)
if not rc0.isOk():
error_step = ""
# Logging and sat command call for configure step
- UTS.log_step(logger, "CONFIGURE")
+ logger.logStep("CONFIGURE")
cmd_args = "--products %s" % p_name
rc = self.executeMicroCommand("configure", nameAppli, cmd_args)
if not rc.isOk():
# if the product has a compilation script,
# it is executed during make step
script_path_display = UTS.label(p_info.compil_script)
- UTS.log_step(logger, "SCRIPT " + script_path_display)
+ logger.logStep("SCRIPT " + script_path_display)
else:
- UTS.log_step(logger, "MAKE")
+ logger.logStep("MAKE")
cmd_args = "--products %s" % p_name
# Get the make_flags option if there is any
res.append(rc)
# Logging and sat command call for make install step
- UTS.log_step(logger, "MAKE INSTALL")
+ logger.logStep("MAKE INSTALL")
cmd_args = "--products %s" % p_name
rc = self.executeMicroCommand("makeinstall", nameAppli, cmd_args)
if not rc.isOk():
# Logging and sat command call for the script step
script_path_display = UTS.label(p_info.compil_script)
- UTS.log_step(logger, "SCRIPT %s" % script_path_display)
+ logger.logStep("SCRIPT %s" % script_path_display)
# res = sat.script(config.VARS.application + " --products " + p_name, verbose = 0, logger_add_link = logger)
cmd_args = "--products %s" % p_name
res = self.executeMicroCommand("script", nameAppli, cmd_args)
- UTS.log_step(logger, res)
+ logger.logStep(res)
return res
# case : display all the available pyconf applications
elif options.list:
- lproduct = []
- # search in all directories that can have pyconf applications
- for path in config.PATHS.APPLICATIONPATH:
- # print a header
- if not options.no_label:
- logger.info(UTS.header("------ %s" % path))
- msg = "" # only one multiline info
- if not os.path.exists(path):
- msg += (UTS.red( _("Directory not found")) + "\n" )
- else:
- for f in sorted(os.listdir(path)):
- # ignore file that does not ends with .pyconf
- if not f.endswith('.pyconf'):
- continue
+ lproduct = []
+ # search in all directories that can have pyconf applications
+ msg = ""
+ for path in config.PATHS.APPLICATIONPATH:
+ # print a header
+ if not options.no_label:
+ msg += UTS.header("\n------ %s\n" % path)
+ if not os.path.exists(path):
+ msg += (UTS.red( _("Directory not found")) + "\n" )
+ else:
+ for f in sorted(os.listdir(path)):
+ # ignore files that do not end with .pyconf
+ if not f.endswith('.pyconf'):
+ continue
+ appliname = f[:-len('.pyconf')]
+ if appliname not in lproduct:
+ lproduct.append(appliname)
+ if path.startswith(config.VARS.personalDir):
+ msg += "%s<red>*<reset>\n" % appliname
+ else:
+ msg += "%s\n" % appliname
+ logger.info(msg)
- appliname = f[:-len('.pyconf')]
- if appliname not in lproduct:
- lproduct.append(appliname)
- if path.startswith(config.VARS.personalDir) \
- and not options.no_label:
- msg += "%s*\n" % appliname
- else:
- msg += "%s\n" % appliname
-
- logger.info(msg)
- DBG.write("lproduct", lproduct)
- if len(lproduct) == 0:
- aFile = os.path.join(config.VARS.datadir, 'local.pyconf')
- msg = """\
+ if len(lproduct) == 0:
+ aFile = os.path.join(config.VARS.datadir, 'local.pyconf')
+ msg = """\
no existing product
may be you have to set some PROJECTS.project_file_paths in file
%s""" % aFile
- logger.warning(msg)
- return RCO.ReturnCode("OK", msg)
- else:
- return RCO.ReturnCode("OK", "config -l command done", lproduct)
+ logger.warning(msg)
+ return RCO.ReturnCode("OK", msg)
+ else:
+ return RCO.ReturnCode("OK", "config -l command done", lproduct)
# case : give a synthetic view of all patches used in the application
elif options.show_patchs:
UTS.label(config.VARS.application))
info = [(_("BUILD directory"), os.path.join(config.APPLICATION.workdir, 'BUILD'))]
- UTS.logger_info_tuples(logger, info)
+ logger.info(UTS.formatTuples(info))
# Call the function that will loop over all the products and execute
# the right command(s)
# Logging
header = _("Configuration of %s") % UTS.label(p_name)
- UTS.init_log_step(logger, header)
+ logger.logStep_begin(header) # needs logStep_end
# Do nothing if he product is not compilable
if ("properties" in p_info and \
"compilation" in p_info.properties and \
p_info.properties.compilation == "no"):
- UTS.end_log_step(logger, "ignored")
+ logger.logStep_end("ignored")
return RCO.ReturnCode("OK", "configure %s ignored" % p_name)
# Instantiate the class that manages all the construction commands
builder = COMP.Builder(config, logger, p_info)
# Prepare the environment
- UTS.log_step(logger, "PREPARE ENV")
+ logger.logStep("PREPARE ENV")
res_prepare = builder.prepare()
- UTS.log_step(logger, res_prepare)
+ logger.logStep(res_prepare)
# Execute buildconfigure, configure if the product is autotools
# Execute cmake if the product is cmake
res = []
if PROD.product_is_autotools(p_info):
- UTS.log_step(logger, "BUILDCONFIGURE")
+ logger.logStep("BUILDCONFIGURE")
rc = builder.build_configure()
- UTS.log_step(logger, rc)
+ logger.logStep(rc)
res.append(rc)
- UTS.log_step(logger, "CONFIGURE")
+ logger.logStep("CONFIGURE")
rc = builder.configure(conf_option)
- UTS.log_step(logger, rc)
+ logger.logStep(rc)
res.append(rc)
if PROD.product_is_cmake(p_info):
- UTS.log_step(logger, "CMAKE")
+ logger.logStep("CMAKE")
rc = builder.cmake(conf_option)
- UTS.log_step(logger, rc)
+ logger.logStep(rc)
res.append(rc)
- UTS.end_log_step(logger, rc.getStatus())
+ logger.logStep_end(rc.getStatus())
return RCO.ReturnCode(rc.getStatus(), "in configure %s" % p_name)
(_("Ignored extensions"), extension_ignored),
(_("Ignored directories"), directories_ignored)
]
- UTS.logger_info_tuples(logger, info)
+ logger.info(UTS.formatTuples(info))
# Get all the files and paths
logger.info(_("Store all file paths ... "), 3)
return RCO.ReturnCode(status, "%s %s" % (msg, msgCount))
def generate_component_list(self, product_info, context):
- """returns list of ReturnCode of elementary generate_component"""
+ """returns list of ReturnCode of elementaries generate_component calls"""
# shortcuts
logger = self.getLogger()
def getParser(self):
"""Define all options for command 'sat job <options>'"""
parser = self.getParserWithHelp()
+
+ '''version 5.0
parser.add_option(
'j', 'jobs_config', 'string', 'jobs_cfg',
_('Mandatory: The name of the config file that contains the jobs configuration') )
'', 'name', 'string', 'job',
_('Mandatory: The job name from which to execute commands.'), "" )
return parser
+ '''
+
+ # version 5.1 destroy commands job & jobs ambiguity
+ parser.add_option(
+ 'c', 'config', 'string', 'config_jobs',
+ _('Mandatory: The name of the config file that contains the jobs configuration') )
+ parser.add_option(
+ 'j', 'job', 'string', 'job_name',
+ _('Mandatory: The job name from which to execute commands.'), "" )
+ return parser
def run(self, cmd_arguments):
"""method called for command 'sat job <options>'"""
l_cfg_dir = config.PATHS.JOBPATH
# Make sure the jobs_config option has been called
- if not options.jobs_cfg:
- message = _("The option --jobs_config is required\n")
- logger.error(message)
- return 1
+ if not options.config_jobs:
+ msg = _("The option --config is required")
+ return RCO.ReturnCode("KO", msg)
# Make sure the name option has been called
- if not options.job:
- message = _("The option --name is required\n")
- logger.error(message)
- return 1
+ if not options.job_name:
+ msg = _("The option --job is required")
+ return RCO.ReturnCode("KO", msg)
# Find the file in the directories
- found = True
+ found = False
- fPyconf = options.jobs_cfg
- if not file_jobs_cfg.endswith('.pyconf'):
+ fPyconf = options.config_jobs
+ if not fPyconf.endswith('.pyconf'):
fPyconf += '.pyconf'
for cfg_dir in l_cfg_dir:
- file_jobs_cfg = os.path.join(cfg_dir, fPyconf)
- if os.path.exists(file_jobs_cfg):
+ file_config_jobs = os.path.join(cfg_dir, fPyconf)
+ if os.path.exists(file_config_jobs):
found = True
break
msg = _("""\
The job file configuration %s was not found.
Use the --list option to get the possible files.""") % UTS.blue(fPyconf)
- logger.error(msg)
- return 1
+ return RCO.ReturnCode("KO", msg)
info = [ (_("Platform"), config.VARS.dist),
- (_("File containing the jobs configuration"), file_jobs_cfg) ]
- UTS.logger_info_tuples(logger, info)
+ (_("File containing the jobs configuration"), file_config_jobs) ]
+ logger.info(UTS.formatTuples(info))
# Read the config that is in the file
- config_jobs = UTS.read_config_from_a_file(file_jobs_cfg)
+ config_jobs = UTS.read_config_from_a_file(file_config_jobs)
# Find the job and its commands
found = False
for job in config_jobs.jobs:
- if job.name == options.job:
+ if job.name == options.job_name:
commands = job.commands
found = True
break
if not found:
- msg = _("Impossible to find the job %s in %s\n") % (options.job, file_jobs_cfg)
- logger.error(msg)
- return 1
+ msg = _("Impossible to find the job %s in %s") % (options.job_name, file_config_jobs)
+ return RCO.ReturnCode("KO", msg)
# Find the maximum length of the commands in order to format the display
len_max_command = max([len(cmd) for cmd in commands])
# Loop over the commands and execute it
- res = 0
- nb_pass = 0
+ res = [] # list of results
for command in commands:
specific_option = False
# Determine if it is a sat command or a shell command
if not(specific_option):
options = None
- # Get dynamically the command function to call
- sat_command = runner.__getattr__(sat_command_name)
-
- logger.info("Executing " + UTS.label(command) + " " +
- "." * (len_max_command - len(command)) + " ")
-
- error = ""
+ logger.logStep_begin("Executing %s" % UTS.label(command))
# Execute the command
- code = sat_command(end_cmd,
- options = options,
- batch = True,
- verbose = 0,
- logger_add_link = logger)
-
- # Print the status of the command
- if code == 0:
- nb_pass += 1
- logger.info("<OK>\n")
- else:
- if sat_command_name != "test":
- res = 1
- logger.info('<KO>: %s\n' % error)
-
+ # TODO: obsolete call below; to be replaced by executeMicroCommand and filterNameAppli
+ # rc = sat_command(end_cmd, options = options, batch = True, verbose = 0, logger_add_link = logger)
+ # example of cmd_args
+ # cmd_args = "--products %s --build --install" % p_name
+ nameAppli, cmd_args = self.filterNameAppli(end_cmd)
+ rc = self.executeMicroCommand(sat_command_name, nameAppli, cmd_args)
+ res.append(rc)
+ logger.logStep_end(rc)
+
# Print the final state
- if res == 0:
- final_status = "OK"
+ good_result = sum(1 for r in res if r.isOk())
+ nbExpected = len(commands)
+ msgCount = "(%d/%d)" % (good_result, nbExpected)
+ if good_result == nbExpected:
+ status = "OK"
+ msg = _("command job")
+ logger.info("\n%s %s: <%s>.\n" % (msg, msgCount, status))
else:
- final_status = "KO"
-
- msg = "Commands: <%s> (%d/%d)" % (final_status, nb_pass, len(commands))
- logger.info(msg)
- return RCO.ReturnCode(final_status, msg)
+ status = "KO"
+ msg = _("command job, some commands have failed")
+ logger.info("\n%s %s: <%s>.\n" % (msg, msgCount, status))
+
+ return RCO.ReturnCode(status, "%s %s" % (msg, msgCount))
+
+ def filterNameAppli(self, end_cmd):
+ DBG.tofix("sat job filterNameAppli()", end_cmd)
+ return "???", end_cmd
\ No newline at end of file
import itertools
import re
+import pprint as PP
+
# import paramiko later
import src.ElementTree as ETREE
def getParamiko(logger=None):
if len(_PARAMIKO) == 0:
try:
- import paramiko as PARAMIKO
- _PARAMIKO.append(PARAMIKO)
- return PARAMIKO
+ import paramiko as PRMK
+ _PARAMIKO.append(PRMK)
+ if logger is not None: # as native
+ msg = "\nparamiko version %s at %s" % \
+ (PRMK.__version__, os.path.dirname(PRMK.__file__))
+ logger.info(msg)
+ return PRMK
except Exception as e:
if logger is not None:
- logger.critical("Problem import paramiko. No jobs if not 'pip install paramiko'")
+ msg = """\
+Python paramiko prerequisite not installed.
+Jobs on other machines impossible.
+try 'pip install paramiko'
+"""
+ logger.critical(msg)
return None
else:
return _PARAMIKO[0]
the dedicated jobs configuration file.
| Examples:
- | >> sat jobs --name my_jobs --publish
+ | >> # get list of existing config jobs files (pyconf files)
+ | >> sat jobs --list
+ | >> # launch job1 & job2 defined in my_jobs.pyconf
+ | >> sat jobs --configs my_jobs --jobs job1,job2 --publish
"""
name = "jobs"
def getParser(self):
"""Define all options for command 'sat jobs <options>'"""
parser = self.getParserWithHelp()
+
+ '''version 5.0
parser.add_option(
'n', 'name', 'list2', 'jobs_cfg',
_('Mandatory: The name of the config file that contains the jobs configuration. Can be a list.') )
- parser.add_option(
+ parser.add_option(
'o', 'only_jobs', 'list2', 'only_jobs',
- _('Optional: the list of jobs to launch, by their name. ') )
+ _('Optional: the list of jobs to launch, by their name.') )
+ '''
+
+ # version 5.1 destroy commands job & jobs ambiguity
+ parser.add_option(
+ 'c', 'configs', 'list2', 'configs_jobs',
+ _('Mandatory: The name of the config file(s) that contains the jobs configurations.') )
+ parser.add_option(
+ 'j', 'jobs', 'list2', 'job_names',
+ _('Mandatory: The job name(s) from which to execute commands.'), "" )
+
parser.add_option(
'l', 'list', 'boolean', 'list',
- _('Optional: list all available config files.') )
+ _('Optional: list all available config files.') )
parser.add_option(
't', 'test_connection', 'boolean', 'test_connection',
_("Optional: try to connect to the machines. Not executing the jobs."),
_("Optional: generate an xml file that can be read in a browser to display the jobs status."),
False )
parser.add_option(
- 'i', 'input_boards', 'string', 'input_boards', _("Optional: "
- "the path to csv file that contain the expected boards."),
+ 'i', 'input_boards', 'string', 'input_boards',
+ _("Optional: the path to csv file that contain the expected boards."),
"" )
parser.add_option(
'', 'completion', 'boolean', 'no_label',
# list option : display all the available config files
if options.list:
- for cfg_dir in l_cfg_dir:
- if not options.no_label:
- logger.info("------ %s\n" % UTS.blue(cfg_dir))
- if not os.path.exists(cfg_dir):
+ msg = ""
+ for cfg_dir in l_cfg_dir:
+ if not options.no_label:
+ msg += UTS.info("\n------ %s\n" % cfg_dir)
+ if not os.path.exists(cfg_dir):
+ continue
+ for f in sorted(os.listdir(cfg_dir)):
+ if not f.endswith('.pyconf'):
continue
- for f in sorted(os.listdir(cfg_dir)):
- if not f.endswith('.pyconf'):
- continue
- cfilename = f[:-7]
- logger.info("%s\n" % cfilename)
- return RCO.ReturnCode("OK", "jobs command done")
+ cfilename = f[:-7]
+ msg += ("%s\n" % cfilename)
+ logger.info(msg)
+ return RCO.ReturnCode("OK", "jobs command --list done")
# Make sure the jobs_config option has been called
- if not options.jobs_cfg:
- msg = _("The option --jobs_config is required\n")
- logger.error(message)
+ if not options.configs_jobs:
+ msg = _("The option --jobs_config is required")
+ logger.error(msg)
return RCO.ReturnCode("KO", msg)
# Find the file in the directories, unless it is a full path
merger = PYCONF.ConfigMerger()
config_jobs = PYCONF.Config()
l_conf_files_path = []
- for config_file in options.jobs_cfg:
- found, file_jobs_cfg = get_config_file_path(config_file, l_cfg_dir)
+ for config_file in options.configs_jobs:
+ found, file_configs_jobs = get_config_file_path(config_file, l_cfg_dir)
if not found:
msg = _("""\
The file configuration %s was not found.
-Use the --list option to get the possible files.\n""") % config_file
+Use the --list option to get the possible files.""") % config_file
logger.error(msg)
return RCO.ReturnCode("KO", msg)
- l_conf_files_path.append(file_jobs_cfg)
+ l_conf_files_path.append(file_configs_jobs)
# Read the config that is in the file
- one_config_jobs = UTS.read_config_from_a_file(file_jobs_cfg)
+ one_config_jobs = UTS.read_config_from_a_file(file_configs_jobs)
merger.merge(config_jobs, one_config_jobs)
- info = [(_("Platform"), config.VARS.dist),
- (_("Files containing the jobs configuration"), l_conf_files_path)]
- UTS.logger_info_tuples(logger, info)
+ msg = "\n" + _("Platform = ") + config.VARS.dist
+ msg += "\n%s =\n%s" % \
+ ( _("Files containing the jobs configuration"),
+ PP.pformat(l_conf_files_path) )
+ logger.info(msg)
- if options.only_jobs:
- l_jb = PYCONF.Sequence()
- for jb in config_jobs.jobs:
- if jb.name in options.only_jobs:
- l_jb.append(jb,
- "Job that was given in only_jobs option parameters\n")
- config_jobs.jobs = l_jb
+ if options.job_names:
+ l_jb = PYCONF.Sequence()
+ for jb in config_jobs.jobs:
+ if jb.name in options.job_names:
+ l_jb.append(jb, "Job that was given in job_names option parameters")
+ config_jobs.jobs = l_jb
# Parse the config jobs in order to develop all the factorized jobs
develop_factorized_jobs(config_jobs)
with open(path_pyconf , 'w') as f:
config_jobs.__save__(f)
- # log the paramiko problems
+ # log the paramiko messages and problems
log_dir = UTS.get_log_path(config)
- paramiko_log_dir_path = os.path.join(log_dir, "JOBS")
- UTS.ensure_path_exists(paramiko_log_dir_path)
- paramiko = getParamiko(logger)
- paramiko.util.log_to_file(os.path.join(paramiko_log_dir_path,
- logger.txtFileName))
+ logger.info("jobs log directory = %s" % UTS.info(log_dir))
+ paramiko = getParamiko(logger)
+ if paramiko is None:
+ return RCO.ReturnCode("KO", "paramiko prerequisite not installed")
+
+ # paramiko.util.log_to_file(os.path.join(paramiko_log_dir_path, logger.txtFileName))
+ rc = logger.setLoggerParamiko() # manages configuration of paramiko logger
+
+ loggerPrmk = logger.getLoggerParamiko()
+ logger.info("paramiko logger %s" % rc)
+ loggerPrmk.info("initial message from sat jobs options\n%s" % options)
+
# Initialization
today_jobs = Jobs(runner, logger, path_pyconf, config_jobs)
# SSH connection to all machines
- today_jobs.ssh_connection_all_machines()
+ logger.info("today jobs machines =\n%s" % PP.pformat([m.host for m in today_jobs.lmachines]))
+ res = today_jobs.ssh_connection_all_machines()
if options.test_connection:
- return RCO.ReturnCode("OK", "jobs ssh_connection done")
+ return RCO.ReturnCodeFromList(res)
gui = None
if options.publish:
logger,
file_boards = options.input_boards)
- logger.debug("<OK>")
+ logger.debug("Gui init <OK>")
# Display the list of the xml files
- logger.info(("List of published files:\n%s\n") % gui.xml_global_file.logFile)
+ logger.info(("List of published files:\n%s") % gui.xml_global_file.logFile)
msg = ""
for board in gui.d_xml_board_files.keys():
file_path = gui.d_xml_board_files[board].logFile
self.ssh = self.paramiko.SSHClient()
self._connection_successful = None
- def connect(self, logger):
+ def connect(self):
"""Initiate the ssh connection to the remote machine
- :param logger: (Logger) The logger instance
+ :return: (ReturnCode) OK if the ssh connection succeeded
username=self.user,
password = self.password)
except self.paramiko.AuthenticationException:
- rc = RCO.ReturnCode("KO", _("Authentication failed"))
+ rc = RCO.ReturnCode("KO", "Authentication failed on %s" % self.host)
except self.paramiko.BadHostKeyException:
- rc = RCO.ReturnCode("KO", _("The server's host key could not be verified"))
+ rc = RCO.ReturnCode("KO", "The server's host key could not be verified on %s" % self.host)
except self.paramiko.SSHException:
- rc = RCO.ReturnCode("KO", _("SSHException error connecting or establishing an SSH session"))
+ rc = RCO.ReturnCode("KO", "SSH Exception connecting on %s" % self.host)
except:
- rc = RCO.ReturnCode("KO", _("Error connecting or establishing an SSH session"))
+ rc = RCO.ReturnCode("KO", "Problem connecting or establishing an SSH session on %s" % self.host)
else:
self._connection_successful = True
rc = RCO.ReturnCode("OK", "connecting SSH session done on %s" % self.host)
self.name_remote_jobs_pyconf = ".%s" % os.path.basename(job_file_path)
self.commands = commands
- self.command = (os.path.join(self.machine.sat_path, "sat") +
- " -l " +
- os.path.join(self.machine.sat_path,
- "list_log_files.txt") +
- " job --jobs_config " +
- os.path.join(self.machine.sat_path,
- self.name_remote_jobs_pyconf) +
- " --name " + self.name)
+ sat_path = self.machine.sat_path
+ sat = os.path.join(sat_path, "sat")
+ cmd = sat + " -l %s job --config %s --job %s"
+ self.command = cmd % \
+ ( os.path.join(sat_path, "list_log_files.txt"),
+ os.path.join(sat_path, self.name_remote_jobs_pyconf),
+ self.name )
if prefix:
self.command = prefix + ' "' + self.command +'"'
(_, out_pid, _) = self.machine.exec_command(cmd_pid, self.logger)
pids_cmd = out_pid.readlines()
pids_cmd = [str(UTS.only_numbers(pid)) for pid in pids_cmd]
- pids+=pids_cmd
+ pids += pids_cmd
return pids
def kill_remote_process(self, wait=1):
return ("Unable to get the pid of the command.", "")
cmd_kill = " ; ".join([("kill -2 " + pid) for pid in pids])
- (_, out_kill, err_kill) = self.machine.exec_command(cmd_kill,
- self.logger)
+ (_, out_kill, err_kill) = self.machine.exec_command(cmd_kill, self.logger)
DATT.sleep(wait)
return (out_kill.read().decode(), err_kill.read().decode())
board,
cmmnds,
timeout,
- self.runner.cfg,
+ self.runner.getConfig(),
self.job_file_path,
self.logger,
after = after,
a_machine = mach
break
+ config = self.runner.getConfig()
if a_machine == None:
for machine_def in self.cfg_jobs.machines:
if machine_def.name == name_machine:
if 'host' not in machine_def:
- host = self.runner.cfg.VARS.hostname
+ host = config.VARS.hostname
else:
host = machine_def.host
if 'user' not in machine_def:
- user = self.runner.cfg.VARS.user
+ user = config.VARS.user
else:
user = machine_def.user
self.lhosts = host_list
- def ssh_connection_all_machines(self, pad=50):
+ def ssh_connection_all_machines(self):
"""Do the ssh connection to every machine to be used today.
- :return: None
+ :return: (list of ReturnCode) one result per machine connection
"""
- self.logger.info( "Establishing connection with all the machines :\n")
- for machine in self.lmachines:
+ config = self.runner.getConfig()
+ logger = self.logger
+ logger.info("\nEstablishing connection with all the machines:")
+
+ res = [] # all connections
+ for machine in self.lmachines:
# little algorithm in order to display traces
- begin_line = (_("Connection to %s: ") % machine.name)
- if pad - len(begin_line) < 0:
- endline = " "
- else:
- endline = (pad - len(begin_line)) * "." + " "
-
+ header = ("Connection to %s" % machine.name)
step = "SSH connection"
- self.logger.info( begin_line + endline + step)
+ logger.logStep_begin(header, step)
# the call to the method that initiate the ssh connection
- msg = machine.connect()
+ rc = machine.connect()
+ res.append(rc)
+ if not rc.isOk():
+ logger.logStep_end(rc, 40)
+ continue
# Copy salomeTools to the remote machine
- if machine.successfully_connected(self.logger):
+ if machine.successfully_connected(logger): # as rc.isOk()
step = _("Remove SAT")
- self.logger.info('\r%s%s%s' % (begin_line, endline, 20 * " "))
- self.logger.info('\r%s%s%s' % (begin_line, endline, step))
+ # FIXME(review): begin_line/endline were removed by this refactor and are undefined here
+ logger.info('\r%s%s%s' % (begin_line, endline, 20 * " "))
+ logger.info('\r%s%s%s' % (begin_line, endline, step))
(__, out_dist, __) = machine.exec_command(
- "rm -rf %s" % machine.sat_path, self.logger)
+ "rm -rf %s" % machine.sat_path, logger)
out_dist.read()
step = _("Copy SAT")
- self.logger.info('\r%s%s%s' % (begin_line, endline, 20 * " "))
- self.logger.info('\r%s%s%s' % (begin_line, endline, step))
+ logger.info('\r%s%s%s' % (begin_line, endline, 20 * " "))
+ logger.info('\r%s%s%s' % (begin_line, endline, step))
- res_copy = machine.copy_sat(self.runner.cfg.VARS.salometoolsway,
- self.job_file_path)
+ res_copy = machine.copy_sat(config.VARS.salometoolsway, self.job_file_path)
# set the local settings of sat on the remote machine using
# the init command
- (__, out_dist, __) = machine.exec_command(
- os.path.join(machine.sat_path,
- "sat init --base default --workdir"
- " default --log_dir default"),
- self.logger)
+ sat = os.path.join(machine.sat_path, "sat")
+ cmd = sat + " init --base default --workdir default --log_dir default"
+ (__, out_dist, __) = machine.exec_command(cmd, logger)
out_dist.read()
# get the remote machine distribution using a sat command
- (__, out_dist, __) = machine.exec_command(
- os.path.join(machine.sat_path,
- "sat config --value VARS.dist --no_label"),
- self.logger)
- machine.distribution = out_dist.read().decode().replace("\n",
- "")
+ cmd = sat + " config --value VARS.dist --no_label"
+ (__, out_dist, __) = machine.exec_command(cmd, logger)
+ machine.distribution = out_dist.read().decode().replace("\n", "")
# Print the status of the copy
if res_copy == 0:
- self.logger.info('\r%s' % \
+ logger.info('\r%s' % \
((len(begin_line)+len(endline)+20) * " "))
- self.logger.info('\r%s%s%s' % (begin_line, endline, "<OK>"))
+ logger.info('\r%s%s%s' % (begin_line, endline, "<OK>"))
else:
- self.logger.info('\r%s' % \
+ logger.info('\r%s' % \
((len(begin_line)+len(endline)+20) * " "), 3)
- self.logger.info('\r%s%s%s %s' % \
+ logger.info('\r%s%s%s %s' % \
(begin_line, endline, "<KO>",
_("Copy of SAT failed: %s") % res_copy))
- else:
- self.logger.info("<TODO_RC>%s" % msg)
- self.logger.info("\n")
+ return res
def is_occupied(self, hostname):
def get_config_file_path(job_config_name, l_cfg_dir):
found = False
- file_jobs_cfg = None
+ file_configs_jobs = None
if os.path.exists(job_config_name) and job_config_name.endswith(".pyconf"):
found = True
- file_jobs_cfg = job_config_name
+ file_configs_jobs = job_config_name
else:
for cfg_dir in l_cfg_dir:
- file_jobs_cfg = os.path.join(cfg_dir, job_config_name)
- if not file_jobs_cfg.endswith('.pyconf'):
- file_jobs_cfg += '.pyconf'
+ file_configs_jobs = os.path.join(cfg_dir, job_config_name)
+ if not file_configs_jobs.endswith('.pyconf'):
+ file_configs_jobs += '.pyconf'
- if not os.path.exists(file_jobs_cfg):
+ if not os.path.exists(file_configs_jobs):
continue
else:
found = True
break
- return found, file_jobs_cfg
+ return found, file_configs_jobs
def develop_factorized_jobs(config_jobs):
"""update information about the jobs for the file xml_file
# Logging
header = _("Make of %s") % UTS.label(p_name)
- UTS.init_log_step(logger, header)
+ logger.logStep_begin(header) # needs logStep_end
# Do nothing if he product is not compilable
if ("properties" in p_info and \
"compilation" in p_info.properties and \
p_info.properties.compilation == "no"):
- UTS.log_step(logger, "ignored")
+ logger.logStep("ignored")
return 0
# Instantiate the class that manages all the construction commands
builder = COMP.Builder(config, logger, p_info)
# Prepare the environment
- UTS.log_step(logger, "PREPARE ENV")
+ logger.logStep("PREPARE ENV")
res_prepare = builder.prepare()
- UTS.log_step(logger, res_prepare)
+ logger.logStep(res_prepare)
# Execute buildconfigure, configure if the product is autotools
# Execute cmake if the product is cmake
nb_proc, make_opt_without_j = get_nb_proc(p_info, config, make_option)
- UTS.log_step(logger, "MAKE -j" + str(nb_proc))
+ logger.logStep("MAKE -j" + str(nb_proc))
if ARCH.is_windows():
res = builder.wmake(nb_proc, make_opt_without_j)
else:
res = builder.make(nb_proc, make_opt_without_j)
- UTS.log_step(logger, res)
+ logger.logStep(res)
return res
def get_nb_proc(product_info, config, make_option):
info = [(_("BUILD directory"),
os.path.join(config.APPLICATION.workdir, 'BUILD'))]
- UTS.logger_info_tuples(logger, info)
+ logger.info(UTS.formatTuples(info))
# Call the function that will loop over all the products and execute
# the right command(s)
# Logging
header = _("Make install of %s") % UTS.label(p_name)
- UTS.init_log_step(logger, header)
+ logger.logStep_begin(header) # needs logStep_end
# Do nothing if he product is not compilable
if ("properties" in p_info and \
"compilation" in p_info.properties and \
p_info.properties.compilation == "no"):
- UTS.log_step(logger, "ignored")
+ logger.logStep("ignored")
return RCO.ReturnCode("OK", "product %s is not compilable" % p_name)
# Instantiate the class that manages all the construction commands
builder = COMP.Builder(config, logger, p_info)
# Prepare the environment
- UTS.log_step(logger, "PREPARE ENV")
+ logger.logStep("PREPARE ENV")
res = builder.prepare()
- UTS.log_step(logger, res)
+ logger.logStep(res)
# Execute buildconfigure, configure if the product is autotools
# Execute cmake if the product is cmake
if not PROD.product_has_script(p_info):
- UTS.log_step(logger, "MAKE INSTALL")
+ logger.logStep("MAKE INSTALL")
res_m = builder.install()
- UTS.log_step(logger, res_m)
+ logger.logStep(res_m)
res += res_m
return res
p_name, p_info = p_name_info
# Logging
- msg = _("Running script of %s ...") % UTS.label(p_name)
- logger.trace(msg)
+ header = _("Running script of %s") % UTS.label(p_name)
+ logger.logStep_begin(header) # needs logStep_end
# Do nothing if he product is not compilable or has no compilation script
test1 = "properties" in p_info and \
"compilation" in p_info.properties and \
p_info.properties.compilation == "no"
if ( test1 or (not PROD.product_has_script(p_info)) ):
- UTS.log_step(logger, "ignored")
+ logger.logStep("ignored")
- return res.append(RCO.ReturnCode("OK", "run script %s ignored" % p_name))
+ res.append(RCO.ReturnCode("OK", "run script %s ignored" % p_name))
+ return res
builder = COMP.Builder(config, logger, p_info)
# Prepare the environment
- UTS.log_step(logger, "PREPARE ENV")
+ logger.logStep("PREPARE ENV")
res_prepare = builder.prepare()
- UTS.log_step(logger, res_prepare)
+ logger.logStep(res_prepare)
# Execute the script
script_path_display = UTS.label(p_info.compil_script)
- UTS.log_step(logger, "SCRIPT " + script_path_display)
+ logger.logStep("SCRIPT " + script_path_display)
res = builder.do_script_build(p_info.compil_script, number_of_proc=nb_proc)
- UTS.log_step(logger, res)
+ logger.logStep(res)
return res
logger = self.getLogger()
options = self.getOptions()
- msg_miss = _("The --%s argument is required\n")
+ msg_miss = _("The --%s argument is required")
+ rc_ko = RCO.ReturnCode("KO", "command template not done")
if options.template is None:
logger.error(msg_miss % "template")
- return 1
+ return rc_ko
if options.target is None and options.info is None:
logger.error(msg_miss % "target")
- return 1
+ return rc_ko
if "APPLICATION" in config:
- msg = _("This command does not use a product.\n")
+ msg = _("This command does not use a product")
logger.error(msg)
- return 1
+ return rc_ko
if options.info:
- return get_template_info(config, options.template, logger)
+ return self.get_template_info(options.template)
if options.name is None:
logger.error(msg_miss % "name")
- return 1
+ return rc_ko
if not options.name.replace('_', '').isalnum():
msg = _("""\
-Component name must contains only alphanumeric characters and no spaces\n""")
+Component name must contain only alphanumeric characters and no spaces""")
logger.error(msg)
- return 1
+ return rc_ko
if options.target is None:
logger.error(msg_miss % "target")
- return 1
+ return rc_ko
target_dir = os.path.join(options.target, options.name)
if os.path.exists(target_dir):
- msg = _("The target already exists: %s\n") % target_dir
+ msg = _("The target already exists: %s") % target_dir
logger.error(msg)
- return 1
+ return rc_ko
msg = ""
msg += _('Create sources from template\n')
msg += ' destination = %s\n' % target_dir
- msg += ' name = %\ns' % options.name
+ msg += ' name = %s\n' % options.name
msg += ' template = %s\n' % options.template
logger.info(msg)
for elt in options.param.split(","):
param_def = elt.strip().split('=')
if len(param_def) != 2:
- msg = _("Bad parameter definition: '%s'\n") % elt
+ msg = _("Bad parameter definition: '%s'") % elt
logger.error(msg)
- return 1
+ return rc_ko
conf_values[param_def[0].strip()] = param_def[1].strip()
- retcode = prepare_from_template(config, options.name, options.template,
- target_dir, conf_values, logger)
-
- if retcode == 0:
- logger.info(_("The sources were created in %s\n") % UTS.info(target_dir))
- msg = _("Do not forget to put them in your version control system.\n")
- logger.info("\n" + UTS.red(msg))
- else:
- logger.info("\n")
+ rc = self.prepare_from_template(
+ config, options.name, options.template, target_dir, conf_values, logger)
+
+ if rc.isOk():
+ msg = _("The sources were created in %s") % UTS.info(target_dir)
+ msg += "\n" + _("Do not forget to put them in your version control system.")
+ logger.info("\n" + msg)
- return retcode
+ return rc
+#######################################################################
class TParam:
def __init__(self, param_def, compo_name, dico=None):
self.default = ""
if len(param_def) > 2: self.prompt = param_def[2]
if len(param_def) > 3: self.check_method = param_def[3]
else:
- raise Exception(_("ERROR in template parameter definition"))
+ raise Exception(_("Problem in template parameter definition"))
self.raw_prompt = self.prompt
if len(self.prompt) == 0:
return len(val) > 0
return len(val) > 0 and self.check_method(val)
+
def get_dico_param(dico, key, default):
- if dico.has_key(key):
+ if key in dico:
return dico[key]
return default
+#######################################################################
class TemplateSettings:
def __init__(self, compo_name, settings_file, target):
self.compo_name = compo_name
self.delimiter_char = get_dico_param(ldic, "delimiter", ":sat:")
# get the ignore filter
- self.ignore_filters = map(lambda l: l.strip(),
- ldic["ignore_filters"].split(','))
+ self.ignore_filters = \
+ map(lambda l: l.strip(), ldic["ignore_filters"].split(','))
def has_pyconf(self):
return len(self.pyconf) > 0
return []
return re.findall("%\((?P<name>\S[^\)]*)", self.pyconf)
- ##
- # Check if the file needs to be parsed.
def check_file_for_substitution(self, file_):
+ """Check if the file needs to be parsed"""
for filter_ in self.ignore_filters:
if fnmatch.fnmatchcase(file_, filter_):
return False
return True
def check_user_values(self, values):
+ """raise Exception if missing parameters"""
if values is None:
return
missing.append(p)
if len(missing) > 0:
- raise Exception(
- _("Missing parameters: %s") % ", ".join(missing) )
+ raise Exception(_("Missing parameters: %s") % ", ".join(missing))
def get_parameters(self, conf_values=None):
if self.dico is not None:
self.check_user_values(conf_values)
# create dictionary with default values
- dico = {}
- dico["name"] = self.compo_name.lower()
- dico["Name"] = self.compo_name.capitalize()
- dico["NAME"] = self.compo_name
- dico["target"] = self.target
- dico[self.file_subst] = self.compo_name
+ dico = {
+ "name": self.compo_name.lower(),
+ "Name": self.compo_name.capitalize(),
+ "NAME": self.compo_name,
+ "target": self.target,
+ self.file_subst: self.compo_name,
+ }
# add user values if any
if conf_values is not None:
for p in conf_values.keys():
return self.dico
def search_template(config, template):
- # search template
template_src_dir = ""
if os.path.isabs(template):
if os.path.exists(template):
conf_values,
logger):
"""Prepares a module from a template."""
- res = RCO.ReturnCode("OK", "prepare_from_template has no raise")
+ res = RCO.ReturnCode("OK", "prepare_from_template done")
template_src_dir = search_template(config, template)
# copy the template
if os.path.isfile(template_src_dir):
- logger.info(_("Extract template %s\n") % UTS.info(template))
+ logger.info(_("Extract template %s") % UTS.info(template))
SYSS.archive_extract(template_src_dir, target_dir)
else:
- logger.info(_("Copy template %s\n") % UTS.info(template))
+ logger.info(_("Copy template %s") % UTS.info(template))
shutil.copytree(template_src_dir, target_dir)
tsettings = TemplateSettings(compo_name, settings_file, target_dir)
# first rename the files
- logger.debug(UTS.label(_("Rename files\n")))
+ logger.debug(UTS.label(_("Rename files")))
for root, dirs, files in os.walk(target_dir):
for fic in files:
ff = fic.replace(tsettings.file_subst, compo_name)
raise Exception(
_("Destination file already exists: %s") % \
os.path.join(root, ff) )
- logger.debug(" %s -> %s\n" % (fic, ff))
+ logger.debug(" %s -> %s" % (fic, ff))
os.rename(os.path.join(root, fic), os.path.join(root, ff))
# rename the directories
- logger.debug(UTS.label(_("Rename directories\n")))
+ logger.debug(UTS.label(_("Rename directories")))
for root, dirs, files in os.walk(target_dir, topdown=False):
for rep in dirs:
dd = rep.replace(tsettings.file_subst, compo_name)
raise Exception(
_("Destination directory already exists: %s") % \
os.path.join(root, dd) )
- logger.debug(" %s -> %s\n" % (rep, dd))
+ logger.debug(" %s -> %s" % (rep, dd))
os.rename(os.path.join(root, rep), os.path.join(root, dd))
# ask for missing parameters
- logger.debug(UTS.label(_("Make substitution in files\n")))
- logger.debug(_("Delimiter =") + " %s\n" % tsettings.delimiter_char)
- logger.debug(_("Ignore Filters =") + " %s\n" % ', '.join(tsettings.ignore_filters))
+ msg = UTS.label(_("Make substitution in files"))
+ msg += "\n" + _("Delimiter") + " = '%s'" % tsettings.delimiter_char
+ msg += "\n" + _("Ignore Filters") + "= %s" % ', '.join(tsettings.ignore_filters)
+ logger.debug(msg)
dico = tsettings.get_parameters(conf_values)
-
+ ##############################################################################
class CompoTemplate(string.Template):
"""override standard string.Template class to use the desire delimiter"""
delimiter = tsettings.delimiter_char
for fic in files:
fpath = os.path.join(root, fic)
if not tsettings.check_file_for_substitution(fpath[pathlen:]):
- logger.debug(" - %s\n" % fpath[pathlen:])
+ logger.debug(" - %s" % fpath[pathlen:])
continue
# read the file
m = file(fpath, 'r').read()
if d != m:
changed = "*"
file(fpath, 'w').write(d)
- logger.debug(" %s %s\n" % (changed, fpath[pathlen:]))
+ logger.debug(" %s %s" % (changed, fpath[pathlen:]))
- if not tsettings.has_pyconf:
+ if not tsettings.has_pyconf():
logger.error(_("Definition for sat not found in settings file."))
def get_template_info(config, template_name, logger):
sources = search_template(config, template_name)
- logger.info(" Template = %s\n" % sources)
+ logger.info(" Template = %s" % sources)
# read settings
tmpdir = os.path.join(config.VARS.tmp_root, "tmp_template")
else:
msg += tsettings.info
- msg += "\n= Configuration\n"
+ msg += "= Configuration\n"
msg += " file substitution key = %s\n" % tsettings.file_subst
msg += " substitution key = '%s'\n" % tsettings.delimiter_char
if len(tsettings.ignore_filters) > 0:
logger.info(msg)
- retcode = 0
+ res = [] # list of ReturnCode
msg = skip
msg += "= Verification\n"
if tsettings.file_subst not in pnames:
- msg += "file substitution key not defined as a parameter: %s\n" % \
+ msg += "file substitution key not defined as a parameter: %s" % \
tsettings.file_subst
- retcode = 1
+ res.append(RCO.ReturnCode("KO", msg))
logger.info(msg)
if len(zz) > 0:
msg += "Missing definition in %s: %s\n" % \
( fpath[pathlen:], ", ".join(zz) )
- retcode = 1
+ res.append(RCO.ReturnCode("KO", msg))
- logger.info(msg)
+ if msg != "": logger.info(msg)
- if retcode == 0:
- logger.info("<OK>" + skip)
- else:
- logger.info("<KO>" + skip)
-
+ retcode = RCO.ReturnCodeFromList(res)
# clean up tmp file
shutil.rmtree(tmpdir)
except ImportError:
from sha import sha as sha1
-
+# Compatibility python 2/3 for input function
+# input stays input for python 3 and input = raw_input for python 2
+try:
+ input = raw_input
+except NameError:
+ pass
+
########################################################################
# Command class
########################################################################
"""Check the options
:param options: (Options) The options
- :return: None
+ :return: (RCO.ReturnCode)
"""
if not options.launcher:
options.launcher = ""
returnCode = UTS.check_config_has_application(config)
if not returnCode.isOk():
msg = _("An application is required to use a relative path with option --appli")
- raise Exception(msg)
+ return RCO.ReturnCode("KO", msg)
options.launcher = os.path.join(config.APPLICATION.workdir, options.launcher)
if not os.path.exists(options.launcher):
- raise Exception(_("Launcher %s not found") % options.launcher )
- return
+ return RCO.ReturnCode("KO", _("Launcher %s not found") % options.launcher)
+ return RCO.ReturnCode("OK", "check_option done")
def run(self, cmd_arguments):
"""method called for command 'sat test <options>'"""
logger = self.getLogger()
options = self.getOptions()
- self.check_option(options)
-
+ rc = self.check_option(options)
+ if not rc.isOk():
+ return rc
+
# the test base is specified either by the application, or by the --base option
with_application = False
- if config.VARS.application != 'None':
- logger.info(_('Running tests on application %s\n') %
- UTS.label(config.VARS.application))
+ vars_app = config.VARS.application
+ if vars_app != 'None':
+ logger.info(_('Running tests on application %s') % UTS.label(vars_app))
with_application = True
elif not options.base:
- raise Exception(
- _('A test base is required. Use the --base option') )
+ return RCO.ReturnCode("KO", _('A test base is required. Use the --base option') )
# the launcher is specified either by the application, or by the --launcher option
if with_application:
Impossible to find any launcher.
Please specify an application or a launcher
""")
- logger.error(msg)
- return 1
+ return RCO.ReturnCode("KO", msg)
# set the display
show_desktop = (options.display and options.display.upper() == "NO")
# initialization
#################
+ tmp_root = config.VARS.tmp_root
+ conf_app = config.APPLICATION if "APPLICATION" in config else None
if with_application:
- tmp_dir = os.path.join(config.VARS.tmp_root,
- config.APPLICATION.name,
- "test")
+ tmp_dir = os.path.join(tmp_root, conf_app.name, "test")
else:
- tmp_dir = os.path.join(config.VARS.tmp_root,
- "test")
+ tmp_dir = os.path.join(tmp_root, "test")
# remove previous tmp dir
if os.access(tmp_dir, os.F_OK):
try:
shutil.rmtree(tmp_dir)
except:
- logger.error(
- _("error removing TT_TMP_RESULT %s\n") % tmp_dir)
+ logger.error(_("error removing TT_TMP_RESULT %s") % tmp_dir)
- lines = []
- lines.append("date = '%s'" % config.VARS.date)
- lines.append("hour = '%s'" % config.VARS.hour)
- lines.append("node = '%s'" % config.VARS.node)
- lines.append("arch = '%s'" % config.VARS.dist)
+ msg = []
+ msg.append("date = '%s'" % config.VARS.date)
+ msg.append("hour = '%s'" % config.VARS.hour)
+ msg.append("node = '%s'" % config.VARS.node)
+ msg.append("arch = '%s'" % config.VARS.dist)
if 'APPLICATION' in config:
- lines.append("application_info = {}")
- lines.append("application_info['name'] = '%s'" %
- config.APPLICATION.name)
- lines.append("application_info['tag'] = '%s'" %
- config.APPLICATION.tag)
- lines.append("application_info['products'] = %s" %
- str(config.APPLICATION.products))
+ msg.append("application_info = {}")
+ msg.append("application_info['name'] = '%s'" % conf_app.name)
+ msg.append("application_info['tag'] = '%s'" % conf_app.tag)
+ msg.append("application_info['products'] = %s" % str(conf_app.products))
- content = "\n".join(lines)
+ content = "\n".join(msg)
# create hash from context information
dirname = sha1(content.encode()).hexdigest()
os.environ['TT_TMP_RESULT'] = base_dir
# create env_info file
- f = open(os.path.join(base_dir, 'env_info.py'), "w")
- f.write(content)
- f.close()
+ open(os.path.join(base_dir, 'env_info.py'), "w").write(content)
# create working dir and bases dir
working_dir = os.path.join(base_dir, 'WORK')
test_base = ""
if options.base:
test_base = options.base
- elif with_application and "test_base" in config.APPLICATION:
- test_base = config.APPLICATION.test_base.name
+ elif with_application and "test_base" in conf_app:
+ test_base = conf_app.test_base.name
fmt = " %s = %s\n"
msg = fmt % (_('Display'), os.environ['DISPLAY'])
launcher=options.launcher,
show_desktop=show_desktop)
- if not test_runner.test_base_found:
- # Fail
- return 1
+ if not test_runner.test_base_found: # Fail
+ return RCO.ReturnCode("KO", "test base not found")
# run the test
logger.allowPrintLevel = False
retcode = test_runner.run_all_tests()
logger.allowPrintLevel = True
- logger.info(_("Tests finished\n"))
+ logger.info(_("Tests finished"))
- logger.debug(_("Generate the specific test log\n"))
+ logger.debug(_("Generate the specific test log"))
log_dir = UTS.get_log_path(config)
out_dir = os.path.join(log_dir, "TEST")
UTS.ensure_path_exists(out_dir)
def ask_a_path():
"""
- interactive as using 'raw_input'
+ interactive terminal user ask answer using 'input'
"""
- path = raw_input("enter a path where to save the result: ")
+ path = input("enter a path where to save the result: ")
+ sure = " Are you sure to continue ? [y/n] "
if path == "":
- result = raw_input("the result will be not save. Are you sure to "
- "continue ? [y/n] ")
+ result = input("the result will be not save." + sure)
if result == "y":
return path
else:
return ask_a_path()
elif os.path.exists(path):
- result = raw_input("WARNING: the content of %s will be deleted. Are you"
- " sure to continue ? [y/n] " % path)
+ result = input("WARNING: the content of %s will be deleted." % path + sure)
if result == "y":
return path
else:
return path
def save_file(filename, base):
- f = open(filename, 'r')
- content = f.read()
- f.close()
-
+ content = open(filename, 'r').read()
objectname = sha1(content).hexdigest()
-
- f = gzip.open(os.path.join(base, '.objects', objectname), 'w')
- f.write(content)
- f.close()
+ with gzip.open(os.path.join(base, '.objects', objectname), 'w') as f:
+ f.write(content)
return objectname
def move_test_results(in_dir, what, out_dir, logger):
os.makedirs(finalPath)
pathIsOk = True
except:
- logger.error(_("%s cannot be created.") % finalPath)
+ logger.error(_("directory %s cannot be created.") % finalPath)
finalPath = ask_a_path()
if finalPath != "":
if grid_ == 'RESSOURCES':
for file_name in os.listdir(ingrid):
- if not os.path.isfile(os.path.join(ingrid,
- file_name)):
+ if not os.path.isfile(os.path.join(ingrid, file_name)):
continue
- f = open(os.path.join(outgrid, file_name), "w")
- f.write(save_file(os.path.join(ingrid, file_name),
- finalPath))
- f.close()
+ with open(os.path.join(outgrid, file_name), "w") as f:
+ f.write(save_file(os.path.join(ingrid, file_name), finalPath))
+
else:
for session_name in [t for t in os.listdir(ingrid) if
os.path.isdir(os.path.join(ingrid, t))]:
os.makedirs(outsession)
for file_name in os.listdir(insession):
- if not os.path.isfile(os.path.join(insession,
- file_name)):
+ if not os.path.isfile(os.path.join(insession, file_name)):
continue
if file_name.endswith('result.py'):
shutil.copy2(os.path.join(insession, file_name),
os.path.join(outsession, file_name))
else:
- f = open(os.path.join(outsession, file_name), "w")
- f.write(save_file(os.path.join(insession,
- file_name),
- finalPath))
- f.close()
+ with open(os.path.join(outsession, file_name), "w") as f:
+ f.write(save_file(os.path.join(insession, file_name), finalPath))
- logger.info("<OK>\n")
+ logger.info("move test results <OK>")
def check_remote_machine(machine_name, logger):
- logger.debug(_("Check the display on %s\n") % machine_name)
+ logger.debug(_("Check the display on %s") % machine_name)
ssh_cmd = """
set -x
ssh -o "StrictHostKeyChecking no" %s "whoami"
""" % machine_name
res = UTS.Popen(ssh_cmd, shell=True, logger=logger)
+ return res
def create_test_report(config,
xml_history_path,
XMLMGR.write_report(os.path.join(dest_path, xmlname), root, "test.xsl")
XMLMGR.write_report(xml_history_path, root, "test_history.xsl")
- return RCO._OK_STATUS
+ return RCO.ReturnCode("OK", "create test report done")
def generate_history_xml_path(config, test_base):
"""
self.idCommandHandlers = 0 # incremented, 0 for main command 1, 2, etc. for micro command
self.STEP = _STEP
self.TRACE = _TRACE
+
+ self._logStep_header = ["No log step header ..."] # one line message for steps
+ self._loggerParamiko = None # set only if sat jobs, else useless
+ self._fileParamiko = None # set when main command set sat logger
+
+ def logStep_begin(self, header, step=""):
+ """
+ initialize for main handler (tty as stdout)
+ a one line message for steps (...of compilation for example)
+ as no return line message with logger.info() level
+
+ | example:
+ | 'header ... first temporary step message ...'
+ | 'header ... etc ...' (on same line)
+ | 'header ... OK' (on same line)
+ """
+ self._logStep_header.append(header)
+ self.logStep(step)
+
+ def logStep(self, step):
+ """
+ current logger.info() step as
+ 'header ... etc ...'
+ """
+ header = self._logStep_header[-1]
+ if type(step) == str:
+ self.info("<RC>%s %s ..." % (header, step))
+ return
+ elif step.isOk(): # as ReturnCode type step
+ self.info("<RC>%s <OK> ..." % header)
+ else:
+ self.info("<RC>%s %s <KO> ..." % (header, step.getWhy()))
+
+ def logStep_end(self, step, tab=None):
+ """
+ last logger.info() step as
+ 'header ... OK' or 'header ... KO'
+ """
+ import src.utilsSat as UTS
+
+ header = self._logStep_header[-1]
+ if tab is None:
+ if type(step) == str:
+ self.info("<RC>%s %s" % (header, step))
+ elif step.isOk(): # as ReturnCode type
+ self.info("<RC>%s <OK>" % header)
+ else:
+ self.info("<RC>%s <%s>" % (header, step.getStatus()))
+ # pop as end
+ if len(self._logStep_header) > 1:
+ self._logStep_header.pop()
+ else:
+ self.error("Something wrong for logStep")
+ return
+ else:
+ if type(step) == str:
+ stepTab = UTS.tabColor(tab, header, 0, step)
+ self.info("<RC>%s" % stepTab)
+ elif step.isOk(): # as ReturnCode type
+ stepTab = UTS.tabColor(tab, header, 0, "<OK>")
+ self.info("<RC>%s" % stepTab)
+ else:
+ stepTab = UTS.tabColor(tab, header, 0, "<%s>: %s" % (step.getStatus(), step.getWhy()))
+ self.info("<RC>%s" % stepTab)
+ # pop as end
+ if len(self._logStep_header) > 1:
+ self._logStep_header.pop()
+ else:
+ self.error("Something wrong for logStep")
+ return
+
+ def setLoggerParamiko(self):
+ """
+ jobs use paramiko which uses the 'paramiko' logger,
+ which is defined here with an handler to automatic named file .txt.
+ this method have to be called only one time.
+
+ | see:
+ | >> cd /usr/lib/python2.7/site-packages/paramiko/
+ | >> ffipy logging.getLogger
+ | ./util.py:248: l = logging.getLogger("paramiko")
+ | ./util.py:270: l = logging.getLogger(name)
+ | >> ffipy paramiko | grep Logger
+ | ./util.py:248: l = logging.getLogger("paramiko")
+ | >> ffipy paramiko | grep "get_logger"
+ |. /channel.py:120: self.logger = util.get_logger('paramiko.transport')
+ | ./sftp.py:99: self.logger = util.get_logger('paramiko.sftp')
+ | ./hostkeys.py:337: log = get_logger('paramiko.hostkeys')
+ """
+ import src.returnCode as RCO
+ import src.utilsSat as UTS
+
+ if self._loggerParamiko is None:
+ loggerPrmk = LOGI.getLogger("paramiko")
+ self._loggerParamiko = loggerPrmk
+ if self._fileParamiko is None:
+ msg = "logger._fileParamiko not set, fix it"
+ return RCO.ReturnCode("KO", msg)
+ if len(loggerPrmk.handlers) != 0:
+ self.warning("paramiko logger already has handlers set outside sat, is a surprise")
+
+ paramiko_log_dir = os.path.dirname(self._fileParamiko)
+ UTS.ensure_path_exists(paramiko_log_dir)
+ handler = LOGI.FileHandler(self._fileParamiko)
+ # original from paramiko
+ # frm = '%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(thread)-3d %(name)s: %(message)s' # noqa
+ frm = '%(levelname)-.4s :: %(asctime)s.%(msecs)03d :: %(name)s :: %(message)s' # noqa
+ handler.setFormatter(LOGI.Formatter(frm, '%Y%m%d-%H:%M:%S'))
+ loggerPrmk.addHandler(handler)
+
+ # logger is not notset but low, handlers needs setlevel greater
+ loggerPrmk.setLevel(LOGI.DEBUG)
+ handler.setLevel(LOGI.INFO) # may be other
+
+ msg = "create paramiko logger, with handler on file %s" % self._fileParamiko
+ self.trace(msg)
+
+ return RCO.ReturnCode("OK", msg, loggerPrmk)
+ else:
+ self.warning("logger paramiko set yet, fix it")
+ msg = "existing paramiko logger, with %s handlers" % len(loggerPrmk.handlers)
+ return RCO.ReturnCode("OK", msg, loggerPrmk)
+
+ def getLoggerParamiko(self):
+ """for convenience, theorically useless"""
+ return self._loggerParamiko
+
def getMainCommandHandler(self):
"""
returns handler for colored stdout console/terminal
log("setFileHandler config\n%s" % PP.pformat(dict(config.VARS)))
log("setFileHandler TODO set log_dir config.LOCAL.log_dir")
- log_dir = "TMP" # TODO for debug config.LOCAL.log_dir # files xml
+ log_dir = config.LOCAL.log_dir # files xml
log_dir_out = os.path.join(log_dir, "OUT") # files txt
+ log_dir_jobs = os.path.join(log_dir, "JOBS") # files txt
UTS.ensure_path_exists(log_dir)
UTS.ensure_path_exists(log_dir_out)
if self.idCommandHandlers == 0:
handler.setFormatter(formatter)
logger.addHandler(handler)
+ # same name for paramiko logs if needed, useful for sat logs only
+ # but other JOBS directory
+ self._fileParamiko = os.path.join(log_dir_jobs, nameFileTxt) # useful for sat logs only
+
elif self.idCommandHandlers > 0: # secondary micro command
log("TODO setFileHandler '%s' micro command (id=%i)" % (fullNameCmd, self.idCommandHandlers))
log("setFileHandler %s" % logger)
return self.idCommandHandlers
- def setLevelMainHandler (self, level):
+ def setLevelMainHandler(self, level):
for handl in list(self.handlers): # get main handler
if handl.idCommandHandlers == 0:
log("setLevelMainHandler %s" % level)
cfgMgr = CFGMGR.ConfigManager(self)
# as main config
config = cfgMgr.get_config(nameAppli, self.options, nameCommand, datadir=None)
+
+ config.LOCAL.log_dir = "TMP"
+ logger.warning("log_dir %s for DEBUG, remove that in production" % config.LOCAL.log_dir)
+
self.config = config # runner.config main config
# create/get dynamically the command instance to call its 'run' method
def getCommandAndAppli(self, arguments):
"""
- returns name command to load and name appli to load
- and command to load remainders arguments
+ returns tuple
+
+ | ( name command to load,
+ | name appli to load,
+ | remainders arguments of command to load )
"""
args = self.assumeAsList(arguments)
namecmd, nameAppli, remainderArgs = None, None, []
:param editor: (str) The editor to use.
:param filePath: (str) The path to the file to open.
+ :return: (RCO.ReturnCode)
"""
# default editor is vi
if editor is None or len(editor) == 0:
def read_results(self, listTest, has_timed_out):
- """Read the xxx.result.py files."""
+ """Read the xxx.result.py files.
+
+ return: (dict) with keys test from listTest
+ """
results = {}
for test in listTest:
resfile = os.path.join(self.currentDir,
def get_config_key(inConfig, key, default):
"""
Search for key value in config node 'inConfig[key]' as 'inConfig.key'
- If key is not in inCconfig, then return default,
+ If key is not in inConfig, then return default,
else, return the found value
:param inConfig: (Config or Mapping etc) The in-Config node.
return ''.join([nb for nb in str_num if nb in '0123456789'] or '0')
def read_config_from_a_file(filePath):
+ import src.pyconf as PYCONF
try:
- cfg_file = pyconf.Config(filePath)
- except pyconf.ConfigError as e:
- raise Exception(_("Error in configuration file: %(file)s\n %(error)s") %
- { 'file': filePath, 'error': str(e) } )
+ cfg_file = PYCONF.Config(filePath)
+ except PYCONF.ConfigError as e:
+ msg = _("Error in configuration file: %(file)s\n %(error)s") % \
+ {'file': filePath, 'error': str(e)}
+ raise Exception(msg)
return cfg_file
def get_tmp_filename(config, name):
for i in tuples:
sp = " " * (smax - len(i[0]))
msg += sp + "%s = %s\n" % (i[0], i[1]) # tuples, may be longer
- if len(tuples) > 1: msg += "\n" # for long list
+ if len(tuples) > 1: msg += "\n" # skip one line for long list
return msg
def formatValue(label, value, suffix=""):
"""
msg = " %s = %s %s" % (label, value, suffix)
return msg
-
-def logger_info_tuples(logger, tuples):
- """
- For convenience
- format as formatTuples() and call logger.info()
- """
- msg = formatTuples(tuples)
- logger.info(msg)
-
-_log_step_header = ["No log step header ..."]
-
-def init_log_step(logger, header, step=""):
- _log_step_header.append(header)
- log_step(logger, step)
-
-def log_step(logger, step):
- header = _log_step_header[-1]
- if type(step) == str:
- logger.info("<RC>%s %s ..." % (header, step))
- return
- #as ReturnCode type step
- if step.isOk():
- logger.info("<RC>%s <OK>..." % header)
- else:
- logger.info("<RC>%s%s <KO>..." % (header, step.getWhy()))
-
-def end_log_step(logger, step):
- header = _log_step_header[-1]
- if type(step) == str:
- logger.info("<RC>%s %s" % (header, step))
- if len(_log_step_header) > 1: _log_step_header.pop()
- return
- #as ReturnCode type
- if step.isOk():
- logger.info("<RC>%s <OK>" % header)
- else:
- logger.info("<RC>%s <%s>" % (header, step.getStatus()))
- if len(_log_step_header) > 1: _log_step_header.pop()
-
def isSilent(output_verbose_level):
"""is silent fort self.build_environ"""