style: black format
author Gbkng <guillaume.brooking@gmail.com>
Sun, 17 Dec 2023 15:42:09 +0000 (16:42 +0100)
committer Gbkng <guillaume.brooking@gmail.com>
Mon, 22 Jan 2024 14:16:54 +0000 (15:16 +0100)
146 files changed:
AllTestLauncherSat.py
__init__.py
commands/__init__.py
commands/application.py
commands/check.py
commands/clean.py
commands/compile.py
commands/config.py
commands/configure.py
commands/doc.py
commands/environ.py
commands/find_duplicates.py
commands/generate.py
commands/init.py
commands/install.py
commands/job.py
commands/jobs.py
commands/launcher.py
commands/log.py
commands/make.py
commands/makeinstall.py
commands/package.py
commands/patch.py
commands/prepare.py
commands/run.py
commands/script.py
commands/shell.py
commands/source.py
commands/source_update.py
commands/template.py
commands/test.py
commands/update.py
data/templates/Application/config/compile.py
data/templates/PythonComponent/src/Controller/Controller.py
data/templates/PythonComponent/src/Dialog/CreateCircleDialog.py
data/templates/PythonComponent/src/Dialog/CreatePolylineDialog.py
data/templates/PythonComponent/src/Dialog/Dialog.py
data/templates/PythonComponent/src/Dialog/DialogEdit.py
data/templates/PythonComponent/src/Dialog/EditCenterDialog.py
data/templates/PythonComponent/src/Dialog/EditPointDialog.py
data/templates/PythonComponent/src/Dialog/EditRadiusDialog.py
data/templates/PythonComponent/src/Dialog/RenameDialog.py
data/templates/PythonComponent/src/Model/Circle.py
data/templates/PythonComponent/src/Model/Model.py
data/templates/PythonComponent/src/Model/Polyline.py
data/templates/PythonComponent/src/StandAlone/Desktop.py
data/templates/PythonComponent/src/StandAlone/StandalonePYCMPGUI.py
data/templates/PythonComponent/src/View/CircleGraphicsScene.py
data/templates/PythonComponent/src/View/CircleTreeWidgetItem.py
data/templates/PythonComponent/src/View/GraphicsRectItem.py
data/templates/PythonComponent/src/View/GraphicsScene.py
data/templates/PythonComponent/src/View/GraphicsView.py
data/templates/PythonComponent/src/View/Menu.py
data/templates/PythonComponent/src/View/PolyGraphicsScene.py
data/templates/PythonComponent/src/View/PolyTreeWidgetItem.py
data/templates/PythonComponent/src/View/TreeWidget.py
data/templates/PythonComponent/src/View/TreeWidgetItem.py
data/templates/PythonComponent/src/View/View.py
data/templates/PythonComponent8/src/Controller/Controller.py
data/templates/PythonComponent8/src/Dialog/CreateCircleDialog.py
data/templates/PythonComponent8/src/Dialog/CreatePolylineDialog.py
data/templates/PythonComponent8/src/Dialog/EditCenterDialog.py
data/templates/PythonComponent8/src/Dialog/EditPointDialog.py
data/templates/PythonComponent8/src/Dialog/EditRadiusDialog.py
data/templates/PythonComponent8/src/Dialog/RenameDialog.py
data/templates/PythonComponent8/src/Model/Circle.py
data/templates/PythonComponent8/src/Model/Polyline.py
data/templates/PythonComponent8/src/StandAlone/Desktop.py
data/templates/PythonComponent8/src/StandAlone/StandalonePYCMPGUI.py
data/templates/PythonComponent8/src/View/CircleGraphicsScene.py
data/templates/PythonComponent8/src/View/CircleTreeWidgetItem.py
data/templates/PythonComponent8/src/View/GraphicsRectItem.py
data/templates/PythonComponent8/src/View/GraphicsView.py
data/templates/PythonComponent8/src/View/Menu.py
data/templates/PythonComponent8/src/View/PolyGraphicsScene.py
data/templates/PythonComponent8/src/View/PolyTreeWidgetItem.py
data/templates/PythonComponent8/src/View/TreeWidget.py
doc/_themes/alabaster/alabaster/__init__.py
doc/_themes/alabaster/alabaster/_version.py
doc/_themes/alabaster/alabaster/support.py
doc/_themes/alabaster/docs/conf.py
doc/_themes/alabaster/setup.py
doc/_themes/alabaster/tasks.py
doc/src/conf.py
src/ElementPath.py
src/ElementTree.py
src/ElementTreePython2.py
src/ElementTreePython3.py
src/__init__.py
src/architecture.py
src/callerName.py
src/colorama/__init__.py
src/colorama/ansi.py
src/colorama/ansitowin32.py
src/colorama/initialise.py
src/colorama/win32.py
src/colorama/winterm.py
src/compilation.py
src/debug.py
src/environment.py
src/fileEnviron.py
src/fork.py
src/i18n/i18nTest.py
src/i18n/translate.py
src/logger.py
src/loggingSimple.py
src/options.py
src/printcolors.py
src/product.py
src/pyconf.py
src/returnCode.py
src/salomeTools.py
src/template.py
src/test/TOOLS.py
src/test_module.py
src/utilsSat.py
src/versionMinorMajorPatch.py
src/xmlManager.py
test/initializeTest.py
test/test_020_debug.py
test/test_021_versionMinorMajorPatch.py
test/test_024_logging.py
test/test_035_pyconf.py
test/test_100_satHelp.py
test/test_500_APPLI_TEST.py
test/test_501_paramiko.py
test/test_sat5_0/compilation/test_compilation.py
test/test_sat5_0/compilation/test_configure.py
test/test_sat5_0/compilation/test_make.py
test/test_sat5_0/compilation/test_makeinstall.py
test/test_sat5_0/config/test_option_copy.py
test/test_sat5_0/config/test_option_edit.py
test/test_sat5_0/config/test_option_value.py
test/test_sat5_0/config/test_option_value_2.py
test/test_sat5_0/environ/test_environ.py
test/test_sat5_0/job/test_job.py
test/test_sat5_0/jobs/test_jobs.py
test/test_sat5_0/log/test_launch_browser.py
test/test_sat5_0/log/test_launch_browser2.py
test/test_sat5_0/prepare/test_clean.py
test/test_sat5_0/prepare/test_patch.py
test/test_sat5_0/prepare/test_prepare.py
test/test_sat5_0/prepare/test_source.py
test/test_sat5_0/shell/test_shell.py
test/test_sat5_0/test/test_command.py
unittestpy/HTMLTestRunner.py
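
The diffs below are purely mechanical: black rewrites every file listed above into its canonical style (double-quoted strings, 4-space indentation, two blank lines between top-level definitions, long calls wrapped at the default 88-column limit) without changing runtime behaviour. A minimal sketch of how such a reformat can be reproduced or verified, assuming black is installed with its default settings (the exact command used and any pyproject.toml overrides are not recorded in this commit):

    # check_black.py -- hypothetical helper, not part of this repository.
    # Runs black in dry-run mode over the working tree: it prints a diff for
    # any file that would be reformatted and exits non-zero if one is found.
    # To apply the formatting in place instead, run: python -m black .
    import subprocess
    import sys

    result = subprocess.run([sys.executable, "-m", "black", "--check", "--diff", "."])
    sys.exit(result.returncode)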

AllTestLauncherSat.py
index eaa1a967b5054a52b30297dfd3095c4ea38cc061..6c1740ae68e12225cff88419e864f7daa43ee93d 100755 (executable)
@@ -2,21 +2,21 @@
 # -*- coding: utf-8 -*-
 
 # Copyright (C) 2008-2018  CEA/DEN
-# 
+#
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # License as published by the Free Software Foundation; either
 # version 2.1 of the License, or (at your option) any later version.
-# 
+#
 # This library is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 # Lesser General Public License for more details.
-# 
+#
 # You should have received a copy of the GNU Lesser General Public
 # License along with this library; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-# 
+#
 # See http://www.salome-platform.org or email : webmaster.salome@opencascade.com
 
 
@@ -57,7 +57,7 @@ import argparse as AP
 
 import glob
 import fnmatch
-import pprint as PP #pretty print
+import pprint as PP  # pretty print
 import src
 
 debug = False
@@ -65,86 +65,98 @@ verboseImport = True
 
 
 # get path to origin sources
-defaultdir  = os.path.dirname(os.path.realpath(__file__))
+defaultdir = os.path.dirname(os.path.realpath(__file__))
 # get path to salomeTools sources
 satdir = defaultdir
-srcdir = os.path.join(satdir, 'src')
-cmdsdir = os.path.join(satdir, 'commands')
+srcdir = os.path.join(satdir, "src")
+cmdsdir = os.path.join(satdir, "commands")
 
 # Make the src & commands package accessible from all code
 sys.path.insert(0, satdir)
-sys.path.insert(0, srcdir) # TODO remove that
-sys.path.insert(0, cmdsdir) # TODO remove that
+sys.path.insert(0, srcdir)  # TODO remove that
+sys.path.insert(0, cmdsdir)  # TODO remove that
 
 _user = src.architecture.get_user()
 # wambeke is christian at home
-_developers = ["christian", "wambeke",] #  ...who wants
+_developers = [
+    "christian",
+    "wambeke",
+]  #  ...who wants
+
 
 def errPrint(aStr):
-  """stderr to avoid write in html or xml file log message"""
-  sys.stderr.write(aStr + '\n')
+    """stderr to avoid write in html or xml file log message"""
+    sys.stderr.write(aStr + "\n")
+
 
 try:
-  import unittestpy.HTMLTestRunner as HTST
+    import unittestpy.HTMLTestRunner as HTST
 except:
-  HTST = None
-  errPrint("""
+    HTST = None
+    errPrint(
+        """
 WARNING: no HTML output available.
          try find 'test/unittestpy/HTMLTestRunner.py'.
-""")
+"""
+    )
 
 
 try:
-  import xmlrunner as XTST
+    import xmlrunner as XTST
 except:
-  XTST = None
-  errPrint("""
+    XTST = None
+    errPrint(
+        """
 WARNING: no XML output available for unittest.
          try 'pip install unittest-xml-reporting'.
-""")
+"""
+    )
 
 
 ###################################################################
 def locate(pattern, root=os.curdir):
-  """
-  Locate all files matching supplied filename pattern in and below
-  supplied root directory.
-  """
-  result = []
-  for path, dirs, files in os.walk(os.path.abspath(root)):
-    for filename in fnmatch.filter(files, pattern):
-      result.append( os.path.join(path, filename) )
-  return result
+    """
+    Locate all files matching supplied filename pattern in and below
+    supplied root directory.
+    """
+    result = []
+    for path, dirs, files in os.walk(os.path.abspath(root)):
+        for filename in fnmatch.filter(files, pattern):
+            result.append(os.path.join(path, filename))
+    return result
+
 
 def printEnv(search=""):
-  """
-  list all environment variables which name contains search string
-  example: 
-    import AllTestLauncher as ATL
-    ATL.printEnv("ROOT_")
-  """
-  env=os.environ
-  for i in sorted(env):
-    if search in i:
-      print(i) 
+    """
+    list all environment variables which name contains search string
+    example:
+      import AllTestLauncher as ATL
+      ATL.printEnv("ROOT_")
+    """
+    env = os.environ
+    for i in sorted(env):
+        if search in i:
+            print(i)
+
 
 def grepInEnv(search=""):
-  """
-  list all environment variables which contains search string
-  example: 
-    import AllTestLauncher as ATL
-    ATL.grepInEnv("XDATA")
-  """
-  env=os.environ
-  for i in sorted(env):
-     done=False
-     for j in env[i].split(":"):
-       if search in j:
-           if not done:
-             print(i+" contains ") 
-             done=True
-           print("  "+j)
-      
+    """
+    list all environment variables which contains search string
+    example:
+      import AllTestLauncher as ATL
+      ATL.grepInEnv("XDATA")
+    """
+    env = os.environ
+    for i in sorted(env):
+        done = False
+        for j in env[i].split(":"):
+            if search in j:
+                if not done:
+                    print(i + " contains ")
+                    done = True
+                print("  " + j)
+
+
 def format_exception(msg, limit=None, trace=None):
     """
     Format a stack trace and the exception information.
@@ -153,213 +165,229 @@ def format_exception(msg, limit=None, trace=None):
     """
     etype, value, tb = sys.exc_info()
     if _user in _developers:
-      res = "\n" + msg
-      if tb:
-          res += "\nTraceback (most recent call last):\n"
-          res += "".join(traceback.format_tb(tb, limit)) #[:-1])
-      res += "\n<"
-      res += "\n".join(traceback.format_exception_only(etype, value))
-      return res
+        res = "\n" + msg
+        if tb:
+            res += "\nTraceback (most recent call last):\n"
+            res += "".join(traceback.format_tb(tb, limit))  # [:-1])
+        res += "\n<"
+        res += "\n".join(traceback.format_exception_only(etype, value))
+        return res
     else:
-      res = "\n" + msg
-      if tb:
-          res += "\nTraceback:\n"
-          res += "".join(traceback.format_tb(tb, limit)[-1:]) #[:-1])
-      res += "\n<"
-      res += "".join(traceback.format_exception_only(etype, value))
-      return res
+        res = "\n" + msg
+        if tb:
+            res += "\nTraceback:\n"
+            res += "".join(traceback.format_tb(tb, limit)[-1:])  # [:-1])
+        res += "\n<"
+        res += "".join(traceback.format_exception_only(etype, value))
+        return res
+
 
 ###################################################################
 def runOnArgs(args):
-  """
-  launch tests on args.pattern files
-  """
-  fromFileOrPath = args.rootPath
-  fileTestPattern = args.pattern
-  if fromFileOrPath == None:
-    directory, name = os.path.split( os.path.realpath( __file__ ) )
-  else:
-    if os.path.isdir(fromFileOrPath):
-      directory, name = (fromFileOrPath, None)
-      fileTestPatternCurrent = fileTestPattern
-    elif os.path.isfile(fromFileOrPath):
-      directory, name = os.path.split( os.path.realpath( fromFileOrPath ) )
-      fileTestPatternCurrent = name
-    else:
-      mess = "Cannot get file or directory '%s'" % fromFileOrPath
-      errPrint("ERROR: " + mess)
-      return None
-      #raise Exception("Cannot get file or directory '%s'" % fromFileOrPath)
-
-  #files = glob.glob(os.path.join(directory, "*Test.py"))
-  files = sorted(locate(fileTestPatternCurrent, directory))
-
-  filesForTest={}
-
-  for aFile in files:
-    aDir, aName = os.path.split(aFile)
-    aImport, ext = os.path.splitext(aName)
-    
-    try:
-      if aFile in list(filesForTest.keys()):
-        print("WARNING: imported yet: "+aFile)
-      else:
-        sys.path.insert(0, aDir)
-        done = True
-        if verboseImport: errPrint("try import '%s'" % aImport)
-        aModule = __import__(aImport, globals(), locals(), []) 
-        del sys.path[0]
-        done = False
-        filesForTest[aFile] = (aImport, aModule)
-    except Exception as e:
-      if done: 
-        del sys.path[0] #attention of sys.path appends
-        done = False
-      msg = "ERROR: AllTestLauncher: import '%s':" % aFile
-      err = format_exception(msg)
-      errPrint(err)
-      continue
-
-  listfilesForTest = sorted(filesForTest.keys())
-  result = None
-
-  errPrint("AllTestLauncher test files:\n %s" % PP.pformat(listfilesForTest))
-  
-  if len(listfilesForTest) == 0: 
-    if debug: errPrint("WARNING: AllTestLauncher: empty list of test files")
-    return None
-
-  loader = unittest.TestLoader()
-  suite = None
-
-  for i,k in enumerate(listfilesForTest):
-    if debug: errPrint("Test: %s %s" % (i, k))
-    if i == 0:
-      suite = loader.loadTestsFromModule( filesForTest[k][1] )
-      pass
+    """
+    launch tests on args.pattern files
+    """
+    fromFileOrPath = args.rootPath
+    fileTestPattern = args.pattern
+    if fromFileOrPath == None:
+        directory, name = os.path.split(os.path.realpath(__file__))
     else:
-      suite.addTests( loader.loadTestsFromModule( filesForTest[k][1] ) )
-      pass
-
-  if args.type == "std": 
-    runner = unittest.TextTestRunner(verbosity=args.verbosity)
-  elif args.type == "html": 
-    runner = HTST.HTMLTestRunner(verbosity=args.verbosity, )
-  elif args.type == "xml": 
-    if args.name == 'stdout':
-      #all-in-one xml output at 'sys.stdout' for pipe redirection
-      runner = XTST.XMLTestRunner(verbosity=args.verbosity, output=sys.stdout)
+        if os.path.isdir(fromFileOrPath):
+            directory, name = (fromFileOrPath, None)
+            fileTestPatternCurrent = fileTestPattern
+        elif os.path.isfile(fromFileOrPath):
+            directory, name = os.path.split(os.path.realpath(fromFileOrPath))
+            fileTestPatternCurrent = name
+        else:
+            mess = "Cannot get file or directory '%s'" % fromFileOrPath
+            errPrint("ERROR: " + mess)
+            return None
+            # raise Exception("Cannot get file or directory '%s'" % fromFileOrPath)
+
+    # files = glob.glob(os.path.join(directory, "*Test.py"))
+    files = sorted(locate(fileTestPatternCurrent, directory))
+
+    filesForTest = {}
+
+    for aFile in files:
+        aDir, aName = os.path.split(aFile)
+        aImport, ext = os.path.splitext(aName)
+
+        try:
+            if aFile in list(filesForTest.keys()):
+                print("WARNING: imported yet: " + aFile)
+            else:
+                sys.path.insert(0, aDir)
+                done = True
+                if verboseImport:
+                    errPrint("try import '%s'" % aImport)
+                aModule = __import__(aImport, globals(), locals(), [])
+                del sys.path[0]
+                done = False
+                filesForTest[aFile] = (aImport, aModule)
+        except Exception as e:
+            if done:
+                del sys.path[0]  # attention of sys.path appends
+                done = False
+            msg = "ERROR: AllTestLauncher: import '%s':" % aFile
+            err = format_exception(msg)
+            errPrint(err)
+            continue
+
+    listfilesForTest = sorted(filesForTest.keys())
+    result = None
+
+    errPrint("AllTestLauncher test files:\n %s" % PP.pformat(listfilesForTest))
+
+    if len(listfilesForTest) == 0:
+        if debug:
+            errPrint("WARNING: AllTestLauncher: empty list of test files")
+        return None
+
+    loader = unittest.TestLoader()
+    suite = None
+
+    for i, k in enumerate(listfilesForTest):
+        if debug:
+            errPrint("Test: %s %s" % (i, k))
+        if i == 0:
+            suite = loader.loadTestsFromModule(filesForTest[k][1])
+            pass
+        else:
+            suite.addTests(loader.loadTestsFromModule(filesForTest[k][1]))
+            pass
+
+    if args.type == "std":
+        runner = unittest.TextTestRunner(verbosity=args.verbosity)
+    elif args.type == "html":
+        runner = HTST.HTMLTestRunner(
+            verbosity=args.verbosity,
+        )
+    elif args.type == "xml":
+        if args.name == "stdout":
+            # all-in-one xml output at 'sys.stdout' for pipe redirection
+            runner = XTST.XMLTestRunner(verbosity=args.verbosity, output=sys.stdout)
+        else:
+            # one file xml per test in suite in args.name directory
+            runner = XTST.XMLTestRunner(verbosity=args.verbosity, output=args.name)
     else:
-      #one file xml per test in suite in args.name directory
-      runner = XTST.XMLTestRunner(verbosity=args.verbosity, output=args.name)
-  else:
-    errPrint("ERROR: unknown type of output: '%s'" % args.type)
-    return None    
-    
-  if suite != None: result = runner.run(suite)
-  return result
+        errPrint("ERROR: unknown type of output: '%s'" % args.type)
+        return None
+
+    if suite != None:
+        result = runner.run(suite)
+    return result
+
 
 ###################################################################
 def runFromEnvVar(envVar, fileTestPattern="*Test.py"):
-  """
-  example: 
-    import AllTestLauncher as ATL
-    ATL.runFromEnvVar("MICROGEN_ROOT_DIR")
-    ATL.runFromEnvVar("MICROGEN_ROOT_DIR", "aggregate_*GJKTest.py")
-  """
-  env=os.environ
-  res = []
-  for i in sorted(env):
-    if envVar in i:
-      res.append(i)
-  if len(res) > 1:
-    mess = "multiple environment variable for '%s': %s" % (envVar, str(res))
-    errPrint("ERROR: " + mess)
-    return None
-  if len(res) < 1:
-    mess = "no environment variable for '%s'" % (envVar)
-    errPrint("ERROR: " + mess)
-    return None
-  res = res[0]
-  tmp = env[res].split(":")
-  if len(tmp) > 1:
-    mess = "need only one path in environment variable '%s'" % (res)
-    errPrint("ERROR: " + mess)
-    return None  
-  run(fromFileOrPath=env[res], fileTestPattern=fileTestPattern)
+    """
+    example:
+      import AllTestLauncher as ATL
+      ATL.runFromEnvVar("MICROGEN_ROOT_DIR")
+      ATL.runFromEnvVar("MICROGEN_ROOT_DIR", "aggregate_*GJKTest.py")
+    """
+    env = os.environ
+    res = []
+    for i in sorted(env):
+        if envVar in i:
+            res.append(i)
+    if len(res) > 1:
+        mess = "multiple environment variable for '%s': %s" % (envVar, str(res))
+        errPrint("ERROR: " + mess)
+        return None
+    if len(res) < 1:
+        mess = "no environment variable for '%s'" % (envVar)
+        errPrint("ERROR: " + mess)
+        return None
+    res = res[0]
+    tmp = env[res].split(":")
+    if len(tmp) > 1:
+        mess = "need only one path in environment variable '%s'" % (res)
+        errPrint("ERROR: " + mess)
+        return None
+    run(fromFileOrPath=env[res], fileTestPattern=fileTestPattern)
 
 
 ###################################################################
 def getParser():
-  parser = AP.ArgumentParser(description='launch All salomeTools python tests', argument_default=None)
-
-  parser.add_argument(
-    '-d', '--debug', 
-    help='set debug mode, more verbose',
-    action='store_true',
-  )
-  parser.add_argument(
-    '-v', '--verbosity', 
-    help='set verbosity of unittests [0|1|2...]',
-    default=2,
-    metavar='int'
-  )
-  parser.add_argument(
-    '-r', '--rootPath', 
-    help="""\
+    parser = AP.ArgumentParser(
+        description="launch All salomeTools python tests", argument_default=None
+    )
+
+    parser.add_argument(
+        "-d",
+        "--debug",
+        help="set debug mode, more verbose",
+        action="store_true",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbosity",
+        help="set verbosity of unittests [0|1|2...]",
+        default=2,
+        metavar="int",
+    )
+    parser.add_argument(
+        "-r",
+        "--rootPath",
+        help="""\
 dir name with absolute or relative path stand for root directory
 of recursive searching unittest python files
 """,
-   default=defaultdir,
-   metavar='dirPath'
-  )
-  parser.add_argument(
-    '-p', '--pattern', 
-    help="file pattern for unittest files ['test_*.py'|'*Test.py'...]",
-    default="test_???_*.py", # as alphabetical ordered test site
-    metavar='filePattern'
-  )
-  parser.add_argument(
-    '-t', '--type', 
-    help="type of output: ['std'(standart ascii)|'xml'|'html']",
-    default="std",
-    choices=['std', 'xml', 'html'],
-    metavar='outputType'
-  )
-  parser.add_argument(
-    '-n', '--name', 
-    help="""\
+        default=defaultdir,
+        metavar="dirPath",
+    )
+    parser.add_argument(
+        "-p",
+        "--pattern",
+        help="file pattern for unittest files ['test_*.py'|'*Test.py'...]",
+        default="test_???_*.py",  # as alphabetical ordered test site
+        metavar="filePattern",
+    )
+    parser.add_argument(
+        "-t",
+        "--type",
+        help="type of output: ['std'(standart ascii)|'xml'|'html']",
+        default="std",
+        choices=["std", "xml", "html"],
+        metavar="outputType",
+    )
+    parser.add_argument(
+        "-n",
+        "--name",
+        help="""\
 (only for type xml)
 name of directory output: ['test_reports'|...].
 If name = 'stdout' then all-in-one xml output at 'sys.stdout'. For pipe redirection:
 '>> AllTestLauncher.py -t xml -n stdout > tmp.xml'
 """,
-    default="test_reports",
-    metavar='dirName'
-  )
-  return parser
+        default="test_reports",
+        metavar="dirName",
+    )
+    return parser
 
-#export PATH=defaultdir:${PATH}
-
-###################################################################
-if __name__ == '__main__':
-  # Make the src & command package accessible from all code
-  # as export PYTHONPATH=defaultdir:${PYTHONPATH}
-  # https://docs.python.org/2/library/os.html
-  # On some platforms, including FreeBSD and Mac OS X, 
-  # setting environ may cause memory leak
-  # so use sys.path
-  # errPrint("INFO    : AllTestLauncher sys.path:\n'%s'" % PP.pformat(sys.path)
-  if defaultdir not in sys.path[0]:
-    sys.path.insert(0, defaultdir)
-    errPrint("WARNING : sys.path prepend '%s'\n" % defaultdir)
-
-  args = getParser().parse_args(sys.argv[1:])
-  debug = args.debug
-  directory = os.path.realpath(args.rootPath)
-  if debug: print("INFO: args:\n  %s" % PP.pformat(args))
-  sys.path.insert(0, directory) #supposed to be root of a package
-  
-  runOnArgs(args)
 
+# export PATH=defaultdir:${PATH}
 
+###################################################################
+if __name__ == "__main__":
+    # Make the src & command package accessible from all code
+    # as export PYTHONPATH=defaultdir:${PYTHONPATH}
+    # https://docs.python.org/2/library/os.html
+    # On some platforms, including FreeBSD and Mac OS X,
+    # setting environ may cause memory leak
+    # so use sys.path
+    # errPrint("INFO    : AllTestLauncher sys.path:\n'%s'" % PP.pformat(sys.path)
+    if defaultdir not in sys.path[0]:
+        sys.path.insert(0, defaultdir)
+        errPrint("WARNING : sys.path prepend '%s'\n" % defaultdir)
+
+    args = getParser().parse_args(sys.argv[1:])
+    debug = args.debug
+    directory = os.path.realpath(args.rootPath)
+    if debug:
+        print("INFO: args:\n  %s" % PP.pformat(args))
+    sys.path.insert(0, directory)  # supposed to be root of a package
+
+    runOnArgs(args)
__init__.py
index 2646961521f6c20eb55adad6a7a3231301cea35f..d6c512dde61052d99928aa02a7afea799f8ac0c7 100644 (file)
@@ -1 +1,6 @@
-__all__ = ['src', 'commands', "test", "unittestpy", ]
\ No newline at end of file
+__all__ = [
+    "src",
+    "commands",
+    "test",
+    "unittestpy",
+]
commands/__init__.py
index c3aa9d1ad1885e0d2c86c8fb8b26a7af079b2bb3..156027838d634024e62afad977a47e811549991d 100644 (file)
@@ -1,4 +1,3 @@
-
 """
 import os
 import gettext
commands/application.py
index 025baf3077a79443c5f4face376680e2bceaeb55..1a2a67f7af79eea53944226a15dcfff11e9da6de 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,27 +25,60 @@ import getpass
 
 from src import ElementTree as etree
 import src
-from  src.versionMinorMajorPatch import MinorMajorPatch as MMP
+from src.versionMinorMajorPatch import MinorMajorPatch as MMP
 
 parser = src.options.Options()
-parser.add_option('n', 'name', 'string', 'name',
-    _('Optional: The name of the application (default is APPLICATION.virtual_app.name or '
-      'runAppli)'))
-parser.add_option('c', 'catalog', 'string', 'catalog',
-    _('Optional: The resources catalog to use'))
-parser.add_option('t', 'target', 'string', 'target',
-    _('Optional: The directory where to create the application (default is '
-      'APPLICATION.workdir)'))
-parser.add_option('', 'gencat', 'string', 'gencat',
-    _("Optional: Create a resources catalog for the specified machines "
-      "(separated with ',')\n\tNOTICE: this command will ssh to retrieve "
-      "information to each machine in the list"))
-parser.add_option('m', 'module', 'list2', 'modules',
-    _("Optional: the restricted list of module(s) to include in the "
-      "application"))
-parser.add_option('', 'use_mesa', 'boolean', 'use_mesa',
-    _("Optional: Create a launcher that will use mesa products\n\t"
-      "It can be usefull whan salome is used on a remote machine through ssh"))
+parser.add_option(
+    "n",
+    "name",
+    "string",
+    "name",
+    _(
+        "Optional: The name of the application (default is APPLICATION.virtual_app.name or "
+        "runAppli)"
+    ),
+)
+parser.add_option(
+    "c", "catalog", "string", "catalog", _("Optional: The resources catalog to use")
+)
+parser.add_option(
+    "t",
+    "target",
+    "string",
+    "target",
+    _(
+        "Optional: The directory where to create the application (default is "
+        "APPLICATION.workdir)"
+    ),
+)
+parser.add_option(
+    "",
+    "gencat",
+    "string",
+    "gencat",
+    _(
+        "Optional: Create a resources catalog for the specified machines "
+        "(separated with ',')\n\tNOTICE: this command will ssh to retrieve "
+        "information to each machine in the list"
+    ),
+)
+parser.add_option(
+    "m",
+    "module",
+    "list2",
+    "modules",
+    _("Optional: the restricted list of module(s) to include in the " "application"),
+)
+parser.add_option(
+    "",
+    "use_mesa",
+    "boolean",
+    "use_mesa",
+    _(
+        "Optional: Create a launcher that will use mesa products\n\t"
+        "It can be usefull whan salome is used on a remote machine through ssh"
+    ),
+)
 
 ##
 # Creates an alias for runAppli.
@@ -53,9 +86,10 @@ def make_alias(appli_path, alias_path, force=False):
     assert len(alias_path) > 0, "Bad name for alias"
     if os.path.exists(alias_path) and not force:
         raise src.SatException(_("Cannot create the alias '%s'\n") % alias_path)
-    else: # find relative path
+    else:  # find relative path
         os.symlink(appli_path, alias_path)
 
+
 ##
 # add the definition of a module to out stream.
 def add_module_to_appli(out, module, has_gui, module_path, logger, flagline):
@@ -63,26 +97,33 @@ def add_module_to_appli(out, module, has_gui, module_path, logger, flagline):
         if not flagline:
             logger.write("\n", 3, False)
             flagline = True
-        logger.write("  " + src.printcolors.printcWarning(_(
-                        "WARNING: module %s not installed") % module) + "\n", 3)
-
-    out.write('   <module name="%s" gui="%s" path="%s"/>\n' % (module,
-                                                               has_gui,
-                                                               module_path))
+        logger.write(
+            "  "
+            + src.printcolors.printcWarning(
+                _("WARNING: module %s not installed") % module
+            )
+            + "\n",
+            3,
+        )
+
+    out.write(
+        '   <module name="%s" gui="%s" path="%s"/>\n' % (module, has_gui, module_path)
+    )
     return flagline
 
+
 ##
 # Creates the config file to create an application with the list of modules.
 def create_config_file(config, modules, env_files, logger):
 
     samples = ""
-    if 'SAMPLES' in config.APPLICATION.products:
-        samples = src.product.get_product_config(config, 'SAMPLES').source_dir
+    if "SAMPLES" in config.APPLICATION.products:
+        samples = src.product.get_product_config(config, "SAMPLES").source_dir
 
     config_file = src.get_tmp_filename(config, "appli_config.xml")
     f = open(config_file, "w")
 
-    f.write('<application>\n')
+    f.write("<application>\n")
     for env_file in env_files:
         if env_file.endswith("cfg"):
             f.write('<context path="%s"/>\n' % env_file)
@@ -90,7 +131,7 @@ def create_config_file(config, modules, env_files, logger):
             f.write('<prerequisites path="%s"/>\n' % env_file)
 
     f.write('<resources path="CatalogResources.xml"/>\n')
-    f.write('<modules>\n')
+    f.write("<modules>\n")
 
     flagline = False
     for m in modules:
@@ -101,52 +142,51 @@ def create_config_file(config, modules, env_files, logger):
         # do not include products that do not compile
         if not src.product.product_compiles(mm):
             continue
-        #obsolete?
+        # obsolete?
         if src.product.product_is_smesh_plugin(mm):
             continue
 
-        if 'install_dir' in mm and bool(mm.install_dir):
+        if "install_dir" in mm and bool(mm.install_dir):
             if src.product.product_is_cpp(mm):
                 # cpp module
                 for aa in src.product.get_product_components(mm):
-                    install_dir=os.path.join(config.APPLICATION.workdir,
-                                             config.INTERNAL.config.install_dir)
+                    install_dir = os.path.join(
+                        config.APPLICATION.workdir, config.INTERNAL.config.install_dir
+                    )
                     mp = os.path.join(install_dir, aa)
-                    flagline = add_module_to_appli(f,
-                                                   aa,
-                                                   "yes",
-                                                   mp,
-                                                   logger,
-                                                   flagline)
+                    flagline = add_module_to_appli(f, aa, "yes", mp, logger, flagline)
             else:
                 # regular module
                 mp = mm.install_dir
                 gui = src.get_cfg_param(mm, "has_gui", "yes")
                 flagline = add_module_to_appli(f, m, gui, mp, logger, flagline)
 
-    f.write('</modules>\n')
+    f.write("</modules>\n")
     f.write('<samples path="%s"/>\n' % samples)
-    f.write('</application>\n')
+    f.write("</application>\n")
     f.close()
 
     return config_file
 
+
 ##
 # Customizes the application by editing SalomeApp.xml.
 def customize_app(config, appli_dir, logger):
-    if 'configure' not in config.APPLICATION.virtual_app \
-        or len(config.APPLICATION.virtual_app.configure) == 0:
+    if (
+        "configure" not in config.APPLICATION.virtual_app
+        or len(config.APPLICATION.virtual_app.configure) == 0
+    ):
         return
 
     # shortcut to get an element (section or parameter) from parent.
     def get_element(parent, name, strtype):
         for c in parent.getchildren():
-            if c.attrib['name'] == name:
+            if c.attrib["name"] == name:
                 return c
 
         # element not found create it
         elt = add_simple_node(parent, strtype)
-        elt.attrib['name'] = name
+        elt.attrib["name"] = name
         return elt
 
     # shortcut method to create a node
@@ -170,69 +210,76 @@ def customize_app(config, appli_dir, logger):
     logger.write("\n", 4)
     for section_name in config.APPLICATION.virtual_app.configure:
         for parameter_name in config.APPLICATION.virtual_app.configure[section_name]:
-            parameter_value = config.APPLICATION.virtual_app.configure[section_name][parameter_name]
-            logger.write("  configure: %s/%s = %s\n" % (section_name,
-                                                        parameter_name,
-                                                        parameter_value), 4)
+            parameter_value = config.APPLICATION.virtual_app.configure[section_name][
+                parameter_name
+            ]
+            logger.write(
+                "  configure: %s/%s = %s\n"
+                % (section_name, parameter_name, parameter_value),
+                4,
+            )
             section = get_element(document, section_name, "section")
             parameter = get_element(section, parameter_name, "parameter")
-            parameter.attrib['value'] = parameter_value
+            parameter.attrib["value"] = parameter_value
 
     # write the file
     f = open(app_file, "w")
     f.write("<?xml version='1.0' encoding='utf-8'?>\n")
-    f.write(etree.tostring(document, encoding='utf-8'))
+    f.write(etree.tostring(document, encoding="utf-8"))
     f.close()
 
+
 ##
 # Generates the application with the config_file.
 def generate_application(config, appli_dir, config_file, logger):
     target_dir = os.path.dirname(appli_dir)
 
-    install_KERNEL_dir = src.product.get_product_config(config,
-                                                        'KERNEL').install_dir
+    install_KERNEL_dir = src.product.get_product_config(config, "KERNEL").install_dir
     script = os.path.join(install_KERNEL_dir, "bin", "salome", "appli_gen.py")
     if not os.path.exists(script):
         raise src.SatException(_("KERNEL is not installed"))
 
     # Add SALOME python in the environment in order to avoid python version
     # problems at appli_gen.py call
-    if 'Python' in config.APPLICATION.products:
-        envi = src.environment.SalomeEnviron(config,
-                                             src.environment.Environ(
-                                                              dict(os.environ)),
-                                             True)
-        envi.set_a_product('Python', logger)
-
-    command = "python %s --prefix=%s --config=%s" % (script,
-                                                     appli_dir,
-                                                     config_file)
+    if "Python" in config.APPLICATION.products:
+        envi = src.environment.SalomeEnviron(
+            config, src.environment.Environ(dict(os.environ)), True
+        )
+        envi.set_a_product("Python", logger)
+
+    command = "python %s --prefix=%s --config=%s" % (script, appli_dir, config_file)
     logger.write("\n>" + command + "\n", 5, False)
-    res = subprocess.call(command,
-                    shell=True,
-                    cwd=target_dir,
-                    env=envi.environ.environ,
-                    stdout=logger.logTxtFile,
-                    stderr=subprocess.STDOUT)
+    res = subprocess.call(
+        command,
+        shell=True,
+        cwd=target_dir,
+        env=envi.environ.environ,
+        stdout=logger.logTxtFile,
+        stderr=subprocess.STDOUT,
+    )
 
     if res != 0:
         raise src.SatException(_("Cannot create application, code = %d\n") % res)
 
     return res
 
+
 ##
 #
 def write_step(logger, message, level=3, pad=50):
-    logger.write("%s %s " % (message, '.' * (pad - len(message.decode("UTF-8")))), level)
+    logger.write(
+        "%s %s " % (message, "." * (pad - len(message.decode("UTF-8")))), level
+    )
     logger.flush()
 
+
 ##
 # Creates a SALOME application.
 def create_application(config, appli_dir, catalog, logger, display=True):
 
     SALOME_modules = get_SALOME_modules(config)
 
-    warn = ['KERNEL', 'GUI']
+    warn = ["KERNEL", "GUI"]
     if display:
         for w in warn:
             if w not in SALOME_modules:
@@ -240,11 +287,7 @@ def create_application(config, appli_dir, catalog, logger, display=True):
                 logger.write(src.printcolors.printcWarning(msg), 2)
 
     # generate the launch file
-    retcode = generate_launch_file(config,
-                                   appli_dir,
-                                   catalog,
-                                   logger,
-                                   SALOME_modules)
+    retcode = generate_launch_file(config, appli_dir, catalog, logger, SALOME_modules)
 
     if retcode == 0:
         cmd = src.printcolors.printcLabel("%s/salome" % appli_dir)
@@ -256,15 +299,18 @@ def create_application(config, appli_dir, catalog, logger, display=True):
         logger.write("\n", 3, False)
     return retcode
 
+
 def get_SALOME_modules(config):
     l_modules = []
     for product in config.APPLICATION.products:
         product_info = src.product.get_product_config(config, product)
-        if (src.product.product_is_salome(product_info) or
-               src.product.product_is_generated(product_info)):
+        if src.product.product_is_salome(
+            product_info
+        ) or src.product.product_is_generated(product_info):
             l_modules.append(product)
     return l_modules
 
+
 ##
 # Obsolescent way of creating the application.
 # This method will use appli_gen to create the application directory.
@@ -280,28 +326,25 @@ def generate_launch_file(config, appli_dir, catalog, logger, l_SALOME_modules):
     # build the application (the name depends upon salome version
     env_file = os.path.join(config.APPLICATION.workdir, "env_launch")
     VersionSalome = src.get_salome_version(config)
-    if VersionSalome>=MMP([8,2,0]):
+    if VersionSalome >= MMP([8, 2, 0]):
         # for salome 8+ we use a salome context file for the virtual app
-        app_shell=["cfg", "bash"]
-        env_files=[env_file+".cfg", env_file+".sh"]
+        app_shell = ["cfg", "bash"]
+        env_files = [env_file + ".cfg", env_file + ".sh"]
     else:
-        app_shell=["bash"]
-        env_files=[env_file+".sh"]
+        app_shell = ["bash"]
+        env_files = [env_file + ".sh"]
 
     try:
         import environ
+
         # generate only shells the user wants (by default bash, csh, batch)
         # the environ command will only generate file compatible
         # with the current system.
-        environ.write_all_source_files(config,
-                                       logger,
-                                       shells=app_shell,
-                                       silent=True)
+        environ.write_all_source_files(config, logger, shells=app_shell, silent=True)
         status = src.OK_STATUS
     finally:
         logger.write(src.printcolors.printc(status) + "\n", 2, False)
 
-
     write_step(logger, _("Building application"), level=2)
     cf = create_config_file(config, l_SALOME_modules, env_files, logger)
 
@@ -324,7 +367,6 @@ def generate_launch_file(config, appli_dir, catalog, logger, l_SALOME_modules):
     return retcode
 
 
-
 ##
 # Generates the catalog from a list of machines.
 def generate_catalog(machines, config, logger):
@@ -332,94 +374,103 @@ def generate_catalog(machines, config, logger):
     machines = map(lambda l: l.strip(), machines)
     machines = filter(lambda l: len(l) > 0, machines)
 
-    src.printcolors.print_value(logger,
-                                _("Generate Resources Catalog"),
-                                ", ".join(machines),
-                                4)
+    src.printcolors.print_value(
+        logger, _("Generate Resources Catalog"), ", ".join(machines), 4
+    )
     cmd = '"cat /proc/cpuinfo | grep MHz ; cat /proc/meminfo | grep MemTotal"'
     user = getpass.getuser()
 
     catfile = src.get_tmp_filename(config, "CatalogResources.xml")
-    with open(catfile, 'w') as catalog:
+    with open(catfile, "w") as catalog:
         catalog.write("<!DOCTYPE ResourcesCatalog>\n<resources>\n")
         for k in machines:
             if not src.architecture.is_windows():
-                logger.write("    ssh %s " % (k + " ").ljust(20, '.'), 4)
+                logger.write("    ssh %s " % (k + " ").ljust(20, "."), 4)
                 logger.flush()
 
                 ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s %s' % (k, cmd)
-                p = subprocess.Popen(ssh_cmd, shell=True,
-                        stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
+                p = subprocess.Popen(
+                    ssh_cmd,
+                    shell=True,
+                    stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                )
                 p.wait()
 
-                machine_access = (p.returncode == 0)
+                machine_access = p.returncode == 0
                 if not machine_access:
                     logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 4)
-                    logger.write("    " + src.printcolors.printcWarning(p.stderr.read()),
-                                 2)
+                    logger.write(
+                        "    " + src.printcolors.printcWarning(p.stderr.read()), 2
+                    )
                 else:
                     logger.write(src.printcolors.printc(src.OK_STATUS) + "\n", 4)
                     lines = p.stdout.readlines()
-                    freq = lines[0][:-1].split(':')[-1].split('.')[0].strip()
-                    nb_proc = len(lines) -1
-                    memory = lines[-1].split(':')[-1].split()[0].strip()
+                    freq = lines[0][:-1].split(":")[-1].split(".")[0].strip()
+                    nb_proc = len(lines) - 1
+                    memory = lines[-1].split(":")[-1].split()[0].strip()
                     memory = int(memory) / 1000
 
                 catalog.write("    <machine\n")
-                catalog.write("        protocol=\"ssh\"\n")
-                catalog.write("        nbOfNodes=\"1\"\n")
-                catalog.write("        mode=\"interactif\"\n")
-                catalog.write("        OS=\"LINUX\"\n")
+                catalog.write('        protocol="ssh"\n')
+                catalog.write('        nbOfNodes="1"\n')
+                catalog.write('        mode="interactif"\n')
+                catalog.write('        OS="LINUX"\n')
 
                 if (not src.architecture.is_windows()) and machine_access:
-                    catalog.write("        CPUFreqMHz=\"%s\"\n" % freq)
-                    catalog.write("        nbOfProcPerNode=\"%s\"\n" % nb_proc)
-                    catalog.write("        memInMB=\"%s\"\n" % memory)
+                    catalog.write('        CPUFreqMHz="%s"\n' % freq)
+                    catalog.write('        nbOfProcPerNode="%s"\n' % nb_proc)
+                    catalog.write('        memInMB="%s"\n' % memory)
 
-                catalog.write("        userName=\"%s\"\n" % user)
-                catalog.write("        name=\"%s\"\n" % k)
-                catalog.write("        hostname=\"%s\"\n" % k)
+                catalog.write('        userName="%s"\n' % user)
+                catalog.write('        name="%s"\n' % k)
+                catalog.write('        hostname="%s"\n' % k)
                 catalog.write("    >\n")
                 catalog.write("    </machine>\n")
 
         catalog.write("</resources>\n")
     return catfile
 
+
 ##################################################
 
 ##
 # Describes the command
 def description():
-    '''method that is called when salomeTools is called with --help option.
+    """method that is called when salomeTools is called with --help option.
 
     :return: The text to display for the application command description.
     :rtype: str
-    '''
-    return _("The application command creates a SALOME application.\n"
-             "WARNING: it works only for SALOME 6. Use the \"launcher\" "
-             "command for newer versions of SALOME\n\nexample:\nsat application"
-             " SALOME-6.6.0")
+    """
+    return _(
+        "The application command creates a SALOME application.\n"
+        'WARNING: it works only for SALOME 6. Use the "launcher" '
+        "command for newer versions of SALOME\n\nexample:\nsat application"
+        " SALOME-6.6.0"
+    )
+
 
 ##
 # Runs the command.
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with application
-       parameter.
-    '''
+    """method that is called when salomeTools is called with application
+    parameter.
+    """
 
     (options, args) = parser.parse_args(args)
 
     # check for product
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     application = src.printcolors.printcLabel(runner.cfg.VARS.application)
     logger.write(_("Building application for %s\n") % application, 1)
 
     # if section APPLICATION.virtual_app does not exists create one
     if "virtual_app" not in runner.cfg.APPLICATION:
-        msg = _("The section APPLICATION.virtual_app is not defined in the product. Use sat launcher in state")
+        msg = _(
+            "The section APPLICATION.virtual_app is not defined in the product. Use sat launcher in state"
+        )
         logger.write(src.printcolors.printcError(msg), 1)
         logger.write("\n", 1)
         return 1
@@ -431,7 +482,7 @@ def run(args, runner, logger):
 
     # set list of modules
     if options.modules:
-        runner.cfg.APPLICATION.virtual_app['modules'] = options.modules
+        runner.cfg.APPLICATION.virtual_app["modules"] = options.modules
 
     # activate mesa use in the generated application
     if options.use_mesa:
@@ -439,18 +490,19 @@ def run(args, runner, logger):
 
     # set name and application_name
     if options.name:
-        runner.cfg.APPLICATION.virtual_app['name'] = options.name
-        runner.cfg.APPLICATION.virtual_app['application_name'] = options.name + "_appdir"
-
-    application_name = src.get_cfg_param(runner.cfg.APPLICATION.virtual_app,
-                                         "application_name",
-                                         runner.cfg.APPLICATION.virtual_app.name + "_appdir")
+        runner.cfg.APPLICATION.virtual_app["name"] = options.name
+        runner.cfg.APPLICATION.virtual_app["application_name"] = (
+            options.name + "_appdir"
+        )
+
+    application_name = src.get_cfg_param(
+        runner.cfg.APPLICATION.virtual_app,
+        "application_name",
+        runner.cfg.APPLICATION.virtual_app.name + "_appdir",
+    )
     appli_dir = os.path.join(target_dir, application_name)
 
-    src.printcolors.print_value(logger,
-                                _("Application directory"),
-                                appli_dir,
-                                3)
+    src.printcolors.print_value(logger, _("Application directory"), appli_dir, 3)
 
     # get catalog
     catalog, catalog_src = "", ""
@@ -460,9 +512,8 @@ def run(args, runner, logger):
     elif options.gencat:
         # generate catalog for given list of computers
         catalog_src = options.gencat
-        catalog = generate_catalog(options.gencat.split(","),
-                                   runner.cfg,logger)
-    elif 'catalog' in runner.cfg.APPLICATION.virtual_app:
+        catalog = generate_catalog(options.gencat.split(","), runner.cfg, logger)
+    elif "catalog" in runner.cfg.APPLICATION.virtual_app:
         # use catalog specified in the product
         if runner.cfg.APPLICATION.virtual_app.catalog.endswith(".xml"):
             # catalog as a file
@@ -470,25 +521,24 @@ def run(args, runner, logger):
         else:
             # catalog as a list of computers
             catalog_src = runner.cfg.APPLICATION.virtual_app.catalog
-            mlist = filter(lambda l: len(l.strip()) > 0,
-                           runner.cfg.APPLICATION.virtual_app.catalog.split(","))
+            mlist = filter(
+                lambda l: len(l.strip()) > 0,
+                runner.cfg.APPLICATION.virtual_app.catalog.split(","),
+            )
             if len(mlist) > 0:
-                catalog = generate_catalog(runner.cfg.APPLICATION.virtual_app.catalog.split(","),
-                                           runner.cfg, logger)
+                catalog = generate_catalog(
+                    runner.cfg.APPLICATION.virtual_app.catalog.split(","),
+                    runner.cfg,
+                    logger,
+                )
 
     # display which catalog is used
     if len(catalog) > 0:
         catalog = os.path.realpath(catalog)
         if len(catalog_src) > 0:
-            src.printcolors.print_value(logger,
-                                        _("Resources Catalog"),
-                                        catalog_src,
-                                        3)
+            src.printcolors.print_value(logger, _("Resources Catalog"), catalog_src, 3)
         else:
-            src.printcolors.print_value(logger,
-                                        _("Resources Catalog"),
-                                        catalog,
-                                        3)
+            src.printcolors.print_value(logger, _("Resources Catalog"), catalog, 3)
 
     logger.write("\n", 3, False)
 
@@ -506,7 +556,7 @@ def run(args, runner, logger):
 
     # generate the application
     try:
-        try: # try/except/finally not supported in all version of python
+        try:  # try/except/finally not supported in all version of python
             retcode = create_application(runner.cfg, appli_dir, catalog, logger)
         except Exception as exc:
             details.append(str(exc))
commands/check.py
index 366a1371e05b7e68591235f1c66a0467eb4e9d55..4f30923c6c8eeb1546aa81daa1b1e156d87c59e3 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
 import os
-
 import src
 
 # Define all possible option for the check command :  sat check <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to check. This option accepts a comma separated list.'))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to check. This option accepts a comma separated list."),
+)
 
 CHECK_PROPERTY = "has_unit_tests"
 
@@ -34,6 +38,7 @@ def log_step(logger, header, step):
     logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -42,38 +47,40 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
+
 def check_all_products(config, products_infos, logger):
-    '''Execute the proper configuration commands 
+    """Execute the proper configuration commands
        in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_info list: List of
                                  (str, Config) => (product_name, product_info)
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
+    """
     res = 0
     for p_name_info in products_infos:
         res_prod = check_product(p_name_info, config, logger)
         if res_prod != 0:
-            res += 1 
+            res += 1
     return res
 
+
 def check_product(p_name_info, config, logger):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-    
+
     # Logging
     logger.write("\n", 4, False)
     logger.write("################ ", 4)
@@ -86,27 +93,32 @@ def check_product(p_name_info, config, logger):
     # Verify if the command has to be launched or not
     ignored = False
     if src.product.product_is_native(p_info):
-        msg = _("The product %s is defined as being native. "
-                "product ignored." % p_name)
+        msg = _(
+            "The product %s is defined as being native. " "product ignored." % p_name
+        )
         logger.write("%s\n" % msg, 4)
         ignored = True
     elif not src.get_property_in_product_cfg(p_info, CHECK_PROPERTY):
-        msg = _("The product %s is defined as not having tests. "
-                "product ignored." % p_name)
+        msg = _(
+            "The product %s is defined as not having tests. "
+            "product ignored." % p_name
+        )
         logger.write("%s\n" % msg, 4)
         ignored = True
     elif not src.product.product_compiles(p_info):
-        msg = _("The product %s is defined as not compiling. "
-                "product ignored." % p_name)
+        msg = _(
+            "The product %s is defined as not compiling. " "product ignored." % p_name
+        )
         logger.write("%s\n" % msg, 4)
         ignored = True
     elif "build_dir" not in p_info:
-        msg = _("No build_dir key defined in "
-                "the config file of %s: product ignored." % p_name)
+        msg = _(
+            "No build_dir key defined in "
+            "the config file of %s: product ignored." % p_name
+        )
         logger.write("%s\n" % msg, 4)
         ignored = True
 
-
     # Get the command to execute for script products
     cmd_found = True
     command = ""
@@ -114,105 +126,128 @@ def check_product(p_name_info, config, logger):
         command = src.get_cfg_param(p_info, "test_build", "Not found")
         if command == "Not found":
             cmd_found = False
-            msg = _('WARNING: The product %s is defined as having tests. But it'
-                    ' is compiled using a script and the key "test_build" is '
-                    'not defined in the definition of %s' % (p_name, p_name))
+            msg = _(
+                "WARNING: The product %s is defined as having tests. But it"
+                ' is compiled using a script and the key "test_build" is '
+                "not defined in the definition of %s" % (p_name, p_name)
+            )
             logger.write("%s\n" % msg, 4)
-                
+
     if ignored or not cmd_found:
         log_step(logger, header, "ignored")
-        logger.write("==== %(name)s %(IGNORED)s\n" %
-            { "name" : p_name ,
-             "IGNORED" : src.printcolors.printcInfo("IGNORED")},
-            4)
+        logger.write(
+            "==== %(name)s %(IGNORED)s\n"
+            % {"name": p_name, "IGNORED": src.printcolors.printcInfo("IGNORED")},
+            4,
+        )
         logger.write("\n", 3, False)
         logger.flush()
         if not cmd_found:
             return 1
         return 0
-    
+
     # Instantiate the class that manages all the construction commands
     # like cmake, check, make install, make test, environment management, etc...
     builder = src.compilation.Builder(config, logger, p_name, p_info)
-    
+
     # Prepare the environment
     log_step(logger, header, "PREPARE ENV")
     res_prepare = builder.prepare(add_env_launch=True)
     log_res_step(logger, res_prepare)
-    
+
     len_end_line = 20
 
-    # Launch the check    
+    # Launch the check
     log_step(logger, header, "CHECK")
     res = builder.check(command=command)
     log_res_step(logger, res)
-    
+
     # Log the result
     if res > 0:
         logger.write("\r%s%s" % (header, " " * len_end_line), 3)
         logger.write("\r" + header + src.printcolors.printcError("KO"))
-        logger.write("==== %(KO)s in check of %(name)s \n" %
-            { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+        logger.write(
+            "==== %(KO)s in check of %(name)s \n"
+            % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+            4,
+        )
         logger.flush()
     else:
         logger.write("\r%s%s" % (header, " " * len_end_line), 3)
         logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
         logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-        logger.write("==== Check of %(name)s %(OK)s \n" %
-            { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+        logger.write(
+            "==== Check of %(name)s %(OK)s \n"
+            % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+            4,
+        )
         logger.flush()
     logger.write("\n", 3, False)
 
     return res
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the check command description.
     :rtype: str
-    '''
-    return _("The check command executes the \"check\" command in"
-             " the build directory of all the products of the application."
-             "\nIt is possible to reduce the list of products to check by using"
-             " the --products option\n\nexample\nsat check SALOME-master "
-             "--products KERNEL,GUI,GEOM")
-  
+    """
+    return _(
+        'The check command executes the "check" command in'
+        " the build directory of all the products of the application."
+        "\nIt is possible to reduce the list of products to check by using"
+        " the --products option\n\nexample\nsat check SALOME-master "
+        "--products KERNEL,GUI,GEOM"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with check parameter.
-    '''
-    
+    """method that is called when salomeTools is called with check parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    
+
     # Print some information
-    logger.write(_('Executing the check command in the build '
-                                'directories of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
-    info = [(_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
+    logger.write(
+        _(
+            "Executing the check command in the build "
+            "directories of the application %s\n"
+        )
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
+    info = [
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD"))
+    ]
     src.print_info(logger, info)
-    
+
     # Call the function that will loop over all the products and execute
     # the right command(s)
     res = check_all_products(runner.cfg, products_infos, logger)
-    
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nCheck: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
-    return res 
+
+    logger.write(
+        _("\nCheck: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
+    return res
index 070b87517cbed510c3d216b23ed1ca82005c51a5..3fa5bc41ddc6b33db4a4e8cb34e711fd523f5250 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 #  License along with this library; if not, write to the Free Software
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
-import re
 import os
-
 import src
 
 # Compatibility python 2/3 for input function
 # input stays input for python 3 and input = raw_input for python 2
-try: 
+try:
     input = raw_input
-except NameError: 
+except NameError:
     pass
 
 
 # Define all possible option for the clean command :  sat clean <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: Products to clean. This option accepts a comma separated list.'))
-parser.add_option('s', 'sources', 'boolean', 'sources',
-    _("Optional: Clean the product source directories."))
-parser.add_option('b', 'build', 'boolean', 'build', 
-    _("Optional: Clean the product build directories."))
-parser.add_option('i', 'install', 'boolean', 'install', 
-    _("Optional: Clean the product install directories."))
-parser.add_option('g', 'generated', 'boolean', 'generated', 
-    _("Optional: Clean source, build and install directories for generated products."))
-parser.add_option('', 'package', 'boolean', 'package', 
-    _("Optional: Clean packages produced by sat package command."))
-parser.add_option('a', 'all', 'boolean', 'all', 
-    _("Optional: Clean the product source, build and install directories."))
-parser.add_option('', 'sources_without_dev', 'boolean', 'sources_without_dev', 
-    _("Optional: do not clean the products in development mode."))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: Products to clean. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "s",
+    "sources",
+    "boolean",
+    "sources",
+    _("Optional: Clean the product source directories."),
+)
+parser.add_option(
+    "b",
+    "build",
+    "boolean",
+    "build",
+    _("Optional: Clean the product build directories."),
+)
+parser.add_option(
+    "i",
+    "install",
+    "boolean",
+    "install",
+    _("Optional: Clean the product install directories."),
+)
+parser.add_option(
+    "g",
+    "generated",
+    "boolean",
+    "generated",
+    _("Optional: Clean source, build and install directories for generated products."),
+)
+parser.add_option(
+    "",
+    "package",
+    "boolean",
+    "package",
+    _("Optional: Clean packages produced by sat package command."),
+)
+parser.add_option(
+    "a",
+    "all",
+    "boolean",
+    "all",
+    _("Optional: Clean the product source, build and install directories."),
+)
+parser.add_option(
+    "",
+    "sources_without_dev",
+    "boolean",
+    "sources_without_dev",
+    _("Optional: do not clean the products in development mode."),
+)
 
 
 def get_source_directories(config, products_infos, without_dev):
@@ -65,11 +103,14 @@ def get_source_directories(config, products_infos, without_dev):
     for __, product_info in products_infos:
         if product_has_dir(product_info, without_dev):
             # we do not clean source dirs of pip products when pip is activated
-            if not ( src.appli_test_property(config,"pip", "yes") and\
-                     src.product.product_test_property(product_info,"pip", "yes") ) :
+            if not (
+                src.appli_test_property(config, "pip", "yes")
+                and src.product.product_test_property(product_info, "pip", "yes")
+            ):
                 l_dir_source.append(src.Path(product_info.source_dir))
     return l_dir_source
 
+
 def get_build_directories(products_infos):
     """\
     Returns the list of build directory paths corresponding to the list of 
@@ -87,6 +128,7 @@ def get_build_directories(products_infos):
                 l_dir_build.append(src.Path(product_info.build_dir))
     return l_dir_build
 
+
 def get_install_directories(config, products_infos):
     """\
     Returns the list of install directory paths corresponding to the list of 
@@ -100,12 +142,15 @@ def get_install_directories(config, products_infos):
     for __, product_info in products_infos:
         if product_has_dir(product_info):
             # we do not clean pip products installed in python install dir
-            if not ( src.appli_test_property(config,"pip", "yes") and\
-                     src.product.product_test_property(product_info, "pip", "yes") and\
-                     src.appli_test_property(config,"pip_install_dir", "python") ) :
+            if not (
+                src.appli_test_property(config, "pip", "yes")
+                and src.product.product_test_property(product_info, "pip", "yes")
+                and src.appli_test_property(config, "pip_install_dir", "python")
+            ):
                 l_dir_install.append(src.Path(product_info.install_dir))
     return l_dir_install
 
+
 def get_package_directory(config):
     """\
     Returns the package directory name corresponding to the sat package command
@@ -116,6 +161,7 @@ def get_package_directory(config):
     """
     return [src.Path(os.path.join(config.APPLICATION.workdir, "PACKAGE"))]
 
+
 def get_generated_directories(config, products_infos):
     """\
     Returns the list of directories (source, build, install) corresponding to the 
@@ -135,14 +181,12 @@ def get_generated_directories(config, products_infos):
         generate_dir = os.path.join(workdir, "GENERATED")
         source_dir = os.path.join(generate_dir, compo + "_SRC")
         build_dir = os.path.join(os.path.join(workdir, "BUILD"), compo)
-        install_dir = os.path.join(workdir, config.INTERNAL.config.install_dir,
-                                   compo)
+        install_dir = os.path.join(workdir, config.INTERNAL.config.install_dir, compo)
         l_dir_install.append(src.Path(source_dir))
         l_dir_install.append(src.Path(build_dir))
         l_dir_install.append(src.Path(install_dir))
-        
-    return l_dir_install
 
+    return l_dir_install
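
For orientation, a minimal sketch of the three paths collected for each generated component; the workdir, the component name and the value of config.INTERNAL.config.install_dir ("INSTALL") are hypothetical assumptions here:

    import os

    workdir, compo = "/work/SALOME-master", "PYHELLO"  # hypothetical values
    generated_paths = [
        os.path.join(workdir, "GENERATED", compo + "_SRC"),  # generated sources
        os.path.join(workdir, "BUILD", compo),               # build tree
        os.path.join(workdir, "INSTALL", compo),             # assumes install_dir == "INSTALL"
    ]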
 
 
 def product_has_dir(product_info, without_dev=False):
@@ -154,55 +198,62 @@ def product_has_dir(product_info, without_dev=False):
     :return: True if there is a source, build and install directory corresponding to the product described by product_info.
     :rtype: boolean
     """
-    if (src.product.product_is_native(product_info) or 
-                            src.product.product_is_fixed(product_info)):
+    if src.product.product_is_native(product_info) or src.product.product_is_fixed(
+        product_info
+    ):
         return False
     if without_dev:
         if src.product.product_is_dev(product_info):
             return False
     return True
-    
+
+
 def suppress_directories(l_paths, logger):
     """Suppress the paths given in the list in l_paths.
-    
+
     :param l_paths list: The list of Path to be suppressed
     :param logger Logger: The logger instance to use for the display and logging
-    """    
+    """
     for path in l_paths:
         if not path.isdir():
-            msg = _("Warning: the path %s does not "
-                    "exists (or is not a directory)\n" % path.__str__())
+            msg = _(
+                "Warning: the path %s does not "
+                "exists (or is not a directory)\n" % path.__str__()
+            )
             logger.write(src.printcolors.printcWarning(msg), 1)
         else:
             logger.write(_("Removing %s ...") % path.__str__())
             path.rm()
-            logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+            logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 3)
+
 
 def description():
     """method called when salomeTools is called with --help option.
-    
+
     :return: The text to display for the clean command description.
     :rtype: str
     """
-    return _("""\
+    return _(
+        """\
 The clean command suppresses the SOURCES, BUILD or INSTALL directories of the application products.
 Use the options to define which directories you want to suppress and to set the list of products
 
 example:
 >> sat clean SALOME-xx --build --install --properties is_SALOME_module:yes
-""")
-  
+"""
+    )
+
+
 def run(args, runner, logger):
     """\
     method called when salomeTools is called with clean parameter.
     """
-    
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
-
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
@@ -210,36 +261,44 @@ def run(args, runner, logger):
     # Construct the list of directories to suppress
     l_dir_to_suppress = []
     if options.all:
-        l_dir_to_suppress += (get_source_directories(runner.cfg, products_infos, 
-                                            options.sources_without_dev) +
-                             get_build_directories(products_infos) + 
-                             get_install_directories(runner.cfg, products_infos) + 
-                             get_generated_directories(runner.cfg, products_infos) + 
-                             get_package_directory(runner.cfg) )
+        l_dir_to_suppress += (
+            get_source_directories(
+                runner.cfg, products_infos, options.sources_without_dev
+            )
+            + get_build_directories(products_infos)
+            + get_install_directories(runner.cfg, products_infos)
+            + get_generated_directories(runner.cfg, products_infos)
+            + get_package_directory(runner.cfg)
+        )
     else:
         if options.install:
             l_dir_to_suppress += get_install_directories(runner.cfg, products_infos)
-        
+
         if options.build:
             l_dir_to_suppress += get_build_directories(products_infos)
-            
+
         if options.sources or options.sources_without_dev:
-            l_dir_to_suppress += get_source_directories(runner.cfg, products_infos, 
-                                                options.sources_without_dev)
+            l_dir_to_suppress += get_source_directories(
+                runner.cfg, products_infos, options.sources_without_dev
+            )
         if options.generated:
             l_dir_to_suppress += get_generated_directories(runner.cfg, products_infos)
 
         if options.package:
             l_dir_to_suppress += get_package_directory(runner.cfg)
-    
+
     if len(l_dir_to_suppress) == 0:
         logger.write(src.printcolors.printcWarning(_("Nothing to suppress\n")))
-        logger.write(_("""\
+        logger.write(
+            _(
+                """\
 Please specify what you want to suppress:
try 'sat --help clean' and 'sat clean ... --products ... --sources --build --install'
-"""))
+"""
+            )
+        )
         return
-    
+
     # Check with the user if he really wants to suppress the directories
     if not runner.options.batch:
         logger.write(_("Remove the following directories ?\n"), 1)
@@ -248,8 +307,8 @@ try 'sat --help clean' and 'sat clean ... --products ... --sources --build --ins
         rep = input(_("Are you sure you want to continue? [Yes/No] "))
         if rep.upper() != _("YES"):
             return 0
-    
+
     # Suppress the list of paths
     suppress_directories(l_dir_to_suppress, logger)
-    
+
     return 0
index b6da2f326012a9ee9245719c6a4764665f500403..72e6af4a324e571670a9387c0220087dacd66c2c 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
 import os
-import re
 import subprocess
 import src
 import src.debug as DBG
 
-# Compatibility python 2/3 for input function
-# input stays input for python 3 and input = raw_input for python 2
-try: 
-    input = raw_input
-except NameError: 
-    pass
-
-
 # Define all possible option for the compile command :  sat compile <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to compile. This option accepts a comma separated list.'))
-parser.add_option('f', 'force', 'boolean', 'force',
-    'Optional: force the compilation of product, even if it is already installed. The BUILD directory is cleaned before compilation.')
-parser.add_option('u', 'update', 'boolean', 'update',
-    'Optional: update mode, compile only products which sources has changed, including the dependencies.')
-parser.add_option('', 'with_fathers', 'boolean', 'fathers',
-    _("Optional: build all necessary products to the given product (KERNEL is "
-      "build before building GUI)."), False)
-parser.add_option('', 'with_children', 'boolean', 'children',
-    _("Optional: build all products using the given product (all SMESH plugins"
-      " are build after SMESH)."), False)
-parser.add_option('', 'clean_all', 'boolean', 'clean_all',
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to compile. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "f",
+    "force",
+    "boolean",
+    "force",
+    "Optional: force the compilation of product, even if it is already installed. The BUILD directory is cleaned before compilation.",
+)
+parser.add_option(
+    "u",
+    "update",
+    "boolean",
+    "update",
+    "Optional: update mode, compile only products which sources has changed, including the dependencies.",
+)
+parser.add_option(
+    "",
+    "with_fathers",
+    "boolean",
+    "fathers",
+    _(
+        "Optional: build all necessary products to the given product (KERNEL is "
+        "build before building GUI)."
+    ),
+    False,
+)
+parser.add_option(
+    "",
+    "with_children",
+    "boolean",
+    "children",
+    _(
+        "Optional: build all products using the given product (all SMESH plugins"
+        " are build after SMESH)."
+    ),
+    False,
+)
+parser.add_option(
+    "",
+    "clean_all",
+    "boolean",
+    "clean_all",
     _("Optional: clean BUILD dir and INSTALL dir before building product."),
-    False)
-parser.add_option('', 'clean_install', 'boolean', 'clean_install',
-    _("Optional: clean INSTALL dir before building product."), False)
-parser.add_option('', 'make_flags', 'string', 'makeflags',
-    _("Optional: add extra options to the 'make' command."))
-parser.add_option('', 'show', 'boolean', 'no_compile',
+    False,
+)
+parser.add_option(
+    "",
+    "clean_install",
+    "boolean",
+    "clean_install",
+    _("Optional: clean INSTALL dir before building product."),
+    False,
+)
+parser.add_option(
+    "",
+    "make_flags",
+    "string",
+    "makeflags",
+    _("Optional: add extra options to the 'make' command."),
+)
+parser.add_option(
+    "",
+    "show",
+    "boolean",
+    "no_compile",
     _("Optional: DO NOT COMPILE just show if products are installed or not."),
-    False)
-parser.add_option('', 'stop_first_fail', 'boolean', 'stop_first_fail', _(
-                  "Optional: Stops the command at first product compilation"
-                  " fail."), False)
-parser.add_option('', 'check', 'boolean', 'check', _(
-                  "Optional: execute the unit tests after compilation"), False)
+    False,
+)
+parser.add_option(
+    "",
+    "stop_first_fail",
+    "boolean",
+    "stop_first_fail",
+    _("Optional: Stops the command at first product compilation" " fail."),
+    False,
+)
+parser.add_option(
+    "",
+    "check",
+    "boolean",
+    "check",
+    _("Optional: execute the unit tests after compilation"),
+    False,
+)
 
-parser.add_option('', 'clean_build_after', 'boolean', 'clean_build_after', 
-                  _('Optional: remove the build directory after successful compilation'), False)
+parser.add_option(
+    "",
+    "clean_build_after",
+    "boolean",
+    "clean_build_after",
+    _("Optional: remove the build directory after successful compilation"),
+    False,
+)
 
 
 # from sat product infos, build a simple python graph representing the product dependencies
 # keys are the product names (nodes), values are their lists of dependencies
 def get_dependencies_graph(p_infos, compile_time=True):
-    graph={}
-    for (p_name,p_info) in p_infos:
-        depprod=[]
+    graph = {}
+    for (p_name, p_info) in p_infos:
+        depprod = []
         for d in p_info.depend:
             depprod.append(d)
         if compile_time and "build_depend" in p_info:
             for d in p_info.build_depend:
                 depprod.append(d)
-        graph[p_name]=depprod
+        graph[p_name] = depprod
     return graph
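
For illustration, the returned graph is just a plain dict mapping each product to its direct (and, at compile time, build-time) dependencies; the product names below are hypothetical:

    # Hypothetical result of get_dependencies_graph(p_infos) for a tiny application
    graph = {
        "KERNEL": [],
        "GUI": ["KERNEL"],
        "SMESH": ["KERNEL", "GUI"],
    }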
 
+
 # this recursive function calculates all the dependencies of node start
 def depth_search_graph(graph, start, visited=[]):
-    visited= visited+ [start]
+    visited = visited + [start]
     for node in graph[start]:  # for all nodes in start dependencies
         if node not in visited:
-            visited=depth_search_graph(graph, node, visited)
+            visited = depth_search_graph(graph, node, visited)
     return visited
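
A usage sketch on the hypothetical graph above; the start node itself is part of the result, and the order is the visit order:

    # every product (transitively) needed to build SMESH
    depth_search_graph(graph, "SMESH")  # -> ["SMESH", "KERNEL", "GUI"]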
 
+
 # find a path from start node to end (a group of nodes)
 def find_path_graph(graph, start, end, path=[]):
     path = path + [start]
@@ -96,9 +158,11 @@ def find_path_graph(graph, start, end, path=[]):
     for node in graph[start]:
         if node not in path:
             newpath = find_path_graph(graph, node, end, path)
-            if newpath: return newpath
+            if newpath:
+                return newpath
     return None
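
On the same hypothetical graph, find_path_graph answers "does start depend, directly or transitively, on one of the nodes in end?" by returning a path or None:

    find_path_graph(graph, "SMESH", ["KERNEL"])  # -> ["SMESH", "KERNEL"]
    find_path_graph(graph, "KERNEL", ["GUI"])    # -> None (KERNEL does not depend on GUI)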
 
+
 # Topological sorting algo
 # return in sorted_nodes the list of sorted nodes
 def depth_first_topo_graph(graph, start, visited=[], sorted_nodes=[]):
@@ -106,18 +170,26 @@ def depth_first_topo_graph(graph, start, visited=[], sorted_nodes=[]):
     if start not in graph:
         # get more explicit error
         where = [k for k in graph if start in graph[k]]
-        raise src.SatException('Error in product dependencies : %s product is referenced in products dependencies, but is not present in the application, from %s' % (start, where))
+        raise src.SatException(
+            "Error in product dependencies : %s product is referenced in products dependencies, but is not present in the application, from %s"
+            % (start, where)
+        )
         # in debug mode, one may comment out the raise above and continue the loop to collect all problems
         # print("WARNING : %s product is referenced in products dependencies but is not present in the application, from %s" % (start, where))
         # sorted_nodes = sorted_nodes + [start]
         # return visited, sorted_nodes
     for node in graph[start]:
         if node not in visited:
-            visited,sorted_nodes=depth_first_topo_graph(graph, node, visited,sorted_nodes)
+            visited, sorted_nodes = depth_first_topo_graph(
+                graph, node, visited, sorted_nodes
+            )
         else:
             if node not in sorted_nodes:
-                raise src.SatException('Error in product dependencies : cycle detection for node %s and %s' % (start,node))
-    
+                raise src.SatException(
+                    "Error in product dependencies : cycle detection for node %s and %s"
+                    % (start, node)
+                )
+
     sorted_nodes = sorted_nodes + [start]
     return visited, sorted_nodes
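
A hedged usage sketch of the topological sort on the same hypothetical graph; dependencies come out before the products that need them, i.e. the dependencies-first order used for compilation:

    visited, build_order = depth_first_topo_graph(graph, "SMESH", [], [])
    # build_order -> ["KERNEL", "GUI", "SMESH"]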
 
@@ -127,16 +199,18 @@ def check_dependencies(config, p_name_p_info, all_products_dict):
     l_depends_not_installed = []
     for prod in p_name_p_info[1]["depend_all"]:
         # for each dependency, check the install
-        prod_name, prod_info=all_products_dict[prod]
-        if not(src.product.check_installation(config, prod_info)):
+        prod_name, prod_info = all_products_dict[prod]
+        if not (src.product.check_installation(config, prod_info)):
             l_depends_not_installed.append(prod_name)
-    return l_depends_not_installed   # non installed deps
+    return l_depends_not_installed  # non installed deps
+
 
 def log_step(logger, header, step):
     logger.write("\r%s%s" % (header, " " * 30), 3)
     logger.write("\r%s%s" % (header, step), 3)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -145,90 +219,106 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
-def compile_all_products(sat, config, options, products_infos, all_products_dict, all_products_graph, logger):
-    '''Execute the proper configuration commands 
+
+def compile_all_products(
+    sat, config, options, products_infos, all_products_dict, all_products_graph, logger
+):
+    """Execute the proper configuration commands
        in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_infos list: List of
                                  (str, Config) => (product_name, product_info)
-    :param all_products_dict: Dict of all products 
-    :param all_products_graph: graph of all products 
+    :param all_products_dict: Dict of all products
+    :param all_products_graph: graph of all products
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
-    # first loop for the cleaning 
-    check_salome_configuration=False
-    updated_products=[]
+    """
+    # first loop for the cleaning
+    check_salome_configuration = False
+    updated_products = []
     for p_name_info in products_infos:
-        
+
         p_name, p_info = p_name_info
         if src.product.product_is_salome(p_info):
-            check_salome_configuration=True
-        
+            check_salome_configuration = True
+
         # nothing to clean for native or fixed products
-        if (not src.product.product_compiles(p_info)) or\
-           src.product.product_is_native(p_info) or\
-           src.product.product_is_fixed(p_info):
+        if (
+            (not src.product.product_compiles(p_info))
+            or src.product.product_is_native(p_info)
+            or src.product.product_is_fixed(p_info)
+        ):
             continue
 
-        # Clean the build and the install directories 
+        # Clean the build and the install directories
         # if the corresponding option was called
         if options.clean_all:
-            sat.clean(config.VARS.application + 
-                      " --products " + p_name + 
-                      " --build --install",
-                      batch=True,
-                      verbose=0,
-                      logger_add_link = logger)
+            sat.clean(
+                config.VARS.application
+                + " --products "
+                + p_name
+                + " --build --install",
+                batch=True,
+                verbose=0,
+                logger_add_link=logger,
+            )
 
         else:
-            # Clean the the install directory 
+            # Clean the install directory
             # if the corresponding option was called
             if options.clean_install:
-                sat.clean(config.VARS.application + 
-                          " --products " + p_name + 
-                          " --install",
-                          batch=True,
-                          verbose=0,
-                          logger_add_link = logger)
-            
-            # Clean the the install directory 
+                sat.clean(
+                    config.VARS.application + " --products " + p_name + " --install",
+                    batch=True,
+                    verbose=0,
+                    logger_add_link=logger,
+                )
+
+            # Clean the the install directory
             # if the corresponding option was called
             if options.force:
-                sat.clean(config.VARS.application + 
-                          " --products " + p_name + 
-                          " --build",
-                          batch=True,
-                          verbose=0,
-                          logger_add_link = logger)
+                sat.clean(
+                    config.VARS.application + " --products " + p_name + " --build",
+                    batch=True,
+                    verbose=0,
+                    logger_add_link=logger,
+                )
 
             if options.update and src.product.product_is_vcs(p_info):
-            # only VCS products are concerned by update option
-                try: 
-                    do_update=False
-                    if len(updated_products)>0:
-                        # if other products where updated, check that the current product is a child 
+                # only VCS products are concerned by update option
+                try:
+                    do_update = False
+                    if len(updated_products) > 0:
+                        # if other products were updated, check whether the current product is a child
                         # in this case it will also be updated
-                        if find_path_graph(all_products_graph, p_name, updated_products):
+                        if find_path_graph(
+                            all_products_graph, p_name, updated_products
+                        ):
                             logger.write("\nUpdate product %s (child)" % p_name, 5)
-                            do_update=True
-                    if (not do_update) and os.path.isdir(p_info.source_dir) \
-                                       and os.path.isdir(p_info.install_dir):
-                        source_time=os.path.getmtime(p_info.source_dir)
-                        install_time=os.path.getmtime(p_info.install_dir)
-                        if install_time<source_time:
+                            do_update = True
+                    if (
+                        (not do_update)
+                        and os.path.isdir(p_info.source_dir)
+                        and os.path.isdir(p_info.install_dir)
+                    ):
+                        source_time = os.path.getmtime(p_info.source_dir)
+                        install_time = os.path.getmtime(p_info.install_dir)
+                        if install_time < source_time:
                             logger.write("\nupdate product %s" % p_name, 5)
-                            do_update=True
+                            do_update = True
                     if do_update:
-                        updated_products.append(p_name) 
-                        sat.clean(config.VARS.application + 
-                                  " --products " + p_name + 
-                                  " --build --install",
-                                  batch=True,
-                                  verbose=0,
-                                  logger_add_link = logger)
+                        updated_products.append(p_name)
+                        sat.clean(
+                            config.VARS.application
+                            + " --products "
+                            + p_name
+                            + " --build --install",
+                            batch=True,
+                            verbose=0,
+                            logger_add_link=logger,
+                        )
                 except:
                     pass
 
@@ -236,11 +326,12 @@ def compile_all_products(sat, config, options, products_infos, all_products_dict
         # For salome applications, we check if the sources of configuration modules are present
         # configuration modules have the property "configure_dependency"
         # they are implicit prerequisites of the compilation.
-        res=0
+        res = 0
 
-        # get the list of all modules in application 
-        all_products_infos = src.product.get_products_infos(config.APPLICATION.products,
-                                                            config)
+        # get the list of all modules in application
+        all_products_infos = src.product.get_products_infos(
+            config.APPLICATION.products, config
+        )
         check_source = True
         # for configuration modules, check if sources are present
         for prod in all_products_dict:
@@ -248,20 +339,28 @@ def compile_all_products(sat, config, options, products_infos, all_products_dict
             if src.product.product_is_configuration(product_info):
                 check_source = check_source and src.product.check_source(product_info)
                 if not check_source:
-                    logger.write(_("\nERROR : SOURCES of %s not found! It is required for" 
-                                   " the configuration\n" % product_name))
-                    logger.write(_("        Get it with the command : sat prepare %s -p %s \n" % 
-                                  (config.APPLICATION.name, product_name)))
+                    logger.write(
+                        _(
+                            "\nERROR : SOURCES of %s not found! It is required for"
+                            " the configuration\n" % product_name
+                        )
+                    )
+                    logger.write(
+                        _(
+                            "        Get it with the command : sat prepare %s -p %s \n"
+                            % (config.APPLICATION.name, product_name)
+                        )
+                    )
                     res += 1
-        if res>0:
-            return res  # error configure dependency : we stop the compilation
+        if res > 0:
+            return res  # error configure dependency : we stop the compilation
 
-    # second loop to compile
+    # second loop to compile
     res = 0
     for p_name_info in products_infos:
-        
+
         p_name, p_info = p_name_info
-        
+
         # Logging
         len_end_line = 30
         header = _("Compilation of %s") % src.printcolors.printcLabel(p_name)
@@ -287,22 +386,25 @@ def compile_all_products(sat, config, options, products_infos, all_products_dict
             logger.write("\n", 3, False)
             continue
 
-
         # Recompute the product information to get the right install_dir
         # (it could change if there is a clean of the install directory)
         p_info = src.product.get_product_config(config, p_name)
-        
+
         # Check if sources was already successfully installed
         check_source = src.product.check_source(p_info)
-        is_pip= (src.appli_test_property(config,"pip", "yes") and src.product.product_test_property(p_info,"pip", "yes"))
-        # don't check sources with option --show 
+        is_pip = src.appli_test_property(
+            config, "pip", "yes"
+        ) and src.product.product_test_property(p_info, "pip", "yes")
+        # don't check sources with option --show
         # or for products managed by pip (their sources are in wheels stored in LOCAL.ARCHIVE)
-        if not (options.no_compile or is_pip): 
+        if not (options.no_compile or is_pip):
             if not check_source:
-                logger.write(_("Sources of product not found (try 'sat -h prepare') \n"))
-                res += 1 # one more error
+                logger.write(
+                    _("Sources of product not found (try 'sat -h prepare') \n")
+                )
+                res += 1  # one more error
                 continue
-        
+
         # if we don't force compilation, check if the product was already successfully installed.
         # we don't compile in this case.
         if (not options.force) and src.product.check_installation(config, p_info):
@@ -310,331 +412,386 @@ def compile_all_products(sat, config, options, products_infos, all_products_dict
             logger.write(_(" in %s" % p_info.install_dir), 4)
             logger.write(_("\n"))
             continue
-        
+
         # If the show option was called, do not launch the compilation
         if options.no_compile:
             logger.write(_("Not installed in %s\n" % p_info.install_dir))
             continue
-        
+
         # Check if the dependencies are installed
-        l_depends_not_installed = check_dependencies(config, p_name_info, all_products_dict)
+        l_depends_not_installed = check_dependencies(
+            config, p_name_info, all_products_dict
+        )
         if len(l_depends_not_installed) > 0:
             log_step(logger, header, "")
-            logger.write(src.printcolors.printcError(
-                    _("ERROR : the following mandatory product(s) is(are) not installed: ")))
+            logger.write(
+                src.printcolors.printcError(
+                    _(
+                        "ERROR : the following mandatory product(s) is(are) not installed: "
+                    )
+                )
+            )
             for prod_name in l_depends_not_installed:
                 logger.write(src.printcolors.printcError(prod_name + " "))
             logger.write("\n")
             continue
-        
+
         # Call the function to compile the product
         res_prod, len_end_line, error_step = compile_product(
-             sat, p_name_info, config, options, logger, header, len_end_line)
-        
+            sat, p_name_info, config, options, logger, header, len_end_line
+        )
+
         if res_prod != 0:
             res += 1
             # there was an error, we clean the install dir, unless:
             #  - the error step is "check", or
             #  - the product is managed by pip and installed in python dir
-            do_not_clean_install=False
-            is_single_dir=(src.appli_test_property(config,"single_install_dir", "yes") and \
-                           src.product.product_test_property(p_info,"single_install_dir", "yes"))
-              
-            if (error_step == "CHECK") or (is_pip and src.appli_test_property(config,"pip_install_dir", "python")) or is_single_dir  :
+            do_not_clean_install = False
+            is_single_dir = src.appli_test_property(
+                config, "single_install_dir", "yes"
+            ) and src.product.product_test_property(p_info, "single_install_dir", "yes")
+
+            if (
+                (error_step == "CHECK")
+                or (
+                    is_pip
+                    and src.appli_test_property(config, "pip_install_dir", "python")
+                )
+                or is_single_dir
+            ):
                 # cases for which we do not want to remove install dir
-                #   for is_single_dir and is_pip, the test to determine if the product is already 
+                #   for is_single_dir and is_pip, the test to determine if the product is already
                 #   compiled is based on configuration file, not the directory
-                do_not_clean_install=True 
+                do_not_clean_install = True
 
             if not do_not_clean_install:
                 # Clean the install directory if there is any
-                logger.write(_(
-                            "Cleaning the install directory if there is any\n"),
-                             5)
-                sat.clean(config.VARS.application + 
-                          " --products " + p_name + 
-                          " --install",
-                          batch=True,
-                          verbose=0,
-                          logger_add_link = logger)
+                logger.write(_("Cleaning the install directory if there is any\n"), 5)
+                sat.clean(
+                    config.VARS.application + " --products " + p_name + " --install",
+                    batch=True,
+                    verbose=0,
+                    logger_add_link=logger,
+                )
         else:
             # Clean the build directory if the compilation and tests succeed
             if options.clean_build_after:
                 log_step(logger, header, "CLEAN BUILD")
-                sat.clean(config.VARS.application + 
-                          " --products " + p_name + 
-                          " --build",
-                          batch=True,
-                          verbose=0,
-                          logger_add_link = logger)
+                sat.clean(
+                    config.VARS.application + " --products " + p_name + " --build",
+                    batch=True,
+                    verbose=0,
+                    logger_add_link=logger,
+                )
 
         # Log the result
         if res_prod > 0:
             logger.write("\r%s%s" % (header, " " * len_end_line), 3)
-            logger.write("\r" + header + src.printcolors.printcError("KO ") + error_step)
-            logger.write("\n==== %(KO)s in compile of %(name)s \n" %
-                { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+            logger.write(
+                "\r" + header + src.printcolors.printcError("KO ") + error_step
+            )
+            logger.write(
+                "\n==== %(KO)s in compile of %(name)s \n"
+                % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+                4,
+            )
             if error_step == "CHECK":
-                logger.write(_("\nINSTALL directory = %s" % 
-                           src.printcolors.printcInfo(p_info.install_dir)), 3)
+                logger.write(
+                    _(
+                        "\nINSTALL directory = %s"
+                        % src.printcolors.printcInfo(p_info.install_dir)
+                    ),
+                    3,
+                )
             logger.flush()
         else:
             logger.write("\r%s%s" % (header, " " * len_end_line), 3)
             logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
-            logger.write(_("\nINSTALL directory = %s" % 
-                           src.printcolors.printcInfo(p_info.install_dir)), 3)
+            logger.write(
+                _(
+                    "\nINSTALL directory = %s"
+                    % src.printcolors.printcInfo(p_info.install_dir)
+                ),
+                3,
+            )
             logger.write("\n==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-            logger.write("\n==== Compilation of %(name)s %(OK)s \n" %
-                { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+            logger.write(
+                "\n==== Compilation of %(name)s %(OK)s \n"
+                % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+                4,
+            )
             logger.flush()
         logger.write("\n", 3, False)
-        
-        
+
         if res_prod != 0 and options.stop_first_fail:
             break
-        
+
     return res
 
+
 def compile_product(sat, p_name_info, config, options, logger, header, len_end):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param header Str: the header to display when logging
     :param len_end Int: the length of the end of line (used in display)
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-          
-    # Get the build procedure from the product configuration.
+
+    # Get the build procedure from the product configuration.
     # It can be :
     # build_sources : autotools -> build_configure, configure, make, make install
     # build_sources : cmake     -> cmake, make, make install
     # build_sources : script    -> script executions
     res = 0
 
-    
     # check if pip should be used: the application and the product have the pip property
-    if (src.appli_test_property(config,"pip", "yes") and 
-       src.product.product_test_property(p_info,"pip", "yes")):
-            res, len_end_line, error_step = compile_product_pip(sat,
-                                                                p_name_info,
-                                                                config,
-                                                                options,
-                                                                logger,
-                                                                header,
-                                                                len_end)
+    if src.appli_test_property(
+        config, "pip", "yes"
+    ) and src.product.product_test_property(p_info, "pip", "yes"):
+        res, len_end_line, error_step = compile_product_pip(
+            sat, p_name_info, config, options, logger, header, len_end
+        )
     else:
-        if (src.product.product_is_autotools(p_info) or 
-                                              src.product.product_is_cmake(p_info)):
-            res, len_end_line, error_step = compile_product_cmake_autotools(sat,
-                                                                      p_name_info,
-                                                                      config,
-                                                                      options,
-                                                                      logger,
-                                                                      header,
-                                                                      len_end)
+        if src.product.product_is_autotools(p_info) or src.product.product_is_cmake(
+            p_info
+        ):
+            res, len_end_line, error_step = compile_product_cmake_autotools(
+                sat, p_name_info, config, options, logger, header, len_end
+            )
         if src.product.product_has_script(p_info):
-            res, len_end_line, error_step = compile_product_script(sat,
-                                                                   p_name_info,
-                                                                   config,
-                                                                   options,
-                                                                   logger,
-                                                                   header,
-                                                                   len_end)
+            res, len_end_line, error_step = compile_product_script(
+                sat, p_name_info, config, options, logger, header, len_end
+            )
 
     # Check that the install directory exists
-    if res==0 and not(os.path.exists(p_info.install_dir)):
+    if res == 0 and not (os.path.exists(p_info.install_dir)):
         res = 1
         error_step = "NO INSTALL DIR"
-        msg = _("Error: despite the fact that all the steps ended successfully,"
-                " no install directory was found !")
+        msg = _(
+            "Error: despite the fact that all the steps ended successfully,"
+            " no install directory was found !"
+        )
         logger.write(src.printcolors.printcError(msg), 4)
         logger.write("\n", 4)
         return res, len_end, error_step
-    
-    # Add the config file corresponding to the dependencies/versions of the 
+
+    # Add the config file corresponding to the dependencies/versions of the
     # product that have been successfully compiled
-    if res==0:       
+    if res == 0:
         logger.write(_("Add the config file in installation directory\n"), 5)
         # for git based products: add the description of the git tag
-        src_sha1=src.system.git_describe(p_info.source_dir)
+        src_sha1 = src.system.git_describe(p_info.source_dir)
         if src_sha1:
-            p_info.git_tag_description=src_sha1
+            p_info.git_tag_description = src_sha1
         src.product.add_compile_config_file(p_info, config)
-        
+
         if options.check:
             # Do the unit tests (call the check command)
             log_step(logger, header, "CHECK")
             res_check = sat.check(
-                              config.VARS.application + " --products " + p_name,
-                              verbose = 0,
-                              logger_add_link = logger)
+                config.VARS.application + " --products " + p_name,
+                verbose=0,
+                logger_add_link=logger,
+            )
             if res_check != 0:
                 error_step = "CHECK"
-                
+
             res += res_check
-    
+
     return res, len_end_line, error_step
 
 
-def compile_product_pip(sat,
-                        p_name_info,
-                        config,
-                        options,
-                        logger,
-                        header,
-                        len_end):
-    '''Execute the proper build procedure for pip products
+def compile_product_pip(sat, p_name_info, config, options, logger, header, len_end):
+    """Execute the proper build procedure for pip products
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param header Str: the header to display when logging
     :param len_end Int: the length of the end of line (used in display)
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
+    """
     # pip needs openssl-dev. If openssl is declared in the application, we check it!
     if "openssl" in config.APPLICATION.products:
         openssl_cfg = src.product.get_product_config(config, "openssl")
         if not src.product.check_installation(config, openssl_cfg):
-            raise src.SatException(_("please install system openssl development package, it is required for products managed by pip."))
+            raise src.SatException(
+                _(
+                    "please install system openssl development package, it is required for products managed by pip."
+                )
+            )
     # a) initialisation
     p_name, p_info = p_name_info
     res = 0
     error_step = ""
-    pip_install_in_python=False
-    pip_wheels_dir=os.path.join(config.LOCAL.archive_dir,"wheels")
-    pip_install_cmd=config.INTERNAL.command.pip_install # parametrized in src/internal
+    pip_install_in_python = False
+    pip_wheels_dir = os.path.join(config.LOCAL.archive_dir, "wheels")
+    pip_install_cmd = (
+        config.INTERNAL.command.pip_install
+    )  # parametrized in src/internal
 
     # b) get the build environment (useful to get the installed python & pip3)
-    build_environ = src.environment.SalomeEnviron(config,
-                             src.environment.Environ(dict(os.environ)),
-                             True)
-    environ_info = src.product.get_product_dependencies(config,
-                                                        p_name,
-                                                        p_info)
-    build_environ.silent = (config.USER.output_verbose_level < 5)
+    build_environ = src.environment.SalomeEnviron(
+        config, src.environment.Environ(dict(os.environ)), True
+    )
+    environ_info = src.product.get_product_dependencies(config, p_name, p_info)
+    build_environ.silent = config.USER.output_verbose_level < 5
     build_environ.set_full_environ(logger, environ_info)
 
     # c) download: check/get the pip wheel in pip_wheels_dir
-    pip_download_cmd=config.INTERNAL.command.pip_download +\
-                     " --destination-directory %s --no-deps %s==%s " %\
-                     (pip_wheels_dir, p_info.name, p_info.version)
-    logger.write("\n"+pip_download_cmd+"\n", 4, False) 
-    res_pip_dwl = (subprocess.call(pip_download_cmd, 
-                                   shell=True, 
-                                   cwd=config.LOCAL.workdir,
-                                   env=build_environ.environ.environ,
-                                   stdout=logger.logTxtFile, 
-                                   stderr=subprocess.STDOUT) == 0)
+    pip_download_cmd = (
+        config.INTERNAL.command.pip_download
+        + " --destination-directory %s --no-deps %s==%s "
+        % (pip_wheels_dir, p_info.name, p_info.version)
+    )
+    logger.write("\n" + pip_download_cmd + "\n", 4, False)
+    res_pip_dwl = (
+        subprocess.call(
+            pip_download_cmd,
+            shell=True,
+            cwd=config.LOCAL.workdir,
+            env=build_environ.environ.environ,
+            stdout=logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
+        == 0
+    )
     # errors are not managed at this stage; they will be handled by pip install
     # here we just print a message
     if not res_pip_dwl:
         logger.write("Error in pip download\n", 4, False)
     try:
         pip_version_cmd = 'python -c "import pip;print(pip.__version__)"'
-        res_pip_version = subprocess.check_output(pip_version_cmd,
-                               shell=True,
-                               cwd=config.LOCAL.workdir,
-                               env=build_environ.environ.environ,
-                               stderr=subprocess.STDOUT).strip()
-        pip_build_options=res_pip_version.split('.')[0] < 21
+        res_pip_version = subprocess.check_output(
+            pip_version_cmd,
+            shell=True,
+            cwd=config.LOCAL.workdir,
+            env=build_environ.environ.environ,
+            stderr=subprocess.STDOUT,
+        ).strip()
+        pip_build_options = res_pip_version.split(".")[0] < 21
     except:
-        pip_build_options= True
+        pip_build_options = True
     # d) install (in python or in a separate product directory)
-    if src.appli_test_property(config,"pip_install_dir", "python"):
+    if src.appli_test_property(config, "pip_install_dir", "python"):
         # pip will install product in python directory"
         if pip_build_options:
-            pip_install_cmd+=" --find-links=%s --build %s %s==%s" %\
-                (pip_wheels_dir, p_info.build_dir, p_info.name, p_info.version)
+            pip_install_cmd += " --find-links=%s --build %s %s==%s" % (
+                pip_wheels_dir,
+                p_info.build_dir,
+                p_info.name,
+                p_info.version,
+            )
         else:
-            pip_install_cmd+=" --find-links=%s --cache-dir %s %s==%s" %\
-                (pip_wheels_dir, p_info.build_dir, p_info.name, p_info.version)
-        pip_install_in_python=True
-    else: 
+            pip_install_cmd += " --find-links=%s --cache-dir %s %s==%s" % (
+                pip_wheels_dir,
+                p_info.build_dir,
+                p_info.name,
+                p_info.version,
+            )
+        pip_install_in_python = True
+    else:
         # pip will install the product in the product install_dir
-        pip_install_dir=os.path.join(p_info.install_dir, "lib", "python${PYTHON}", "site-packages")
+        pip_install_dir = os.path.join(
+            p_info.install_dir, "lib", "python${PYTHON}", "site-packages"
+        )
         if pip_build_options:
-            pip_install_cmd+=" --find-links=%s --build %s --target %s %s==%s" %\
-                (pip_wheels_dir, p_info.build_dir, pip_install_dir, p_info.name, p_info.version)
+            pip_install_cmd += " --find-links=%s --build %s --target %s %s==%s" % (
+                pip_wheels_dir,
+                p_info.build_dir,
+                pip_install_dir,
+                p_info.name,
+                p_info.version,
+            )
         else:
-            pip_install_cmd+=" --find-links=%s --cache-dir %s --target %s %s==%s" %\
-                (pip_wheels_dir,  p_info.build_dir, pip_install_dir, p_info.name, p_info.version)
+            pip_install_cmd += " --find-links=%s --cache-dir %s --target %s %s==%s" % (
+                pip_wheels_dir,
+                p_info.build_dir,
+                pip_install_dir,
+                p_info.name,
+                p_info.version,
+            )
     log_step(logger, header, "PIP")
-    logger.write("\n"+pip_install_cmd+"\n", 4)
+    logger.write("\n" + pip_install_cmd + "\n", 4)
     len_end_line = len_end + 3
     error_step = ""
 
-    res_pip = (subprocess.call(pip_install_cmd, 
-                               shell=True, 
-                               cwd=config.LOCAL.workdir,
-                               env=build_environ.environ.environ,
-                               stdout=logger.logTxtFile, 
-                               stderr=subprocess.STDOUT) == 0)        
+    res_pip = (
+        subprocess.call(
+            pip_install_cmd,
+            shell=True,
+            cwd=config.LOCAL.workdir,
+            env=build_environ.environ.environ,
+            stdout=logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
+        == 0
+    )
     if res_pip:
-        res=0
+        res = 0
     else:
-        #log_res_step(logger, res)
-        res=1
+        # log_res_step(logger, res)
+        res = 1
         error_step = "PIP"
-        logger.write("\nError in pip command, please consult details with sat log command's internal traces\n", 3)
-
-    return res, len_end_line, error_step 
+        logger.write(
+            "\nError in pip command, please consult details with sat log command's internal traces\n",
+            3,
+        )
 
+    return res, len_end_line, error_step
 
 
-def compile_product_cmake_autotools(sat,
-                                    p_name_info,
-                                    config,
-                                    options,
-                                    logger,
-                                    header,
-                                    len_end):
-    '''Execute the proper build procedure for autotools or cmake
+def compile_product_cmake_autotools(
+    sat, p_name_info, config, options, logger, header, len_end
+):
+    """Execute the proper build procedure for autotools or cmake
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param header Str: the header to display when logging
     :param len_end Int: the length of the end of line (used in display)
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
+    """
     p_name, p_info = p_name_info
-    
+
     # Execute "sat configure", "sat make" and "sat install"
     res = 0
     error_step = ""
-    
+
     # Logging and sat command call for configure step
     len_end_line = len_end
     log_step(logger, header, "CONFIGURE")
-    res_c = sat.configure(config.VARS.application + " --products " + p_name,
-                          verbose = 0,
-                          logger_add_link = logger)
+    res_c = sat.configure(
+        config.VARS.application + " --products " + p_name,
+        verbose=0,
+        logger_add_link=logger,
+    )
     log_res_step(logger, res_c)
     res += res_c
-    
+
     if res_c > 0:
         error_step = "CONFIGURE"
     else:
         # Logging and sat command call for make step
-        # Logging take account of the fact that the product has a compilation 
+        # Logging takes into account whether the product has a compilation
         # script or not
         if src.product.product_has_script(p_info):
-            # if the product has a compilation script, 
+            # if the product has a compilation script,
             # it is executed during make step
-            scrit_path_display = src.printcolors.printcLabel(
-                                                        p_info.compil_script)
+            scrit_path_display = src.printcolors.printcLabel(p_info.compil_script)
             log_step(logger, header, "SCRIPT " + scrit_path_display)
             len_end_line = len(scrit_path_display)
         else:
@@ -643,142 +800,150 @@ def compile_product_cmake_autotools(sat,
         # Get the make_flags option if there is any
         if options.makeflags:
             make_arguments += " --option -j" + options.makeflags
-        res_m = sat.make(make_arguments,
-                         verbose = 0,
-                         logger_add_link = logger)
+        res_m = sat.make(make_arguments, verbose=0, logger_add_link=logger)
         log_res_step(logger, res_m)
         res += res_m
-        
+
         if res_m > 0:
             error_step = "MAKE"
-        else: 
+        else:
             # Logging and sat command call for make install step
             log_step(logger, header, "MAKE INSTALL")
-            res_mi = sat.makeinstall(config.VARS.application + 
-                                     " --products " + 
-                                     p_name,
-                                    verbose = 0,
-                                    logger_add_link = logger)
+            res_mi = sat.makeinstall(
+                config.VARS.application + " --products " + p_name,
+                verbose=0,
+                logger_add_link=logger,
+            )
 
             log_res_step(logger, res_mi)
             res += res_mi
-            
+
             if res_mi > 0:
                 error_step = "MAKE INSTALL"
-                
-    return res, len_end_line, error_step 
-
-def compile_product_script(sat,
-                           p_name_info,
-                           config,
-                           options,
-                           logger,
-                           header,
-                           len_end):
-    '''Execute the script build procedure in the product build directory.
-    
+
+    return res, len_end_line, error_step
+
+
+def compile_product_script(sat, p_name_info, config, options, logger, header, len_end):
+    """Execute the script build procedure in the product build directory.
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param header Str: the header to display when logging
     :param len_end Int: the length of the end of line (used in display)
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
+    """
     p_name, p_info = p_name_info
-    
+
     # Execute "sat configure", "sat make" and "sat install"
     error_step = ""
-    
+
     # Logging and sat command call for the script step
     scrit_path_display = src.printcolors.printcLabel(p_info.compil_script)
     log_step(logger, header, "SCRIPT " + scrit_path_display)
     len_end_line = len_end + len(scrit_path_display)
-    res = sat.script(config.VARS.application + " --products " + p_name,
-                     verbose = 0,
-                     logger_add_link = logger)
+    res = sat.script(
+        config.VARS.application + " --products " + p_name,
+        verbose=0,
+        logger_add_link=logger,
+    )
     log_res_step(logger, res)
-              
-    return res, len_end_line, error_step 
 
-    
+    return res, len_end_line, error_step
+
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the compile command description.
     :rtype: str
-    '''
-    return _("The compile command constructs the products of the application"
-             "\n\nexample:\nsat compile SALOME-master --products KERNEL,GUI,"
-             "MEDCOUPLING --clean_all")
-  
+    """
+    return _(
+        "The compile command constructs the products of the application"
+        "\n\nexample:\nsat compile SALOME-master --products KERNEL,GUI,"
+        "MEDCOUPLING --clean_all"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with compile parameter.
-    '''
+    """method that is called when salomeTools is called with compile parameter."""
     # Parse the options
     (options, args) = parser.parse_args(args)
 
-    # Warn the user if he invoked the clean_all option 
+    # Warn the user if he invoked the clean_all option
     # without --products option
-    if (options.clean_all and 
-        options.products is None and 
-        not runner.options.batch):
-        rep = input(_("You used --clean_all without specifying a product"
-                          " are you sure you want to continue? [Yes/No] "))
+    if options.clean_all and options.products is None and not runner.options.batch:
+        rep = input(
+            _(
+                "You used --clean_all without specifying a product"
+                " are you sure you want to continue? [Yes/No] "
+            )
+        )
         if rep.upper() != _("YES").upper():
             return 0
-        
+
     if options.update and (options.clean_all or options.force or options.clean_install):
-        options.update=False  # update is useless in this case
+        options.update = False  # update is useless in this case
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # write warning if platform is not declared as supported
-    src.check_platform_is_supported( runner.cfg, logger )
+    src.check_platform_is_supported(runner.cfg, logger)
 
     # Print some information
-    logger.write(_('Executing the compile commands in the build '
-                                'directories of the products of '
-                                'the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
+    logger.write(
+        _(
+            "Executing the compile commands in the build "
+            "directories of the products of "
+            "the application %s\n"
+        )
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
     info = [
-            (_("SOURCE directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'SOURCES')),
-            (_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))
-            ]
+        (
+            _("SOURCE directory"),
+            os.path.join(runner.cfg.APPLICATION.workdir, "SOURCES"),
+        ),
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD")),
+    ]
     src.print_info(logger, info)
 
     # Get the list of all application products, and create its dependency graph
-    all_products_infos = src.product.get_products_infos(runner.cfg.APPLICATION.products,
-                                                        runner.cfg)
-    all_products_graph=get_dependencies_graph(all_products_infos)
-    #logger.write("Dependency graph of all application products : %s\n" % all_products_graph, 6)
+    all_products_infos = src.product.get_products_infos(
+        runner.cfg.APPLICATION.products, runner.cfg
+    )
+    all_products_graph = get_dependencies_graph(all_products_infos)
+    # logger.write("Dependency graph of all application products : %s\n" % all_products_graph, 6)
     DBG.write("Dependency graph of all application products : ", all_products_graph)
 
     # Get the list of products we have to compile
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
     products_list = [pi[0] for pi in products_infos]
 
-    logger.write("Product we have to compile (as specified by user) : %s\n" % products_list, 5)
+    logger.write(
+        "Product we have to compile (as specified by user) : %s\n" % products_list, 5
+    )
     if options.fathers:
         # Extend the list with all recursive dependencies of the given products
-        visited=[]
+        visited = []
         for p_name in products_list:
-            visited=depth_search_graph(all_products_graph, p_name, visited)
+            visited = depth_search_graph(all_products_graph, p_name, visited)
         products_list = visited
 
     logger.write("Product list to compile with fathers : %s\n" % products_list, 5)
     if options.children:
         # Extend the list with all products that depend upon the given products
-        children=[]
+        children = []
         for n in all_products_graph:
             # for all products (that are not in products_list):
             # if we find a path from the product to the product list,
-            # then we product is a child and we add it to the children list 
+            # then the product is a child and we add it to the children list
             if (n not in children) and (n not in products_list):
                 if find_path_graph(all_products_graph, n, products_list):
                     children = children + [n]
@@ -788,58 +953,74 @@ def run(args, runner, logger):
 
     # Sort the list of all products (topological sort).
     # the products listed first do not depend upon products listed after
-    visited_nodes=[]
-    sorted_nodes=[]
+    visited_nodes = []
+    sorted_nodes = []
     for n in all_products_graph:
         if n not in visited_nodes:
-            visited_nodes,sorted_nodes=depth_first_topo_graph(all_products_graph, n, visited_nodes,sorted_nodes)
-    logger.write("Complete dependency graph topological search (sorting): %s\n" % sorted_nodes, 6)
+            visited_nodes, sorted_nodes = depth_first_topo_graph(
+                all_products_graph, n, visited_nodes, sorted_nodes
+            )
+    logger.write(
+        "Complete dependency graph topological search (sorting): %s\n" % sorted_nodes, 6
+    )
 
     #  Create a dict of all products to facilitate products_infos sorting
-    all_products_dict={}
-    for (pname,pinfo) in all_products_infos:
-        all_products_dict[pname]=(pname,pinfo)
+    all_products_dict = {}
+    for (pname, pinfo) in all_products_infos:
+        all_products_dict[pname] = (pname, pinfo)
 
     # Use the sorted list of all products to sort the list of products we have to compile
-    sorted_product_list=[]
-    product_list_runtime=[]
-    product_list_compiletime=[]
+    sorted_product_list = []
+    product_list_runtime = []
+    product_list_compiletime = []
 
     # store compile-time products at the beginning: we need to compile them first!
     for n in sorted_nodes:
         if n in products_list:
             sorted_product_list.append(n)
     logger.write("Sorted list of products to compile : %s\n" % sorted_product_list, 5)
-    
+
     # from the sorted list of products to compile, build a sorted list of products infos
-    products_infos=[]
+    products_infos = []
     for product in sorted_product_list:
         products_infos.append(all_products_dict[product])
 
-    # for all products to compile, store in "depend_all" field the complete dependencies (recursive) 
+    # for all products to compile, store the complete (recursive) dependencies in the "depend_all" field
     # (will be used by check_dependencies function)
     for pi in products_infos:
-        dep_prod=[]
-        dep_prod=depth_search_graph(all_products_graph,pi[0], dep_prod)
-        pi[1]["depend_all"]=dep_prod[1:]
-        
+        dep_prod = []
+        dep_prod = depth_search_graph(all_products_graph, pi[0], dep_prod)
+        pi[1]["depend_all"] = dep_prod[1:]
 
     # Call the function that will loop over all the products and execute
     # the right command(s)
-    res = compile_all_products(runner, runner.cfg, options, products_infos, all_products_dict, all_products_graph, logger)
-    
+    res = compile_all_products(
+        runner,
+        runner.cfg,
+        options,
+        products_infos,
+        all_products_dict,
+        all_products_graph,
+        logger,
+    )
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nCompilation: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
+
+    logger.write(
+        _("\nCompilation: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
     code = res
     if code != 0:
         code = 1
index a5997bb7814fcc265dbeef9d3df3413f44d0eb58..a36fcfc33be324bd0c5543e5f0deb90540a20bd2 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,7 +22,6 @@ import platform
 import datetime
 import shutil
 import gettext
-import pprint as PP
 
 import src
 import src.logger as LOG
@@ -31,358 +30,458 @@ import src.callerName as CALN
 
 logger = LOG.getDefaultLogger()
 
-verbose = False # True for debug
+verbose = False  # True for debug
 
 # internationalization
 satdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-gettext.install('salomeTools', os.path.join(satdir, 'src', 'i18n'))
+gettext.install("salomeTools", os.path.join(satdir, "src", "i18n"))
 
 # Define all possible options for the config command: sat config <options>
 parser = src.options.Options()
-parser.add_option('v', 'value', 'string', 'value',
-    _("Optional: print the value of CONFIG_VARIABLE."))
-parser.add_option('g', 'debug', 'string', 'debug',
-    _("Optional: print the debugging mode value of CONFIG_VARIABLE."))
-parser.add_option('e', 'edit', 'boolean', 'edit',
-    _("Optional: edit the product configuration file."))
-parser.add_option('i', 'info', 'list2', 'info',
-    _("Optional: get information on product(s). This option accepts a comma separated list."))
-parser.add_option('p', 'products', 'list2', 'products',
-    _("Optional: same as --info, for convenience."))
-parser.add_option('l', 'list', 'boolean', 'list',
-    _("Optional: list all available applications."))
-parser.add_option('', 'show_patchs', 'boolean', 'show_patchs',
-    _("Optional: synthetic list of all patches used in the application"))
-parser.add_option('', 'show_dependencies', 'boolean', 'show_dependencies',
-    _("Optional: list of product dependencies in the application"))
-parser.add_option('', 'show_install', 'boolean', 'show_install',
-    _("Optional: synthetic list of all install directories in the application"))
-parser.add_option('', 'show_properties', 'boolean', 'show_properties',
-    _("Optional: synthetic list of all properties used in the application"))
-parser.add_option('', 'check_system', 'boolean', 'check_system',
-    _("Optional: check if system products are installed"))
-parser.add_option('c', 'copy', 'boolean', 'copy',
-    _("""Optional: copy a config file to the personal config files directory.
+parser.add_option(
+    "v", "value", "string", "value", _("Optional: print the value of CONFIG_VARIABLE.")
+)
+parser.add_option(
+    "g",
+    "debug",
+    "string",
+    "debug",
+    _("Optional: print the debugging mode value of CONFIG_VARIABLE."),
+)
+parser.add_option(
+    "e", "edit", "boolean", "edit", _("Optional: edit the product configuration file.")
+)
+parser.add_option(
+    "i",
+    "info",
+    "list2",
+    "info",
+    _(
+        "Optional: get information on product(s). This option accepts a comma separated list."
+    ),
+)
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: same as --info, for convenience."),
+)
+parser.add_option(
+    "l", "list", "boolean", "list", _("Optional: list all available applications.")
+)
+parser.add_option(
+    "",
+    "show_patchs",
+    "boolean",
+    "show_patchs",
+    _("Optional: synthetic list of all patches used in the application"),
+)
+parser.add_option(
+    "",
+    "show_dependencies",
+    "boolean",
+    "show_dependencies",
+    _("Optional: list of product dependencies in the application"),
+)
+parser.add_option(
+    "",
+    "show_install",
+    "boolean",
+    "show_install",
+    _("Optional: synthetic list of all install directories in the application"),
+)
+parser.add_option(
+    "",
+    "show_properties",
+    "boolean",
+    "show_properties",
+    _("Optional: synthetic list of all properties used in the application"),
+)
+parser.add_option(
+    "",
+    "check_system",
+    "boolean",
+    "check_system",
+    _("Optional: check if system products are installed"),
+)
+parser.add_option(
+    "c",
+    "copy",
+    "boolean",
+    "copy",
+    _(
+        """Optional: copy a config file to the personal config files directory.
 WARNING: the included files are not copied.
-If a name is given the new config file takes the given name."""))
-parser.add_option('n', 'no_label', 'boolean', 'no_label',
-    _("Internal use: do not print labels, Works only with --value and --list."))
-parser.add_option('', 'completion', 'boolean', 'completion',
-    _("Internal use: print only keys, works only with --value."))
-parser.add_option('s', 'schema', 'boolean', 'schema',
-    _("Internal use."))
+If a name is given, the new config file takes the given name."""
+    ),
+)
+parser.add_option(
+    "n",
+    "no_label",
+    "boolean",
+    "no_label",
+    _("Internal use: do not print labels, Works only with --value and --list."),
+)
+parser.add_option(
+    "",
+    "completion",
+    "boolean",
+    "completion",
+    _("Internal use: print only keys, works only with --value."),
+)
+parser.add_option("s", "schema", "boolean", "schema", _("Internal use."))
+
 
 def osJoin(*args):
-  """
-  shortcut wrapper to os.path.join
-  plus optionaly print for debug
-  """
-  res = os.path.realpath(os.path.join(*args))
-  if verbose:
-    if True: # ".pyconf" in res:
-      logger.info("osJoin %-80s in %s" % (res, CALN.caller_name(1)))
-  return res
+    """
+    shortcut wrapper to os.path.join
+    plus optionaly print for debug
+    """
+    res = os.path.realpath(os.path.join(*args))
+    if verbose:
+        if True:  # ".pyconf" in res:
+            logger.info("osJoin %-80s in %s" % (res, CALN.caller_name(1)))
+    return res
+
 
 class ConfigOpener:
-    '''Class that helps to find an application pyconf 
-       in all the possible directories (pathList)
-    '''
+    """Class that helps to find an application pyconf
+    in all the possible directories (pathList)
+    """
+
     def __init__(self, pathList):
-        '''Initialization
-        
+        """Initialization
+
         :param pathList list: The list of paths where to search a pyconf.
-        '''
+        """
         self.pathList = pathList
         if verbose:
-          for path in pathList:
-            if not os.path.isdir(path):
-              logger.warning("ConfigOpener inexisting directory: %s" % path)
+            for path in pathList:
+                if not os.path.isdir(path):
+                    logger.warning("ConfigOpener inexisting directory: %s" % path)
 
     def __call__(self, name):
         if os.path.isabs(name):
-            return src.pyconf.ConfigInputStream(open(name, 'rb'))
+            return src.pyconf.ConfigInputStream(open(name, "rb"))
         else:
-            return src.pyconf.ConfigInputStream(open(osJoin(self.get_path(name), name), 'rb'))
+            return src.pyconf.ConfigInputStream(
+                open(osJoin(self.get_path(name), name), "rb")
+            )
         raise IOError(_("Configuration file '%s' not found") % name)
 
-    def get_path( self, name ):
-        '''The method that returns the entire path of the pyconf searched
+    def get_path(self, name):
+        """The method that returns the entire path of the pyconf searched
         returns first found in self.pathList directories
 
         :param name str: The name of the searched pyconf.
-        '''
+        """
         for path in self.pathList:
             if os.path.exists(osJoin(path, name)):
                 return path
         raise IOError(_("Configuration file '%s' not found") % name)
 
+
 class ConfigManager:
-    '''Class that manages the read of all the configuration files of salomeTools
-    '''
+    """Class that manages the read of all the configuration files of salomeTools"""
+
     def __init__(self, datadir=None):
         pass
 
     def _create_vars(self, application=None, command=None, datadir=None):
-        '''Create a dictionary that stores all information about machine,
+        """Create a dictionary that stores all information about machine,
            user, date, repositories, etc...
-        
+
         :param application str: The application for which salomeTools is called.
         :param command str: The command that is called.
-        :param datadir str: The repository that contain external data 
+        :param datadir str: The repository that contains external data
                             for salomeTools.
         :return: The dictionary that stores all information.
         :rtype: dict
-        '''
-        var = {}      
-        var['user'] = src.architecture.get_user()
-        var['salometoolsway'] = os.path.dirname( os.path.dirname(os.path.abspath(__file__)))
-        var['srcDir'] =  osJoin(var['salometoolsway'], 'src')
-        var['internal_dir'] =  osJoin(var['srcDir'], 'internal_config')
-        var['sep']= os.path.sep
+        """
+        var = {}
+        var["user"] = src.architecture.get_user()
+        var["salometoolsway"] = os.path.dirname(
+            os.path.dirname(os.path.abspath(__file__))
+        )
+        var["srcDir"] = osJoin(var["salometoolsway"], "src")
+        var["internal_dir"] = osJoin(var["srcDir"], "internal_config")
+        var["sep"] = os.path.sep
         if src.architecture.is_windows():
-          var['scriptExtension'] = '.bat'
+            var["scriptExtension"] = ".bat"
         else:
-          var['scriptExtension'] = '.sh'
-        
+            var["scriptExtension"] = ".sh"
+
         # datadir has a default location
-        var['datadir'] =  osJoin(var['salometoolsway'], 'data')
+        var["datadir"] = osJoin(var["salometoolsway"], "data")
         if datadir is not None:
-            var['datadir'] = datadir
+            var["datadir"] = datadir
 
-        var['personalDir'] =  osJoin(os.path.expanduser('~'), '.salomeTools')
-        src.ensure_path_exists(var['personalDir'])
+        var["personalDir"] = osJoin(os.path.expanduser("~"), ".salomeTools")
+        src.ensure_path_exists(var["personalDir"])
 
-        var['personal_applications_dir'] =  osJoin(var['personalDir'], "Applications")
-        src.ensure_path_exists(var['personal_applications_dir'])
-        
-        var['personal_products_dir'] =  osJoin(var['personalDir'], "products")
-        src.ensure_path_exists(var['personal_products_dir'])
-        
-        var['personal_archives_dir'] =  osJoin(var['personalDir'], "Archives")
-        src.ensure_path_exists(var['personal_archives_dir'])
+        var["personal_applications_dir"] = osJoin(var["personalDir"], "Applications")
+        src.ensure_path_exists(var["personal_applications_dir"])
 
-        var['personal_jobs_dir'] =  osJoin(var['personalDir'], "Jobs")
-        src.ensure_path_exists(var['personal_jobs_dir'])
+        var["personal_products_dir"] = osJoin(var["personalDir"], "products")
+        src.ensure_path_exists(var["personal_products_dir"])
 
-        var['personal_machines_dir'] =  osJoin(var['personalDir'], "Machines")
-        src.ensure_path_exists(var['personal_machines_dir'])
+        var["personal_archives_dir"] = osJoin(var["personalDir"], "Archives")
+        src.ensure_path_exists(var["personal_archives_dir"])
+
+        var["personal_jobs_dir"] = osJoin(var["personalDir"], "Jobs")
+        src.ensure_path_exists(var["personal_jobs_dir"])
+
+        var["personal_machines_dir"] = osJoin(var["personalDir"], "Machines")
+        src.ensure_path_exists(var["personal_machines_dir"])
 
         # read linux distributions dictionary
-        distrib_cfg = src.pyconf.Config( osJoin(var['srcDir'], 'internal_config', 'distrib.pyconf'))
-        
+        distrib_cfg = src.pyconf.Config(
+            osJoin(var["srcDir"], "internal_config", "distrib.pyconf")
+        )
+
         # set platform parameters
         dist_name = src.architecture.get_distribution(codes=distrib_cfg.DISTRIBUTIONS)
         dist_version = src.architecture.get_distrib_version(dist_name)
         dist_version_full = src.architecture.get_version_XY()
         dist = dist_name + dist_version
-        
-        var['dist_name'] = dist_name
-        var['dist_version'] = dist_version
-        var['dist'] = dist
-        var['dist_ref'] = dist_name + dist_version_full
-        var['python'] = src.architecture.get_python_version()
-
-        var['nb_proc'] = src.architecture.get_nb_proc()
+
+        var["dist_name"] = dist_name
+        var["dist_version"] = dist_version
+        var["dist"] = dist
+        var["dist_ref"] = dist_name + dist_version_full
+        var["python"] = src.architecture.get_python_version()
+
+        var["nb_proc"] = src.architecture.get_nb_proc()
         node_name = platform.node()
-        var['node'] = node_name
-        var['hostname'] = node_name
+        var["node"] = node_name
+        var["hostname"] = node_name
 
         # set date parameters
         dt = datetime.datetime.now()
-        var['date'] = dt.strftime('%Y%m%d')
-        var['datehour'] = dt.strftime('%Y%m%d_%H%M%S')
-        var['hour'] = dt.strftime('%H%M%S')
-
-        var['command'] = str(command)
-        var['application'] = str(application)
-
-        # Root dir for temporary files 
-        var['tmp_root'] = os.sep + 'tmp' + os.sep + var['user']
-        # particular win case 
-        if src.architecture.is_windows() : 
-            var['tmp_root'] =  os.path.expanduser('~') + os.sep + 'tmp'
-        
+        var["date"] = dt.strftime("%Y%m%d")
+        var["datehour"] = dt.strftime("%Y%m%d_%H%M%S")
+        var["hour"] = dt.strftime("%H%M%S")
+
+        var["command"] = str(command)
+        var["application"] = str(application)
+
+        # Root dir for temporary files
+        var["tmp_root"] = os.sep + "tmp" + os.sep + var["user"]
+        # particular win case
+        if src.architecture.is_windows():
+            var["tmp_root"] = os.path.expanduser("~") + os.sep + "tmp"
+
         return var
 
     def get_command_line_overrides(self, options, sections):
-        '''get all the overwrites that are in the command line
-        
-        :param options: the options from salomeTools class 
+        """get all the overwrites that are in the command line
+
+        :param options: the options from salomeTools class
                         initialization (like -l5 or --overwrite)
         :param sections str: The config section to overwrite.
         :return: The list of all the overwrites to apply.
         :rtype: list
-        '''
-        # when there are no options or not the overwrite option, 
+        """
+        # when there are no options or no overwrite option,
         # return an empty list
         if options is None or options.overwrite is None:
             return []
-        
+
         over = []
         for section in sections:
-            # only overwrite the sections that correspond to the option 
-            over.extend(filter(lambda l: l.startswith(section + "."), 
-                               options.overwrite))
+            # only overwrite the sections that correspond to the option
+            over.extend(
+                filter(lambda l: l.startswith(section + "."), options.overwrite)
+            )
         return over
 
-    def get_config(self, application=None, options=None, command=None,
-                    datadir=None):
-        '''get the config from all the configuration files.
-        
+    def get_config(self, application=None, options=None, command=None, datadir=None):
+        """get the config from all the configuration files.
+
         :param application str: The application for which salomeTools is called.
         :param options class Options: The general salomeTools
                                       options (--overwrite or -l5, for example)
         :param command str: The command that is called.
-        :param datadir str: The repository that contain 
+        :param datadir str: The repository that contains
                             external data for salomeTools.
         :return: The final config.
         :rtype: class 'src.pyconf.Config'
-        '''        
-        
+        """
+
         # create a ConfigMerger to handle merge
-        merger = src.pyconf.ConfigMerger()#MergeHandler())
-        
+        merger = src.pyconf.ConfigMerger()  # MergeHandler())
+
         # create the configuration instance
         cfg = src.pyconf.Config()
-        
+
         # =====================================================================
         # create VARS section
-        var = self._create_vars(application=application, command=command, datadir=datadir)
+        var = self._create_vars(
+            application=application, command=command, datadir=datadir
+        )
         # DBG.write("create_vars", var, DBG.isDeveloper())
 
         # add VARS to config
         cfg.VARS = src.pyconf.Mapping(cfg)
         for variable in var:
             cfg.VARS[variable] = var[variable]
-        
+
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["VARS"]):
-            exec('cfg.' + rule) # this cannot be factorized because of the exec
-        
+            exec("cfg." + rule)  # this cannot be factorized because of the exec
+
         # =====================================================================
         # Load INTERNAL config
         # read src/internal_config/salomeTools.pyconf
-        src.pyconf.streamOpener = ConfigOpener([
-                             osJoin(cfg.VARS.srcDir, 'internal_config')])
+        src.pyconf.streamOpener = ConfigOpener(
+            [osJoin(cfg.VARS.srcDir, "internal_config")]
+        )
         try:
-            if src.architecture.is_windows(): # special internal config for windows
-                internal_cfg = src.pyconf.Config(open( osJoin(cfg.VARS.srcDir,
-                                        'internal_config', 'salomeTools_win.pyconf')))
+            if src.architecture.is_windows():  # special internal config for windows
+                internal_cfg = src.pyconf.Config(
+                    open(
+                        osJoin(
+                            cfg.VARS.srcDir, "internal_config", "salomeTools_win.pyconf"
+                        )
+                    )
+                )
             else:
-                internal_cfg = src.pyconf.Config(open( osJoin(cfg.VARS.srcDir,
-                                        'internal_config', 'salomeTools.pyconf')))
+                internal_cfg = src.pyconf.Config(
+                    open(
+                        osJoin(cfg.VARS.srcDir, "internal_config", "salomeTools.pyconf")
+                    )
+                )
         except src.pyconf.ConfigError as e:
-            raise src.SatException(_("Error in configuration file:"
-                                     " salomeTools.pyconf\n  %(error)s") % \
-                                   {'error': str(e) })
-        
+            raise src.SatException(
+                _("Error in configuration file:" " salomeTools.pyconf\n  %(error)s")
+                % {"error": str(e)}
+            )
+
         merger.merge(cfg, internal_cfg)
 
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["INTERNAL"]):
-            exec('cfg.' + rule) # this cannot be factorized because of the exec        
-               
+            exec("cfg." + rule)  # this cannot be factorized because of the exec
+
         # =====================================================================
         # Load LOCAL config file
         # search only in the data directory
         src.pyconf.streamOpener = ConfigOpener([cfg.VARS.datadir])
         try:
-            local_cfg = src.pyconf.Config(open( osJoin(cfg.VARS.datadir,
-                                                           'local.pyconf')),
-                                         PWD = ('LOCAL', cfg.VARS.datadir) )
+            local_cfg = src.pyconf.Config(
+                open(osJoin(cfg.VARS.datadir, "local.pyconf")),
+                PWD=("LOCAL", cfg.VARS.datadir),
+            )
         except src.pyconf.ConfigError as e:
-            raise src.SatException(_("Error in configuration file: "
-                                     "local.pyconf\n  %(error)s") % \
-                {'error': str(e) })
+            raise src.SatException(
+                _("Error in configuration file: " "local.pyconf\n  %(error)s")
+                % {"error": str(e)}
+            )
         except IOError as error:
             e = str(error)
-            raise src.SatException( e );
+            raise src.SatException(e)
         merger.merge(cfg, local_cfg)
 
         # When the key is "default", put the default value
         if cfg.LOCAL.base == "default":
-            cfg.LOCAL.base = os.path.abspath(osJoin(cfg.VARS.salometoolsway, "..", "BASE"))
+            cfg.LOCAL.base = os.path.abspath(
+                osJoin(cfg.VARS.salometoolsway, "..", "BASE")
+            )
         if cfg.LOCAL.workdir == "default":
             cfg.LOCAL.workdir = os.path.abspath(osJoin(cfg.VARS.salometoolsway, ".."))
         if cfg.LOCAL.log_dir == "default":
-            cfg.LOCAL.log_dir = os.path.abspath(osJoin(cfg.VARS.salometoolsway, "..", "LOGS"))
+            cfg.LOCAL.log_dir = os.path.abspath(
+                osJoin(cfg.VARS.salometoolsway, "..", "LOGS")
+            )
 
         if cfg.LOCAL.archive_dir == "default":
-            cfg.LOCAL.archive_dir = os.path.abspath( osJoin(cfg.VARS.salometoolsway, "..", "ARCHIVES"))
+            cfg.LOCAL.archive_dir = os.path.abspath(
+                osJoin(cfg.VARS.salometoolsway, "..", "ARCHIVES")
+            )
 
         # if the sat tag was not set permanently by user
         if cfg.LOCAL.tag == "unknown":
             # get the tag with git, and store it
-            sat_version=src.system.git_describe(cfg.VARS.salometoolsway) 
+            sat_version = src.system.git_describe(cfg.VARS.salometoolsway)
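+            # git_describe presumably returns False when no tag can be determined
+            # (e.g. not a git checkout); fall back to INTERNAL.sat_version in that case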
             if sat_version == False:
-                sat_version=cfg.INTERNAL.sat_version
-            cfg.LOCAL.tag=sat_version
-                
+                sat_version = cfg.INTERNAL.sat_version
+            cfg.LOCAL.tag = sat_version
 
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["LOCAL"]):
-            exec('cfg.' + rule) # this cannot be factorized because of the exec
-        
+            exec("cfg." + rule)  # this cannot be factorized because of the exec
+
         # =====================================================================
         # Load the PROJECTS
         projects_cfg = src.pyconf.Config()
-        projects_cfg.addMapping("PROJECTS",
-                                src.pyconf.Mapping(projects_cfg),
-                                "The projects\n")
-        projects_cfg.PROJECTS.addMapping("projects",
-                                src.pyconf.Mapping(cfg.PROJECTS),
-                                "The projects definition\n")
-        
+        projects_cfg.addMapping(
+            "PROJECTS", src.pyconf.Mapping(projects_cfg), "The projects\n"
+        )
+        projects_cfg.PROJECTS.addMapping(
+            "projects", src.pyconf.Mapping(cfg.PROJECTS), "The projects definition\n"
+        )
+
         for project_pyconf_path in cfg.PROJECTS.project_file_paths:
             if not os.path.isabs(project_pyconf_path):
                 # for a relative path (archive case) we complete with sat path
-                project_pyconf_path = os.path.join(cfg.VARS.salometoolsway,
-                                                  project_pyconf_path)
+                project_pyconf_path = os.path.join(
+                    cfg.VARS.salometoolsway, project_pyconf_path
+                )
             if not os.path.exists(project_pyconf_path):
-                msg = _("WARNING: The project file %s cannot be found. "
-                        "It will be ignored\n" % project_pyconf_path)
+                msg = _(
+                    "WARNING: The project file %s cannot be found. "
+                    "It will be ignored\n" % project_pyconf_path
+                )
                 sys.stdout.write(msg)
                 continue
-            project_name = os.path.basename(
-                                    project_pyconf_path)[:-len(".pyconf")]
+            project_name = os.path.basename(project_pyconf_path)[: -len(".pyconf")]
             try:
                 project_pyconf_dir = os.path.dirname(project_pyconf_path)
-                project_cfg = src.pyconf.Config(open(project_pyconf_path),
-                                                PWD=("", project_pyconf_dir))
+                project_cfg = src.pyconf.Config(
+                    open(project_pyconf_path), PWD=("", project_pyconf_dir)
+                )
             except Exception as e:
-                msg = _("ERROR: Error in configuration file: "
-                                 "%(file_path)s\n  %(error)s\n") % \
-                            {'file_path' : project_pyconf_path, 'error': str(e) }
+                msg = _(
+                    "ERROR: Error in configuration file: "
+                    "%(file_path)s\n  %(error)s\n"
+                ) % {"file_path": project_pyconf_path, "error": str(e)}
                 sys.stdout.write(msg)
                 continue
-            projects_cfg.PROJECTS.projects.addMapping(project_name,
-                             src.pyconf.Mapping(projects_cfg.PROJECTS.projects),
-                             "The %s project\n" % project_name)
-            projects_cfg.PROJECTS.projects[project_name]=project_cfg
-            projects_cfg.PROJECTS.projects[project_name]["file_path"] = \
-                                                        project_pyconf_path
+            projects_cfg.PROJECTS.projects.addMapping(
+                project_name,
+                src.pyconf.Mapping(projects_cfg.PROJECTS.projects),
+                "The %s project\n" % project_name,
+            )
+            projects_cfg.PROJECTS.projects[project_name] = project_cfg
+            projects_cfg.PROJECTS.projects[project_name][
+                "file_path"
+            ] = project_pyconf_path
             # store the project tag if any
-            product_project_git_tag = src.system.git_describe(os.path.dirname(project_pyconf_path))
+            product_project_git_tag = src.system.git_describe(
+                os.path.dirname(project_pyconf_path)
+            )
             if product_project_git_tag:
-                projects_cfg.PROJECTS.projects[project_name]["git_tag"] = product_project_git_tag
+                projects_cfg.PROJECTS.projects[project_name][
+                    "git_tag"
+                ] = product_project_git_tag
             else:
                 projects_cfg.PROJECTS.projects[project_name]["git_tag"] = "unknown"
-                   
+
         merger.merge(cfg, projects_cfg)
 
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["PROJECTS"]):
-            exec('cfg.' + rule) # this cannot be factorized because of the exec
-        
+            exec("cfg." + rule)  # this cannot be factorized because of the exec
+
         # =====================================================================
-        # Create the paths where to search the application configurations, 
-        # the product configurations, the products archives, 
+        # Create the paths where to search the application configurations,
+        # the product configurations, the products archives,
         # the jobs configurations and the machines configurations
         cfg.addMapping("PATHS", src.pyconf.Mapping(cfg), "The paths\n")
         cfg.PATHS["APPLICATIONPATH"] = src.pyconf.Sequence(cfg.PATHS)
         cfg.PATHS.APPLICATIONPATH.append(cfg.VARS.personal_applications_dir, "")
 
-        
         cfg.PATHS["PRODUCTPATH"] = src.pyconf.Sequence(cfg.PATHS)
         cfg.PATHS.PRODUCTPATH.append(cfg.VARS.personal_products_dir, "")
         cfg.PATHS["ARCHIVEPATH"] = src.pyconf.Sequence(cfg.PATHS)
@@ -400,27 +499,29 @@ class ConfigManager:
         # Loop over the projects in order to complete the PATHS variables
         # e.g. ARCHIVEPATH gets /data/tmpsalome/salome/prerequis/archives
         for project in cfg.PROJECTS.projects:
-            for PATH in ["APPLICATIONPATH",
-                         "PRODUCTPATH",
-                         "ARCHIVEPATH", #comment this for default archive      #8646
-                         "ARCHIVEFTP",
-                         "JOBPATH",
-                         "MACHINEPATH",
-                         "LICENCEPATH"]:
+            for PATH in [
+                "APPLICATIONPATH",
+                "PRODUCTPATH",
+                "ARCHIVEPATH",  # comment this for default archive     #8646
+                "ARCHIVEFTP",
+                "JOBPATH",
+                "MACHINEPATH",
+                "LICENCEPATH",
+            ]:
                 if PATH not in cfg.PROJECTS.projects[project]:
                     continue
-                pathlist=cfg.PROJECTS.projects[project][PATH].split(":")
+                pathlist = cfg.PROJECTS.projects[project][PATH].split(":")
                 for path in pathlist:
                     cfg.PATHS[PATH].append(path, "")
-        
+
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["PATHS"]):
-            exec('cfg.' + rule) # this cannot be factorized because of the exec
+            exec("cfg." + rule)  # this cannot be factorized because of the exec
 
         # AT END append APPLI_TEST directory in APPLICATIONPATH, for unittest
-        appli_test_dir =  osJoin(satdir, "test", "APPLI_TEST")
+        appli_test_dir = osJoin(satdir, "test", "APPLI_TEST")
         if appli_test_dir not in cfg.PATHS.APPLICATIONPATH:
-          cfg.PATHS.APPLICATIONPATH.append(appli_test_dir, "unittest APPLI_TEST path")
+            cfg.PATHS.APPLICATIONPATH.append(appli_test_dir, "unittest APPLI_TEST path")
 
         # =====================================================================
         # Load APPLICATION config file
@@ -430,94 +531,119 @@ class ConfigManager:
             src.pyconf.streamOpener = ConfigOpener(cp)
             do_merge = True
             try:
-                application_cfg = src.pyconf.Config(application + '.pyconf')
+                application_cfg = src.pyconf.Config(application + ".pyconf")
             except IOError as e:
                 raise src.SatException(
-                   _("%s, use 'config --list' to get the list of available applications.") % e)
+                    _(
+                        "%s, use 'config --list' to get the list of available applications."
+                    )
+                    % e
+                )
             except src.pyconf.ConfigError as e:
-                if (not ('-e' in parser.parse_args()[1]) 
-                                         or ('--edit' in parser.parse_args()[1]) 
-                                         and command == 'config'):
-                    raise src.SatException(_("Error in configuration file: "
-                                             "%(application)s.pyconf\n "
-                                             " %(error)s") % \
-                        { 'application': application, 'error': str(e) } )
+                if (
+                    not ("-e" in parser.parse_args()[1])
+                    or ("--edit" in parser.parse_args()[1])
+                    and command == "config"
+                ):
+                    raise src.SatException(
+                        _(
+                            "Error in configuration file: "
+                            "%(application)s.pyconf\n "
+                            " %(error)s"
+                        )
+                        % {"application": application, "error": str(e)}
+                    )
                 else:
-                    sys.stdout.write(src.printcolors.printcWarning(
-                                        "There is an error in the file"
-                                        " %s.pyconf.\n" % cfg.VARS.application))
+                    sys.stdout.write(
+                        src.printcolors.printcWarning(
+                            "There is an error in the file"
+                            " %s.pyconf.\n" % cfg.VARS.application
+                        )
+                    )
                     do_merge = False
             except Exception as e:
-                if (not ('-e' in parser.parse_args()[1]) 
-                                        or ('--edit' in parser.parse_args()[1]) 
-                                        and command == 'config'):
+                if (
+                    not ("-e" in parser.parse_args()[1])
+                    or ("--edit" in parser.parse_args()[1])
+                    and command == "config"
+                ):
                     sys.stdout.write(src.printcolors.printcWarning("%s\n" % str(e)))
-                    raise src.SatException(_("Error in configuration file:"
-                                             " %(application)s.pyconf\n") % \
-                        { 'application': application} )
+                    raise src.SatException(
+                        _("Error in configuration file:" " %(application)s.pyconf\n")
+                        % {"application": application}
+                    )
                 else:
-                    sys.stdout.write(src.printcolors.printcWarning(
-                                "There is an error in the file"
-                                " %s.pyconf. Opening the file with the"
-                                " default viewer\n" % cfg.VARS.application))
-                    sys.stdout.write("The error:"
-                                 " %s\n" % src.printcolors.printcWarning(
-                                                                      str(e)))
+                    sys.stdout.write(
+                        src.printcolors.printcWarning(
+                            "There is an error in the file"
+                            " %s.pyconf. Opening the file with the"
+                            " default viewer\n" % cfg.VARS.application
+                        )
+                    )
+                    sys.stdout.write(
+                        "The error:" " %s\n" % src.printcolors.printcWarning(str(e))
+                    )
                     do_merge = False
-        
+
             else:
-                cfg['open_application'] = 'yes'
+                cfg["open_application"] = "yes"
         # =====================================================================
         # Load product config files in PRODUCTS section
         products_cfg = src.pyconf.Config()
-        products_cfg.addMapping("PRODUCTS",
-                                src.pyconf.Mapping(products_cfg),
-                                "The products\n")
+        products_cfg.addMapping(
+            "PRODUCTS", src.pyconf.Mapping(products_cfg), "The products\n"
+        )
         if application is not None:
             src.pyconf.streamOpener = ConfigOpener(cfg.PATHS.PRODUCTPATH)
             for product_name in application_cfg.APPLICATION.products.keys():
                 # Loop on all files that are in softsDir directory
                 # and read their config
                 product_file_name = product_name + ".pyconf"
-                product_file_path = src.find_file_in_lpath(product_file_name, cfg.PATHS.PRODUCTPATH)
+                product_file_path = src.find_file_in_lpath(
+                    product_file_name, cfg.PATHS.PRODUCTPATH
+                )
                 if product_file_path:
                     products_dir = os.path.dirname(product_file_path)
                     # for a relative path (archive case) we complete with sat path
                     if not os.path.isabs(products_dir):
-                        products_dir = os.path.join(cfg.VARS.salometoolsway,
-                                                    products_dir)
+                        products_dir = os.path.join(
+                            cfg.VARS.salometoolsway, products_dir
+                        )
                     try:
-                        prod_cfg = src.pyconf.Config(open(product_file_path),
-                                                     PWD=("", products_dir))
+                        prod_cfg = src.pyconf.Config(
+                            open(product_file_path), PWD=("", products_dir)
+                        )
                         prod_cfg.from_file = product_file_path
                         products_cfg.PRODUCTS[product_name] = prod_cfg
                     except Exception as e:
                         msg = _(
                             "WARNING: Error in configuration file"
-                            ": %(prod)s\n  %(error)s" % \
-                            {'prod' :  product_name, 'error': str(e) })
+                            ": %(prod)s\n  %(error)s"
+                            % {"prod": product_name, "error": str(e)}
+                        )
                         sys.stdout.write(msg)
-            
+
             merger.merge(cfg, products_cfg)
-            
+
             # apply overwrite from command line if needed
             for rule in self.get_command_line_overrides(options, ["PRODUCTS"]):
-                exec('cfg.' + rule) # this cannot be factorized because of the exec
-            
+                exec("cfg." + rule)  # this cannot be factorized because of the exec
+
             if do_merge:
                 merger.merge(cfg, application_cfg)
 
                 # default launcher name ('salome')
-                if ('profile' in cfg.APPLICATION and 
-                    'launcher_name' not in cfg.APPLICATION.profile):
-                    cfg.APPLICATION.profile.launcher_name = 'salome'
+                if (
+                    "profile" in cfg.APPLICATION
+                    and "launcher_name" not in cfg.APPLICATION.profile
+                ):
+                    cfg.APPLICATION.profile.launcher_name = "salome"
 
                 # apply overwrite from command line if needed
-                for rule in self.get_command_line_overrides(options,
-                                                             ["APPLICATION"]):
+                for rule in self.get_command_line_overrides(options, ["APPLICATION"]):
                     # this cannot be factorized because of the exec
-                    exec('cfg.' + rule)
-            
+                    exec("cfg." + rule)
+
         # =====================================================================
         # load USER config
         self.set_user_config_file(cfg)
@@ -527,8 +653,8 @@ class ConfigManager:
 
         # apply overwrite from command line if needed
         for rule in self.get_command_line_overrides(options, ["USER"]):
-            exec('cfg.' + rule) # this cannot be factorize because of the exec
-        
+            exec("cfg." + rule)  # this cannot be factorize because of the exec
+
         # remove application products "blacklisted" in rm_products field
         if "APPLICATION" in cfg and "rm_products" in cfg.APPLICATION:
             for prod_to_remove in cfg.APPLICATION.rm_products:
@@ -538,115 +664,129 @@ class ConfigManager:
         return cfg
 
     def set_user_config_file(self, config):
-        '''Set the user config file name and path.
+        """Set the user config file name and path.
         If necessary, build it from another one or create it from scratch.
-        
-        :param config class 'src.pyconf.Config': The global config 
+
+        :param config class 'src.pyconf.Config': The global config
                                                  (containing all pyconf).
-        '''
+        """
         # get the expected name and path of the file
-        self.config_file_name = 'SAT.pyconf'
-        self.user_config_file_path =  osJoin(config.VARS.personalDir, self.config_file_name)
-        
+        self.config_file_name = "SAT.pyconf"
+        self.user_config_file_path = osJoin(
+            config.VARS.personalDir, self.config_file_name
+        )
+
         # if pyconf does not exist, create it from scratch
-        if not os.path.isfile(self.user_config_file_path): 
+        if not os.path.isfile(self.user_config_file_path):
             self.create_config_file(config)
-    
+
     def create_config_file(self, config):
-        '''This method is called when there are no user config file. 
+        """This method is called when there are no user config file.
            It build it from scratch.
-        
+
         :param config class 'src.pyconf.Config': The global config.
         :return: the config corresponding to the file created.
         :rtype: config class 'src.pyconf.Config'
-        '''
-        
+        """
+
         cfg_name = self.get_user_config_file()
 
         user_cfg = src.pyconf.Config()
         #
-        user_cfg.addMapping('USER', src.pyconf.Mapping(user_cfg), "")
-
-        user_cfg.USER.addMapping('cvs_user', config.VARS.user,
-            "This is the user name used to access salome cvs base.\n")
-        user_cfg.USER.addMapping('svn_user', config.VARS.user,
-            "This is the user name used to access salome svn base.\n")
-        user_cfg.USER.addMapping('output_verbose_level', 3,
+        user_cfg.addMapping("USER", src.pyconf.Mapping(user_cfg), "")
+
+        user_cfg.USER.addMapping(
+            "cvs_user",
+            config.VARS.user,
+            "This is the user name used to access salome cvs base.\n",
+        )
+        user_cfg.USER.addMapping(
+            "svn_user",
+            config.VARS.user,
+            "This is the user name used to access salome svn base.\n",
+        )
+        user_cfg.USER.addMapping(
+            "output_verbose_level",
+            3,
             "This is the default output_verbose_level you want."
-            " 0=>no output, 5=>debug.\n")
-        user_cfg.USER.addMapping('publish_dir', 
-                                  osJoin(os.path.expanduser('~'),
-                                 'websupport', 
-                                 'satreport'), 
-                                 "")
-        user_cfg.USER.addMapping('editor',
-                                 'vi', 
-                                 "This is the editor used to "
-                                 "modify configuration files\n")
-        user_cfg.USER.addMapping('browser', 
-                                 'firefox', 
-                                 "This is the browser used to "
-                                 "read html documentation\n")
-        user_cfg.USER.addMapping('pdf_viewer', 
-                                 'evince', 
-                                 "This is the pdf_viewer used "
-                                 "to read pdf documentation\n")
+            " 0=>no output, 5=>debug.\n",
+        )
+        user_cfg.USER.addMapping(
+            "publish_dir",
+            osJoin(os.path.expanduser("~"), "websupport", "satreport"),
+            "",
+        )
+        user_cfg.USER.addMapping(
+            "editor", "vi", "This is the editor used to " "modify configuration files\n"
+        )
+        user_cfg.USER.addMapping(
+            "browser",
+            "firefox",
+            "This is the browser used to " "read html documentation\n",
+        )
+        user_cfg.USER.addMapping(
+            "pdf_viewer",
+            "evince",
+            "This is the pdf_viewer used " "to read pdf documentation\n",
+        )
 
         src.ensure_path_exists(config.VARS.personalDir)
-        src.ensure_path_exists( osJoin(config.VARS.personalDir,
-                                            'Applications'))
+        src.ensure_path_exists(osJoin(config.VARS.personalDir, "Applications"))
 
-        f = open(cfg_name, 'w')
+        f = open(cfg_name, "w")
         user_cfg.__save__(f)
         f.close()
 
-        return user_cfg   
+        return user_cfg
 
     def get_user_config_file(self):
-        '''Get the user config file
+        """Get the user config file
         :return: path to the user config file.
         :rtype: str
-        '''
+        """
         if not self.user_config_file_path:
-            raise src.SatException(_("Error in get_user_config_file: "
-                                     "missing user config file path"))
-        return self.user_config_file_path     
+            raise src.SatException(
+                _("Error in get_user_config_file: " "missing user config file path")
+            )
+        return self.user_config_file_path
+
 
 def check_path(path, ext=[]):
-    '''Construct a text with the input path and "not found" if it does not
+    """Construct a text with the input path and "not found" if it does not
        exist.
-    
+
     :param path Str: the path to check.
-    :param ext List: An extension. Verify that the path extension 
+    :param ext List: A list of extensions. Verify that the path extension
                      is in the list
     :return: The string of the path with information
     :rtype: Str
-    '''
+    """
     # check if file exists
     if not os.path.exists(path):
-        return "'%s'" % path + " " + src.printcolors.printcError(_(
-                                                            "** not found"))
+        return "'%s'" % path + " " + src.printcolors.printcError(_("** not found"))
 
     # check extension
     if len(ext) > 0:
         fe = os.path.splitext(path)[1].lower()
         if fe not in ext:
-            return "'%s'" % path + " " + src.printcolors.printcError(_(
-                                                        "** bad extension"))
+            return (
+                "'%s'" % path + " " + src.printcolors.printcError(_("** bad extension"))
+            )
 
     return path
 
+
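For readers skimming the diff, here is a minimal standalone sketch of what check_path computes, with the colour markup dropped (helper name and sample path are illustrative):

    import os

    def check_path_sketch(path, ext=()):
        # annotate a path for display: flag missing files and unexpected extensions
        if not os.path.exists(path):
            return "'%s' ** not found" % path
        if ext and os.path.splitext(path)[1].lower() not in ext:
            return "'%s' ** bad extension" % path
        return path

    print(check_path_sketch("/tmp/does-not-exist.tgz", ext=(".tgz",)))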
 def show_product_info(config, name, logger):
-    '''Display on the terminal and logger information about a product.
-    
+    """Display on the terminal and logger information about a product.
+
     :param config Config: the global configuration.
     :param name Str: The name of the product
     :param logger Logger: The logger instance to use for the display
-    '''
-    
+    """
+
     logger.write(_("%s is a product\n") % src.printcolors.printcLabel(name), 2)
     pinfo = src.product.get_product_config(config, name)
-    
+
     if "depend" in pinfo:
         src.printcolors.print_value(logger, "depends on", sorted(pinfo.depend), 2)
 
@@ -654,22 +794,17 @@ def show_product_info(config, name, logger):
         src.printcolors.print_value(logger, "optional", sorted(pinfo.opt_depend), 2)
 
     if "build_depend" in pinfo:
-        src.printcolors.print_value(logger, "build depend on", sorted(pinfo.build_depend), 2)
-
+        src.printcolors.print_value(
+            logger, "build depend on", sorted(pinfo.build_depend), 2
+        )
 
     # information on pyconf
     logger.write("\n", 2)
     logger.write(src.printcolors.printcLabel("configuration:") + "\n", 2)
     if "from_file" in pinfo:
-        src.printcolors.print_value(logger,
-                                    "pyconf file path",
-                                    pinfo.from_file,
-                                    2)
+        src.printcolors.print_value(logger, "pyconf file path", pinfo.from_file, 2)
     if "section" in pinfo:
-        src.printcolors.print_value(logger,
-                                    "section",
-                                    pinfo.section,
-                                    2)
+        src.printcolors.print_value(logger, "section", pinfo.section, 2)
 
     # information on prepare
     logger.write("\n", 2)
@@ -681,72 +816,66 @@ def show_product_info(config, name, logger):
         method += " (dev)"
     src.printcolors.print_value(logger, "get method", method, 2)
 
-    if method == 'cvs':
+    if method == "cvs":
         src.printcolors.print_value(logger, "server", pinfo.cvs_info.server, 2)
-        src.printcolors.print_value(logger, "base module",
-                                    pinfo.cvs_info.module_base, 2)
+        src.printcolors.print_value(
+            logger, "base module", pinfo.cvs_info.module_base, 2
+        )
         src.printcolors.print_value(logger, "source", pinfo.cvs_info.source, 2)
         src.printcolors.print_value(logger, "tag", pinfo.cvs_info.tag, 2)
 
-    elif method == 'svn':
+    elif method == "svn":
         src.printcolors.print_value(logger, "repo", pinfo.svn_info.repo, 2)
 
-    elif method == 'git':
+    elif method == "git":
         src.printcolors.print_value(logger, "repo", pinfo.git_info.repo, 2)
         src.printcolors.print_value(logger, "tag", pinfo.git_info.tag, 2)
 
-    elif method == 'archive':
-        src.printcolors.print_value(logger,
-                                    "get from",
-                                    check_path(pinfo.archive_info.archive_name),
-                                    2)
+    elif method == "archive":
+        src.printcolors.print_value(
+            logger, "get from", check_path(pinfo.archive_info.archive_name), 2
+        )
 
-    if 'patches' in pinfo:
+    if "patches" in pinfo:
         for patch in pinfo.patches:
             src.printcolors.print_value(logger, "patch", check_path(patch), 2)
 
     if src.product.product_is_fixed(pinfo):
-        src.printcolors.print_value(logger, "install_dir",
-                                    check_path(pinfo.install_dir), 2)
+        src.printcolors.print_value(
+            logger, "install_dir", check_path(pinfo.install_dir), 2
+        )
 
     if src.product.product_is_native(pinfo) or src.product.product_is_fixed(pinfo):
         return
-    
+
     # information on compilation
     if src.product.product_compiles(pinfo):
         logger.write("\n", 2)
         logger.write(src.printcolors.printcLabel("compile:") + "\n", 2)
-        src.printcolors.print_value(logger,
-                                    "compilation method",
-                                    pinfo.build_source,
-                                    2)
-        
+        src.printcolors.print_value(logger, "compilation method", pinfo.build_source, 2)
+
         if pinfo.build_source == "script" and "compil_script" in pinfo:
-            src.printcolors.print_value(logger, 
-                                        "Compilation script", 
-                                        pinfo.compil_script, 
-                                        2)
-        
-        if 'nb_proc' in pinfo:
+            src.printcolors.print_value(
+                logger, "Compilation script", pinfo.compil_script, 2
+            )
+
+        if "nb_proc" in pinfo:
             src.printcolors.print_value(logger, "make -j", pinfo.nb_proc, 2)
-    
-        src.printcolors.print_value(logger, 
-                                    "source dir", 
-                                    check_path(pinfo.source_dir), 
-                                    2)
-        if 'install_dir' in pinfo:
-            src.printcolors.print_value(logger, 
-                                        "build dir", 
-                                        check_path(pinfo.build_dir), 
-                                        2)
-            src.printcolors.print_value(logger, 
-                                        "install dir", 
-                                        check_path(pinfo.install_dir), 
-                                        2)
+
+        src.printcolors.print_value(
+            logger, "source dir", check_path(pinfo.source_dir), 2
+        )
+        if "install_dir" in pinfo:
+            src.printcolors.print_value(
+                logger, "build dir", check_path(pinfo.build_dir), 2
+            )
+            src.printcolors.print_value(
+                logger, "install dir", check_path(pinfo.install_dir), 2
+            )
         else:
-            logger.write("  " + 
-                         src.printcolors.printcWarning(_("no install dir")) + 
-                         "\n", 2)
+            logger.write(
+                "  " + src.printcolors.printcWarning(_("no install dir")) + "\n", 2
+            )
 
         src.printcolors.print_value(logger, "debug ", pinfo.debug, 2)
         src.printcolors.print_value(logger, "verbose ", pinfo.verbose, 2)
@@ -762,258 +891,297 @@ def show_product_info(config, name, logger):
     logger.write("\n", 2)
     logger.write(src.printcolors.printcLabel("environ :") + "\n", 2)
     if "environ" in pinfo and "env_script" in pinfo.environ:
-        src.printcolors.print_value(logger, 
-                                    "script", 
-                                    check_path(pinfo.environ.env_script), 
-                                    2)
+        src.printcolors.print_value(
+            logger, "script", check_path(pinfo.environ.env_script), 2
+        )
 
     # display run-time environment
-    zz = src.environment.SalomeEnviron(config,
-                                       src.fileEnviron.ScreenEnviron(logger), 
-                                       False)
+    zz = src.environment.SalomeEnviron(
+        config, src.fileEnviron.ScreenEnviron(logger), False
+    )
     zz.set_python_libdirs()
     zz.set_a_product(name, logger)
     logger.write("\n", 2)
 
 
 def show_patchs(config, logger):
-  '''Prints all the used patchs in the application.
+    """Prints all the used patchs in the application.
 
-  :param config Config: the global configuration.
-  :param logger Logger: The logger instance to use for the display
-  '''
-  oneOrMore = False
-  for product in sorted(config.APPLICATION.products):
-    try:
-      product_info = src.product.get_product_config(config, product)
-      if src.product.product_has_patches(product_info):
-        oneOrMore = True
-        logger.write("%s:\n" % product, 1)
-        for i in product_info.patches:
-          logger.write(src.printcolors.printcInfo("    %s\n" % i), 1)
-    except Exception as e:
-      msg = "problem on product %s\n%s\n" % (product, str(e))
-      logger.error(msg)
+    :param config Config: the global configuration.
+    :param logger Logger: The logger instance to use for the display
+    """
+    oneOrMore = False
+    for product in sorted(config.APPLICATION.products):
+        try:
+            product_info = src.product.get_product_config(config, product)
+            if src.product.product_has_patches(product_info):
+                oneOrMore = True
+                logger.write("%s:\n" % product, 1)
+                for i in product_info.patches:
+                    logger.write(src.printcolors.printcInfo("    %s\n" % i), 1)
+        except Exception as e:
+            msg = "problem on product %s\n%s\n" % (product, str(e))
+            logger.error(msg)
+
+    if oneOrMore:
+        logger.write("\n", 1)
+    else:
+        logger.write("No patchs found\n", 1)
 
-  if oneOrMore:
-    logger.write("\n", 1)
-  else:
-    logger.write("No patchs found\n", 1)
 
 def check_install_system(config, logger):
-  '''Check the installation of all (declared) system products
-
-  :param config Config: the global configuration.
-  :param logger Logger: The logger instance to use for the display
-  '''
-  # get the command to use for checking the system dependencies
-  # (either rmp or apt)
-  check_cmd=src.system.get_pkg_check_cmd(config.VARS.dist_name)
-  logger.write("\nCheck the system dependencies declared in the application\n",1)
-  pkgmgr=check_cmd[0]
-  run_dep_ko=[] # list of missing run time dependencies
-  build_dep_ko=[] # list of missing compile time dependencies
-  for product in sorted(config.APPLICATION.products):
-    try:
-      product_info = src.product.get_product_config(config, product)
-      if src.product.product_is_native(product_info):
-        # if the product is native, get (in two dictionnaries the runtime and compile time 
-        # system dependencies with the status (OK/KO)
-        run_pkg,build_pkg=src.product.check_system_dep(config.VARS.dist, check_cmd, product_info)
-        #logger.write("\n*** %s ***\n" % product, 1)
-        for pkg in run_pkg:
-            logger.write("\n   - "+pkg + " : " + run_pkg[pkg], 1)
-            if "KO" in run_pkg[pkg]:
-                run_dep_ko.append(pkg)
-        for pkg in build_pkg:
-            logger.write("\n   - "+pkg + " : " + build_pkg[pkg], 1)
-            if "KO" in build_pkg[pkg]:
-                build_dep_ko.append(pkg)
-        #  logger.write(src.printcolors.printcInfo("    %s\n" % i), 1)
+    """Check the installation of all (declared) system products
+
+    :param config Config: the global configuration.
+    :param logger Logger: The logger instance to use for the display
+    """
+    # get the command to use for checking the system dependencies
+    # (either rpm or apt)
+    check_cmd = src.system.get_pkg_check_cmd(config.VARS.dist_name)
+    logger.write("\nCheck the system dependencies declared in the application\n", 1)
+    pkgmgr = check_cmd[0]
+    run_dep_ko = []  # list of missing run time dependencies
+    build_dep_ko = []  # list of missing compile time dependencies
+    for product in sorted(config.APPLICATION.products):
+        try:
+            product_info = src.product.get_product_config(config, product)
+            if src.product.product_is_native(product_info):
+                # if the product is native, get (in two dictionaries) the runtime
+                # and compile time system dependencies with their status (OK/KO)
+                run_pkg, build_pkg = src.product.check_system_dep(
+                    config.VARS.dist, check_cmd, product_info
+                )
+                # logger.write("\n*** %s ***\n" % product, 1)
+                for pkg in run_pkg:
+                    logger.write("\n   - " + pkg + " : " + run_pkg[pkg], 1)
+                    if "KO" in run_pkg[pkg]:
+                        run_dep_ko.append(pkg)
+                for pkg in build_pkg:
+                    logger.write("\n   - " + pkg + " : " + build_pkg[pkg], 1)
+                    if "KO" in build_pkg[pkg]:
+                        build_dep_ko.append(pkg)
+                #  logger.write(src.printcolors.printcInfo("    %s\n" % i), 1)
+
+        except Exception as e:
+            msg = "\nproblem with the check of system prerequisite %s\n%s\n" % (
+                product,
+                str(e),
+            )
+            logger.error(msg)
+            raise Exception(msg)
+
+    logger.write("\n\n", 1)
+    if run_dep_ko:
+        msg = (
+            "Some run time system dependencies are missing!\n"
+            + "Please install them with %s before running salome" % pkgmgr
+        )
+        logger.warning(msg)
+        logger.write("missing run time dependencies : ", 1)
+        for md in run_dep_ko:
+            logger.write(md + " ", 1)
+        logger.write("\n\n")
+
+    if build_dep_ko:
+        msg = (
+            "Some compile time system dependencies are missing!\n"
+            + "Please install them with %s before compiling salome" % pkgmgr
+        )
+        logger.warning(msg)
+        logger.write("missing compile time dependencies : ", 1)
+        for md in build_dep_ko:
+            logger.write(md + " ", 1)
+        logger.write("\n\n")
 
-    except Exception as e:
-      msg = "\nproblem with the check of system prerequisite %s\n%s\n" % (product, str(e))
-      logger.error(msg)
-      raise Exception(msg)
-
-  logger.write("\n\n",1)
-  if run_dep_ko:
-      msg="Some run time system dependencies are missing!\n"+\
-          "Please install them with %s before running salome" % pkgmgr
-      logger.warning(msg)
-      logger.write("missing run time dependencies : ",1)
-      for md in run_dep_ko: 
-        logger.write(md+" ",1)
-      logger.write("\n\n")
-        
-  if build_dep_ko:
-      msg="Some compile time system dependencies are missing!\n"+\
-          "Please install them with %s before compiling salome" % pkgmgr
-      logger.warning(msg)
-      logger.write("missing compile time dependencies : ",1)
-      for md in build_dep_ko: 
-        logger.write(md+" ",1)
-      logger.write("\n\n")
-    
 
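The essential logic of check_install_system above is collecting the packages whose status string contains "KO"; a standalone sketch with a made-up status dict:

    # assumed data shape: {package_name: status_string}, one dict per native product
    run_pkg = {"libhdf5-103": "OK", "libopenmpi3": "KO (not installed)"}
    run_dep_ko = [pkg for pkg, status in run_pkg.items() if "KO" in status]
    if run_dep_ko:
        print("missing run time dependencies : " + " ".join(run_dep_ko))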
 def show_dependencies(config, products, logger):
-    '''Prints dependencies of products in the application.
+    """Prints dependencies of products in the application.
 
     :param config Config: the global configuration.
     :param logger Logger: The logger instance to use for the display
-    '''
+    """
+
+    from compile import get_dependencies_graph, depth_search_graph, find_path_graph
 
-    from compile import get_dependencies_graph,depth_search_graph,find_path_graph
     # Get the list of all application products, and create its dependency graph
-    all_products_infos = src.product.get_products_infos(config.APPLICATION.products,config)
-    all_products_graph=get_dependencies_graph(all_products_infos, compile_time=False)
+    all_products_infos = src.product.get_products_infos(
+        config.APPLICATION.products, config
+    )
+    all_products_graph = get_dependencies_graph(all_products_infos, compile_time=False)
 
-    products_list=[]
-    product_liste_name=""
+    products_list = []
+    product_liste_name = ""
     if products is None:
-        products_list=config.APPLICATION.products
+        products_list = config.APPLICATION.products
         products_graph = all_products_graph
     else:
         # 1. Extend the list with all products that depend upon the given list of products
-        products_list=products
-        product_liste_name="_".join(products)
-        visited=[]
+        products_list = products
+        product_liste_name = "_".join(products)
+        visited = []
         for p_name in products_list:
-            visited=depth_search_graph(all_products_graph, p_name, visited)
+            visited = depth_search_graph(all_products_graph, p_name, visited)
         products_infos = src.product.get_products_infos(visited, config)
         products_graph = get_dependencies_graph(products_infos, compile_time=False)
 
         # 2. Extend the list with all the dependencies of the given list of products
-        children=[]
+        children = []
         for n in all_products_graph:
             # for all products (that are not in products_list):
             # if we find a path from the product to the product list,
-            # then we product is a child and we add it to the children list 
+            # then the product is a child and we add it to the children list
             if (n not in children) and (n not in products_list):
                 if find_path_graph(all_products_graph, n, products_list):
                     children = children + [n]
         products_infos_rev = src.product.get_products_infos(children, config)
-        products_graph_rev = get_dependencies_graph(products_infos_rev, compile_time=False)
+        products_graph_rev = get_dependencies_graph(
+            products_infos_rev, compile_time=False
+        )
 
     logger.write("Dependency graph (python format)\n%s\n" % products_graph, 3)
 
-    gv_file_name='%s_%s_dep.gv' % (config.VARS.application,product_liste_name)
-    logger.write("\nDependency graph (graphviz format) written in file %s\n" % 
-                 src.printcolors.printcLabel(gv_file_name), 3)
-    with open(gv_file_name,"w") as f:
+    gv_file_name = "%s_%s_dep.gv" % (config.VARS.application, product_liste_name)
+    logger.write(
+        "\nDependency graph (graphviz format) written in file %s\n"
+        % src.printcolors.printcLabel(gv_file_name),
+        3,
+    )
+    with open(gv_file_name, "w") as f:
         f.write("digraph G {\n")
         for p in products_graph:
             for dep in products_graph[p]:
-                f.write ("\t%s -> %s\n" % (p,dep))
+                f.write("\t%s -> %s\n" % (p, dep))
         f.write("}\n")
-        
 
     if products is not None:
         # if a list of products was given, produce also the reverse dependencies
-        gv_revfile_name='%s_%s_rev_dep.gv' % (config.VARS.application,product_liste_name)
-        logger.write("\nReverse dependency graph (graphviz format) written in file %s\n" % 
-                 src.printcolors.printcLabel(gv_revfile_name), 3)
-        with open(gv_revfile_name,"w") as rf:
+        gv_revfile_name = "%s_%s_rev_dep.gv" % (
+            config.VARS.application,
+            product_liste_name,
+        )
+        logger.write(
+            "\nReverse dependency graph (graphviz format) written in file %s\n"
+            % src.printcolors.printcLabel(gv_revfile_name),
+            3,
+        )
+        with open(gv_revfile_name, "w") as rf:
             rf.write("digraph G {\n")
             for p in products_graph_rev:
                 for dep in products_graph_rev[p]:
-                    rf.write ("\t%s -> %s\n" % (p,dep))
+                    rf.write("\t%s -> %s\n" % (p, dep))
             rf.write("}\n")
-    
-    graph_cmd = "dot -Tpdf %s -o %s.pdf" % (gv_file_name,gv_file_name)
-    logger.write("\nTo generate a graph use dot tool : \n  %s" % 
-                 src.printcolors.printcLabel(graph_cmd), 3)
+
+    graph_cmd = "dot -Tpdf %s -o %s.pdf" % (gv_file_name, gv_file_name)
+    logger.write(
+        "\nTo generate a graph use dot tool : \n  %s"
+        % src.printcolors.printcLabel(graph_cmd),
+        3,
+    )
+
+
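The .gv emission in show_dependencies boils down to writing one edge per (product, dependency) pair; a self-contained sketch with a made-up graph, to be rendered afterwards with the dot command printed above:

    # illustrative adjacency dict: product -> list of run-time dependencies
    products_graph = {"GUI": ["KERNEL"], "PARAVIS": ["GUI", "KERNEL"]}
    with open("example_dep.gv", "w") as f:
        f.write("digraph G {\n")
        for p in products_graph:
            for dep in products_graph[p]:
                f.write("\t%s -> %s\n" % (p, dep))
        f.write("}\n")
    # then: dot -Tpdf example_dep.gv -o example_dep.gv.pdf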
 def show_install_dir(config, logger):
-  '''Prints all the used installed directories in the application.
+    """Prints all the used installed directories in the application.
 
-  :param config Config: the global configuration.
-  :param logger Logger: The logger instance to use for the display
-  '''
-  for product in sorted(config.APPLICATION.products):
-    try:
-      product_info = src.product.get_product_config(config, product)
-      install_path=src.Path(product_info.install_dir)
-      if (src.product.product_is_native(product_info)):
-          install_path="Native"
-      elif (src.product.product_is_fixed(product_info)):
-          install_path+=" (Fixed)"
-      logger.write("%s : %s\n" % (product, install_path) , 1)
-    except Exception as e:
-      msg = "problem on product %s\n%s\n" % (product, str(e))
-      logger.error(msg)
-  logger.write("\n", 1)
+    :param config Config: the global configuration.
+    :param logger Logger: The logger instance to use for the display
+    """
+    for product in sorted(config.APPLICATION.products):
+        try:
+            product_info = src.product.get_product_config(config, product)
+            install_path = src.Path(product_info.install_dir)
+            if src.product.product_is_native(product_info):
+                install_path = "Native"
+            elif src.product.product_is_fixed(product_info):
+                install_path += " (Fixed)"
+            logger.write("%s : %s\n" % (product, install_path), 1)
+        except Exception as e:
+            msg = "problem on product %s\n%s\n" % (product, str(e))
+            logger.error(msg)
+    logger.write("\n", 1)
 
 
 def show_properties(config, logger):
-  '''Prints all the used properties in the application.
-
-  :param config Config: the global configuration.
-  :param logger Logger: The logger instance to use for the display
-  '''
-  if "properties" in config.APPLICATION:
-      # some properties are defined at application level, we display them
-      logger.write("Application properties:\n", 1)
-      for prop in config.APPLICATION.properties:
-          logger.write(src.printcolors.printcInfo("    %s : %s\n" % (prop, config.APPLICATION.properties[prop])), 1)
-  oneOrMore = False
-  for product in sorted(config.APPLICATION.products):
-    try:
-      product_info = src.product.get_product_config(config, product)
-      done = False
-      try:
-        for prop in product_info.properties:
-          if not done:
-            logger.write("%s:\n" % product, 1)
-            done = True
-          oneOrMore = True
-          logger.write(src.printcolors.printcInfo("    %s : %s\n" % (prop, product_info.properties[prop])), 1)
-      except Exception as e:
-        pass
-    except Exception as e:
-      # logger.write(src.printcolors.printcInfo("    %s\n" % "no properties"), 1)
-      msg = "problem on product %s\n%s\n" % (product, e)
-      logger.error(msg)
+    """Prints all the used properties in the application.
+
+    :param config Config: the global configuration.
+    :param logger Logger: The logger instance to use for the display
+    """
+    if "properties" in config.APPLICATION:
+        # some properties are defined at application level, we display them
+        logger.write("Application properties:\n", 1)
+        for prop in config.APPLICATION.properties:
+            logger.write(
+                src.printcolors.printcInfo(
+                    "    %s : %s\n" % (prop, config.APPLICATION.properties[prop])
+                ),
+                1,
+            )
+    oneOrMore = False
+    for product in sorted(config.APPLICATION.products):
+        try:
+            product_info = src.product.get_product_config(config, product)
+            done = False
+            try:
+                for prop in product_info.properties:
+                    if not done:
+                        logger.write("%s:\n" % product, 1)
+                        done = True
+                    oneOrMore = True
+                    logger.write(
+                        src.printcolors.printcInfo(
+                            "    %s : %s\n" % (prop, product_info.properties[prop])
+                        ),
+                        1,
+                    )
+            except Exception as e:
+                pass
+        except Exception as e:
+            # logger.write(src.printcolors.printcInfo("    %s\n" % "no properties"), 1)
+            msg = "problem on product %s\n%s\n" % (product, e)
+            logger.error(msg)
+
+    if oneOrMore:
+        logger.write("\n", 1)
+    else:
+        logger.write("No properties found\n", 1)
 
-  if oneOrMore:
-    logger.write("\n", 1)
-  else:
-    logger.write("No properties found\n", 1)
 
 def print_value(config, path, show_label, logger, level=0, show_full_path=False):
-    '''Prints a value from the configuration. Prints recursively the values 
+    """Prints a value from the configuration. Prints recursively the values
        under the initial path.
-    
-    :param config class 'src.pyconf.Config': The configuration 
+
+    :param config class 'src.pyconf.Config': The configuration
                                              from which the value is displayed.
     :param path str : the path in the configuration of the value to print.
-    :param show_label boolean: if True, do a basic display. 
+    :param show_label boolean: if True, do a basic display.
                                (useful for bash completion)
     :param logger Logger: the logger instance
     :param level int: The number of spaces to add before display.
     :param show_full_path :
-    '''            
-    
+    """
+
     # Make sure that the path does not end with a dot
-    if path.endswith('.'):
+    if path.endswith("."):
         path = path[:-1]
-    
+
     # display all the path or not
     if show_full_path:
         vname = path
     else:
-        vname = path.split('.')[-1]
+        vname = path.split(".")[-1]
 
     # number of spaces before the display
     tab_level = "  " * level
-    
+
     # call to the function that gets the value of the path.
     try:
         val = config.getByPath(path)
     except Exception as e:
         logger.write(tab_level)
-        logger.write("%s: ERROR %s\n" % (src.printcolors.printcLabel(vname), 
-                                         src.printcolors.printcError(str(e))))
+        logger.write(
+            "%s: ERROR %s\n"
+            % (src.printcolors.printcLabel(vname), src.printcolors.printcError(str(e)))
+        )
         return
 
     # in this case, display only the value
@@ -1021,39 +1189,43 @@ def print_value(config, path, show_label, logger, level=0, show_full_path=False)
         logger.write(tab_level)
         logger.write("%s: " % src.printcolors.printcLabel(vname))
 
-    # The case where the value has under values, 
+    # The case where the value has sub-values,
     # do a recursive call to the function
-    if dir(val).__contains__('keys'):
-        if show_label: logger.write("\n")
+    if dir(val).__contains__("keys"):
+        if show_label:
+            logger.write("\n")
         for v in sorted(val.keys()):
-            print_value(config, path + '.' + v, show_label, logger, level + 1)
-    elif val.__class__ == src.pyconf.Sequence or isinstance(val, list): 
+            print_value(config, path + "." + v, show_label, logger, level + 1)
+    elif val.__class__ == src.pyconf.Sequence or isinstance(val, list):
         # in this case, value is a list (or a Sequence)
-        if show_label: logger.write("\n")
+        if show_label:
+            logger.write("\n")
         index = 0
         for v in val:
-            print_value(config, path + "[" + str(index) + "]", 
-                        show_label, logger, level + 1)
+            print_value(
+                config, path + "[" + str(index) + "]", show_label, logger, level + 1
+            )
             index = index + 1
-    else: # case where val is just a str
+    else:  # case where val is just a str
         logger.write("%s\n" % val)
 
+
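A standalone sketch of the same recursion as print_value, over plain dicts and lists (pyconf Mapping/Sequence types, logger and colours omitted; the sample config is made up):

    def print_value_sketch(node, name, level=0):
        # mirrors the three branches above: mapping, sequence, plain value
        pad = "  " * level
        if isinstance(node, dict):
            print("%s%s:" % (pad, name))
            for key in sorted(node):
                print_value_sketch(node[key], key, level + 1)
        elif isinstance(node, list):
            print("%s%s:" % (pad, name))
            for index, item in enumerate(node):
                print_value_sketch(item, "%s[%d]" % (name, index), level + 1)
        else:
            print("%s%s: %s" % (pad, name, node))

    print_value_sketch({"APPLICATION": {"name": "SALOME-master", "products": ["KERNEL", "GUI"]}}, "config")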
 def get_config_children(config, args):
-    '''Gets the names of the children of the given parameter.
+    """Gets the names of the children of the given parameter.
        Useful only for completion mechanism
-    
+
     :param config Config: The configuration where to read the values
     :param args: The path in the config from which get the keys
-    '''
+    """
     vals = []
     rootkeys = config.keys()
-    
+
     if len(args) == 0:
         # no parameter returns list of root keys
         vals = rootkeys
     else:
         parent = args[0]
-        pos = parent.rfind('.')
+        pos = parent.rfind(".")
         if pos < 0:
             # Case where there is only one key as parameter.
             # For example VARS
@@ -1062,32 +1234,36 @@ def get_config_children(config, args):
             # Case where there is a part from a key
             # for example VARS.us  (for VARS.user)
             head = parent[0:pos]
-            tail = parent[pos+1:]
+            tail = parent[pos + 1 :]
             try:
                 a = config.getByPath(head)
-                if dir(a).__contains__('keys'):
-                    vals = map(lambda x: head + '.' + x,
-                               [m for m in a.keys() if m.startswith(tail)])
+                if dir(a).__contains__("keys"):
+                    vals = map(
+                        lambda x: head + "." + x,
+                        [m for m in a.keys() if m.startswith(tail)],
+                    )
             except:
                 pass
 
     for v in sorted(vals):
         sys.stdout.write("%s\n" % v)
 
+
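The completion lookup in get_config_children reduces to a prefix match on the parent's keys; a sketch with a plain dict standing in for the Config object (values are illustrative):

    cfg = {"VARS": {"user": "jdoe", "python": "3.6"}, "APPLICATION": {}}
    parent = "VARS.us"  # what the shell passes when completing "VARS.us<TAB>"
    head, _, tail = parent.rpartition(".")
    matches = [head + "." + key for key in cfg.get(head, {}) if key.startswith(tail)]
    print(sorted(matches))  # -> ['VARS.user']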
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the config command description.
     :rtype: str
-    '''
-    return _("The config command allows manipulation "
-             "and operation on config files.\n\nexample:\nsat config "
-             "SALOME-master --info ParaView")
-    
+    """
+    return _(
+        "The config command allows manipulation "
+        "and operation on config files.\n\nexample:\nsat config "
+        "SALOME-master --info ParaView"
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with config parameter.
-    '''
+    """method that is called when salomeTools is called with config parameter."""
     # Parse the options
     (options, args) = parser.parse_args(args)
 
@@ -1103,125 +1279,145 @@ def run(args, runner, logger):
             for val in sorted(runner.cfg.keys()):
                 print_value(runner.cfg, val, not options.no_label, logger)
         else:
-            print_value(runner.cfg, options.value, not options.no_label, logger, 
-                        level=0, show_full_path=False)
-    
+            print_value(
+                runner.cfg,
+                options.value,
+                not options.no_label,
+                logger,
+                level=0,
+                show_full_path=False,
+            )
+
     # case : print a debug value of the config
     if options.debug:
         if options.debug == ".":
             # if argument is ".", print all the config
             res = DBG.indent(DBG.getStrConfigDbg(runner.cfg))
-            logger.write("\nConfig of application %s:\n\n%s\n" % (runner.cfg.VARS.application, res))
+            logger.write(
+                "\nConfig of application %s:\n\n%s\n"
+                % (runner.cfg.VARS.application, res)
+            )
         else:
-            if options.debug[0] == ".": # accept ".PRODUCT.etc" as "PRODUCT.etc"
-              od = options.debug[1:]
+            if options.debug[0] == ".":  # accept ".PRODUCT.etc" as "PRODUCT.etc"
+                od = options.debug[1:]
             else:
-              od = options.debug
+                od = options.debug
             try:
-              aCode = "a = runner.cfg.%s" % od
-              # https://stackoverflow.com/questions/15086040/behavior-of-exec-function-in-python-2-and-python-3
-              aDict = {"runner": runner}
-              exec(aCode, globals(), aDict)
-              # DBG.write("globals()", globals(), True)
-              # DBG.write("aDict", aDict, True)
-              res = DBG.indent(DBG.getStrConfigDbg(aDict["a"]))
-              logger.write("\nConfig.%s of application %s:\n\n%s\n" % (od, runner.cfg.VARS.application, res))
+                aCode = "a = runner.cfg.%s" % od
+                # https://stackoverflow.com/questions/15086040/behavior-of-exec-function-in-python-2-and-python-3
+                aDict = {"runner": runner}
+                exec(aCode, globals(), aDict)
+                # DBG.write("globals()", globals(), True)
+                # DBG.write("aDict", aDict, True)
+                res = DBG.indent(DBG.getStrConfigDbg(aDict["a"]))
+                logger.write(
+                    "\nConfig.%s of application %s:\n\n%s\n"
+                    % (od, runner.cfg.VARS.application, res)
+                )
             except Exception as e:
-              msg = "\nConfig.%s of application %s: Unknown pyconf key\n" % (od, runner.cfg.VARS.application)
-              logger.write(src.printcolors.printcError(msg), 1)
+                msg = "\nConfig.%s of application %s: Unknown pyconf key\n" % (
+                    od,
+                    runner.cfg.VARS.application,
+                )
+                logger.write(src.printcolors.printcError(msg), 1)
 
-    
     # case : edit user pyconf file or application file
     if options.edit:
         editor = runner.cfg.USER.editor
-        if ('APPLICATION' not in runner.cfg and
-                       'open_application' not in runner.cfg): # edit user pyconf
-            usercfg =  osJoin(runner.cfg.VARS.personalDir,
-                                   'SAT.pyconf')
+        if (
+            "APPLICATION" not in runner.cfg and "open_application" not in runner.cfg
+        ):  # edit user pyconf
+            usercfg = osJoin(runner.cfg.VARS.personalDir, "SAT.pyconf")
             logger.write(_("Opening %s\n" % usercfg), 3)
             src.system.show_in_editor(editor, usercfg, logger)
         else:
             # search for file <application>.pyconf and open it
             for path in runner.cfg.PATHS.APPLICATIONPATH:
-                pyconf_path =  osJoin(path,
-                                    runner.cfg.VARS.application + ".pyconf")
+                pyconf_path = osJoin(path, runner.cfg.VARS.application + ".pyconf")
                 if os.path.exists(pyconf_path):
                     logger.write(_("Opening %s\n" % pyconf_path), 3)
                     src.system.show_in_editor(editor, pyconf_path, logger)
                     break
-    
+
     # case : give information about the product(s) in parameter
     if options.products:
-      if options.info is not None:
-        logger.warning('options.products %s overrides options.info %s' % (options.products, options.info))
-      options.info = options.products
+        if options.info is not None:
+            logger.warning(
+                "options.products %s overrides options.info %s"
+                % (options.products, options.info)
+            )
+        options.info = options.products
 
     if options.info:
-      # DBG.write("products", sorted(runner.cfg.APPLICATION.products.keys()), True)
-      src.check_config_has_application(runner.cfg)
-      taggedProducts = src.getProductNames(runner.cfg, options.info, logger)
-      DBG.write("tagged products", sorted(taggedProducts))
-      for prod in sorted(taggedProducts):
-        if prod in runner.cfg.APPLICATION.products:
-          try:
-            if len(taggedProducts) > 1:
-              logger.write("#################### ", 2)
-            show_product_info(runner.cfg, prod, logger)
-          except Exception as e:
-            msg = "problem on product %s\n%s\n" % (prod, str(e))
-            logger.error(msg)
-          # return
-        else:
-          msg = _("%s is not a product of %s.\n") % \
-                (prod, runner.cfg.VARS.application)
-          logger.warning(msg)
-          #raise Exception(msg)
-    
-    # case : copy an existing <application>.pyconf 
+        # DBG.write("products", sorted(runner.cfg.APPLICATION.products.keys()), True)
+        src.check_config_has_application(runner.cfg)
+        taggedProducts = src.getProductNames(runner.cfg, options.info, logger)
+        DBG.write("tagged products", sorted(taggedProducts))
+        for prod in sorted(taggedProducts):
+            if prod in runner.cfg.APPLICATION.products:
+                try:
+                    if len(taggedProducts) > 1:
+                        logger.write("#################### ", 2)
+                    show_product_info(runner.cfg, prod, logger)
+                except Exception as e:
+                    msg = "problem on product %s\n%s\n" % (prod, str(e))
+                    logger.error(msg)
+                # return
+            else:
+                msg = _("%s is not a product of %s.\n") % (
+                    prod,
+                    runner.cfg.VARS.application,
+                )
+                logger.warning(msg)
+                # raise Exception(msg)
+
+    # case : copy an existing <application>.pyconf
     # to ~/.salomeTools/Applications/LOCAL_<application>.pyconf
     if options.copy:
         # product is required
-        src.check_config_has_application( runner.cfg )
+        src.check_config_has_application(runner.cfg)
 
-        # get application file path 
-        source = runner.cfg.VARS.application + '.pyconf'
+        # get application file path
+        source = runner.cfg.VARS.application + ".pyconf"
         source_full_path = ""
         for path in runner.cfg.PATHS.APPLICATIONPATH:
             # ignore personal directory
             if path == runner.cfg.VARS.personalDir:
                 continue
             # loop on all directories that can have pyconf applications
-            zz =  osJoin(path, source)
+            zz = osJoin(path, source)
             if os.path.exists(zz):
                 source_full_path = zz
                 break
 
         if len(source_full_path) == 0:
-            raise src.SatException(_(
-                        "Config file for product %s not found\n") % source)
+            raise src.SatException(_("Config file for product %s not found\n") % source)
         else:
             if len(args) > 0:
                 # a name is given as parameter, use it
                 dest = args[0]
-            elif 'copy_prefix' in runner.cfg.INTERNAL.config:
+            elif "copy_prefix" in runner.cfg.INTERNAL.config:
                 # use prefix
-                dest = (runner.cfg.INTERNAL.config.copy_prefix 
-                        + runner.cfg.VARS.application)
+                dest = (
+                    runner.cfg.INTERNAL.config.copy_prefix + runner.cfg.VARS.application
+                )
             else:
                 # use same name as source
                 dest = runner.cfg.VARS.application
-                
+
             # the full path
-            dest_file =  osJoin(runner.cfg.VARS.personalDir,
-                                     'Applications', dest + '.pyconf')
+            dest_file = osJoin(
+                runner.cfg.VARS.personalDir, "Applications", dest + ".pyconf"
+            )
             if os.path.exists(dest_file):
-                raise src.SatException(_("A personal application"
-                                         " '%s' already exists") % dest)
-            
+                raise src.SatException(
+                    _("A personal application" " '%s' already exists") % dest
+                )
+
             # perform the copy
             shutil.copyfile(source_full_path, dest_file)
             logger.write(_("%s has been created.\n") % dest_file)
-    
+
     # case : display all the available pyconf applications
     if options.list:
         lproduct = list()
@@ -1232,36 +1428,42 @@ def run(args, runner, logger):
                 logger.write("------ %s\n" % src.printcolors.printcHeader(path))
 
             if not os.path.exists(path):
-                logger.write(src.printcolors.printcError(_(
-                                            "Directory not found")) + "\n")
+                logger.write(
+                    src.printcolors.printcError(_("Directory not found")) + "\n"
+                )
             else:
                 for f in sorted(os.listdir(path)):
                     # ignore file that does not end with .pyconf
-                    if not f.endswith('.pyconf'):
+                    if not f.endswith(".pyconf"):
                         continue
 
-                    appliname = f[:-len('.pyconf')]
+                    appliname = f[: -len(".pyconf")]
                     if appliname not in lproduct:
                         lproduct.append(appliname)
-                        if path.startswith(runner.cfg.VARS.personalDir) \
-                                    and not options.no_label:
+                        if (
+                            path.startswith(runner.cfg.VARS.personalDir)
+                            and not options.no_label
+                        ):
                             logger.write("%s*\n" % appliname)
                         else:
                             logger.write("%s\n" % appliname)
-                            
+
             logger.write("\n")
 
     # case: print all the product names of the application (internal use for completion)
     if options.completion:
         for product_name in runner.cfg.APPLICATION.products.keys():
             logger.write("%s\n" % product_name)
-        
+
     # case : give a synthetic view of all patches used in the application
     if options.show_patchs:
         src.check_config_has_application(runner.cfg)
         # Print some informations
-        logger.write(_('Patchs of application %s\n') %
-                    src.printcolors.printcLabel(runner.cfg.VARS.application), 3)
+        logger.write(
+            _("Patchs of application %s\n")
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            3,
+        )
         logger.write("\n", 2, False)
         show_patchs(runner.cfg, logger)
 
@@ -1269,8 +1471,11 @@ def run(args, runner, logger):
     if options.show_install:
         src.check_config_has_application(runner.cfg)
         # Print some informations
-        logger.write(_('Installation directories of application %s\n') %
-                    src.printcolors.printcLabel(runner.cfg.VARS.application), 3)
+        logger.write(
+            _("Installation directories of application %s\n")
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            3,
+        )
         logger.write("\n", 2, False)
         show_install_dir(runner.cfg, logger)
 
@@ -1278,8 +1483,13 @@ def run(args, runner, logger):
     if options.show_dependencies:
         src.check_config_has_application(runner.cfg)
         # Print some informations
-        logger.write(_('List of run-time dependencies of the application %s, product by product\n') %
-                    src.printcolors.printcLabel(runner.cfg.VARS.application), 3)
+        logger.write(
+            _(
+                "List of run-time dependencies of the application %s, product by product\n"
+            )
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            3,
+        )
         logger.write("\n", 2, False)
         show_dependencies(runner.cfg, options.products, logger)
 
@@ -1288,12 +1498,15 @@ def run(args, runner, logger):
         src.check_config_has_application(runner.cfg)
 
         # Print some informations
-        logger.write(_('Properties of application %s\n') %
-                    src.printcolors.printcLabel(runner.cfg.VARS.application), 3)
+        logger.write(
+            _("Properties of application %s\n")
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            3,
+        )
         logger.write("\n", 2, False)
         show_properties(runner.cfg, logger)
 
     # check system prerequisites
     if options.check_system:
-       check_install_system(runner.cfg, logger)
-       pass 
+        check_install_system(runner.cfg, logger)
+        pass
index 876e928f189cc0777c3dd7d2fdc83d7cf80ec1e6..823442d4ec40dfd8f0f2c68c95532df6c1319e95 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,10 +22,21 @@ import src
 
 # Define all possible option for configure command :  sat configure <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to configure. This option accepts a comma separated list.'))
-parser.add_option('o', 'option', 'string', 'option',
-    _('Optional: Option to add to the configure or cmake command.'), "")
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to configure. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "o",
+    "option",
+    "string",
+    "option",
+    _("Optional: Option to add to the configure or cmake command."),
+    "",
+)
 
 
 def log_step(logger, header, step):
@@ -34,6 +45,7 @@ def log_step(logger, header, step):
     logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -42,40 +54,42 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
+
 def configure_all_products(config, products_infos, conf_option, logger):
-    '''Execute the proper configuration commands 
+    """Execute the proper configuration commands
        in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_info list: List of
                                  (str, Config) => (product_name, product_info)
     :param conf_option str: The options to add to the command
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
+    """
     res = 0
     for p_name_info in products_infos:
         res_prod = configure_product(p_name_info, conf_option, config, logger)
         if res_prod != 0:
-            res += 1 
+            res += 1
     return res
 
+
 def configure_product(p_name_info, conf_option, config, logger):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param conf_option str: The options to add to the command
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-    
+
     # Logging
     logger.write("\n", 4, False)
     logger.write("################ ", 4)
@@ -86,8 +100,11 @@ def configure_product(p_name_info, conf_option, config, logger):
     logger.flush()
 
     # Do nothing if the product is not compilable
-    if ("properties" in p_info and "compilation" in p_info.properties and 
-                                        p_info.properties.compilation == "no"):
+    if (
+        "properties" in p_info
+        and "compilation" in p_info.properties
+        and p_info.properties.compilation == "no"
+    ):
         log_step(logger, header, "ignored")
         logger.write("\n", 3, False)
         return 0
@@ -95,12 +112,12 @@ def configure_product(p_name_info, conf_option, config, logger):
     # Instantiate the class that manages all the construction commands
     # like cmake, make, make install, make test, environment management, etc...
     builder = src.compilation.Builder(config, logger, p_name, p_info)
-    
+
     # Prepare the environment
     log_step(logger, header, "PREPARE ENV")
     res_prepare = builder.prepare()
     log_res_step(logger, res_prepare)
-    
+
     # Execute buildconfigure, configure if the product is autotools
     # Execute cmake if the product is cmake
     res = 0
@@ -118,76 +135,94 @@ def configure_product(p_name_info, conf_option, config, logger):
         res_cm = builder.cmake(conf_option)
         log_res_step(logger, res_cm)
         res += res_cm
-    
+
     # Log the result
     if res > 0:
         logger.write("\r%s%s" % (header, " " * 20), 3)
         logger.write("\r" + header + src.printcolors.printcError("KO"))
-        logger.write("==== %(KO)s in configuration of %(name)s \n" %
-            { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+        logger.write(
+            "==== %(KO)s in configuration of %(name)s \n"
+            % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+            4,
+        )
         logger.flush()
     else:
         logger.write("\r%s%s" % (header, " " * 20), 3)
         logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
         logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-        logger.write("==== Configuration of %(name)s %(OK)s \n" %
-            { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+        logger.write(
+            "==== Configuration of %(name)s %(OK)s \n"
+            % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+            4,
+        )
         logger.flush()
     logger.write("\n", 3, False)
 
     return res
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the configure command description.
     :rtype: str
-    '''
-    return _("The configure command executes in the build directory"
-             " the configure commands corresponding\nto the compilation mode"
-             " of the application products.\nThe possible compilation modes"
-             " are \"cmake\", \"autotools\", or a script.\n\nHere are the "
-             "commands to be run :\nautotools: build_configure and configure\n"
-             "cmake: cmake\nscript: do nothing\n\nexample:\nsat configure "
-             "SALOME-master --products KERNEL,GUI,PARAVIS")
-  
+    """
+    return _(
+        "The configure command executes in the build directory"
+        " the configure commands corresponding\nto the compilation mode"
+        " of the application products.\nThe possible compilation modes"
+        ' are "cmake", "autotools", or a script.\n\nHere are the '
+        "commands to be run :\nautotools: build_configure and configure\n"
+        "cmake: cmake\nscript: do nothing\n\nexample:\nsat configure "
+        "SALOME-master --products KERNEL,GUI,PARAVIS"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with make parameter.
-    '''
-    
+    """method that is called when salomeTools is called with make parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    
+
     # Print some information
-    logger.write(_('Configuring the sources of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
-    info = [(_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
+    logger.write(
+        _("Configuring the sources of the application %s\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
+    info = [
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD"))
+    ]
     src.print_info(logger, info)
-    
+
     # Call the function that will loop over all the products and execute
     # the right command(s)
     if options.option is None:
         options.option = ""
     res = configure_all_products(runner.cfg, products_infos, options.option, logger)
-    
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nConfiguration: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
-    return res 
+
+    logger.write(
+        _("\nConfiguration: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
+    return res
index 0c9b44881b807698f208719607bd97816135b85d..0a218814e4f0ba6f3dd81b1788f0673d2d6fd3ef 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -21,10 +21,24 @@ import src
 
 # Define all possible options for the doc command:  sat doc <options>
 parser = src.options.Options()
-parser.add_option('x', 'xml', 'boolean', 'xml', "Open sat xml/html documentation in browser (x as firefoX)", None)
-parser.add_option('p', 'pdf', 'boolean', 'pdf', "Open sat pdf documentation in viewer", False)
-parser.add_option('e', 'edit', 'boolean', 'edit', "edit/modify source dodumentation rst files", False)
-parser.add_option('c', 'compile', 'boolean', 'compile', "how to compile html/pdf doc", False)
+parser.add_option(
+    "x",
+    "xml",
+    "boolean",
+    "xml",
+    "Open sat xml/html documentation in browser (x as firefoX)",
+    None,
+)
+parser.add_option(
+    "p", "pdf", "boolean", "pdf", "Open sat pdf documentation in viewer", False
+)
+parser.add_option(
+    "e", "edit", "boolean", "edit", "edit/modify source dodumentation rst files", False
+)
+parser.add_option(
+    "c", "compile", "boolean", "compile", "how to compile html/pdf doc", False
+)
+
 
 def description():
     """method that is called when salomeTools is called with --help option.
@@ -32,22 +46,24 @@ def description():
     :return: The text to display for the log command description.
     :rtype: str
     """
-    return _("""\
+    return _(
+        """\
 The doc command gives access to the sat documentation.
     
 example:
 >> sat doc         # --xml as default
 >> sat doc --xml
 >> sat doc --pdf
-""")
+"""
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with log parameter.
-    '''
+    """method that is called when salomeTools is called with log parameter."""
     # Parse the options
     (options, args) = parser.parse_args(args)
 
-    # get the log directory. 
+    # get the documentation directory.
     satDir = runner.cfg.VARS.salometoolsway
     docDir = os.path.join(satDir, "doc")
     htmlFile = os.path.join(docDir, "build", "html", "index.html")
@@ -61,8 +77,10 @@ def run(args, runner, logger):
 
     if options.pdf:
         if not os.path.isfile(pdfFile):
-            msg = "\npdf documentation not found. Please build it inside doc directory\n"\
-                  "(follow README instructions in doc directory)\n"
+            msg = (
+                "\npdf documentation not found. Please build it inside doc directory\n"
+                "(follow README instructions in doc directory)\n"
+            )
             logger.error(msg)
             return 1
         src.system.show_in_editor(runner.cfg.USER.pdf_viewer, pdfFile, logger)
@@ -72,12 +90,16 @@ def run(args, runner, logger):
         src.system.show_in_editor(runner.cfg.USER.editor, rstFilesCommands, logger)
 
     elif options.compile:
-        logger.write("How to compile documentation:\n%s" % open(readmeFile,"r").read(), 3)
+        logger.write(
+            "How to compile documentation:\n%s" % open(readmeFile, "r").read(), 3
+        )
 
     else:
         if not os.path.isfile(htmlFile):
-            msg = "\nhtml documentation not found. Please build it inside doc directory\n"\
-                  "(follow README instructions in doc directory)\n"
+            msg = (
+                "\nhtml documentation not found. Please build it inside doc directory\n"
+                "(follow README instructions in doc directory)\n"
+            )
             logger.error(msg)
             return 1
         src.system.show_in_editor(runner.cfg.USER.browser, htmlFile, logger)
index 7b2676354df69d4307767c47b49a5bfd55cbdbd9..76de3cc6d6cf3d9db5fe93726d35792a8464ef20 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -21,37 +21,62 @@ import os
 import src
 
 parser = src.options.Options()
-parser.add_option('', 'shell', 'list2', 'shell',
-    _("Optional: Generates the environment files for the given format: "
-      "bash (default), bat (for windows), cfg (salome context file) or all."), [])
-parser.add_option('p', 'products', 'list2', 'products',
-    _("Optional: Includes only the specified products."))
-parser.add_option('', 'prefix', 'string', 'prefix',
-    _("Optional: Specifies the prefix for the environment files."), "env")
-parser.add_option('t', 'target', 'string', 'out_dir',
-    _("Optional: Specifies the directory path where to put the environment "
-      "files."),
-    None)
+parser.add_option(
+    "",
+    "shell",
+    "list2",
+    "shell",
+    _(
+        "Optional: Generates the environment files for the given format: "
+        "bash (default), bat (for windows), cfg (salome context file) or all."
+    ),
+    [],
+)
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: Includes only the specified products."),
+)
+parser.add_option(
+    "",
+    "prefix",
+    "string",
+    "prefix",
+    _("Optional: Specifies the prefix for the environment files."),
+    "env",
+)
+parser.add_option(
+    "t",
+    "target",
+    "string",
+    "out_dir",
+    _("Optional: Specifies the directory path where to put the environment " "files."),
+    None,
+)
 
 # list of available shells with extensions
-C_SHELLS = { "bash": "sh", "bat": "bat", "cfg" : "cfg", "tcl" : ""}
-C_ALL_SHELL = [ "bash", "bat", "cfg", "tcl" ]
+C_SHELLS = {"bash": "sh", "bat": "bat", "cfg": "cfg", "tcl": ""}
+C_ALL_SHELL = ["bash", "bat", "cfg", "tcl"]
 
 
 ##
 # Writes all the environment files
-def write_all_source_files(config,
-                           logger,
-                           out_dir=None,
-                           src_root=None,
-                           silent=False,
-                           shells=["bash"],
-                           prefix="env",
-                           env_info=None):
-    '''Generates the environment files.
-    
+def write_all_source_files(
+    config,
+    logger,
+    out_dir=None,
+    src_root=None,
+    silent=False,
+    shells=["bash"],
+    prefix="env",
+    env_info=None,
+):
+    """Generates the environment files.
+
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param out_dir str: The path to the directory where the files will be put
     :param src_root str: The path to the directory where the sources are
@@ -61,8 +86,8 @@ def write_all_source_files(config,
     :param env_info str: The list of products to add in the files.
     :return: The list of the generated files.
     :rtype: List
-    '''
-        
+    """
+
     if not out_dir:
         out_dir = config.APPLICATION.workdir
 
@@ -70,13 +95,16 @@ def write_all_source_files(config,
         raise src.SatException(_("Target directory not found: %s") % out_dir)
 
     if not silent:
-        logger.write(_("Creating environment files for %s\n") % 
-                     src.printcolors.printcLabel(config.APPLICATION.name), 2)
-        src.printcolors.print_value(logger,
-                                    _("Target"),
-                                    src.printcolors.printcInfo(out_dir), 3)
+        logger.write(
+            _("Creating environment files for %s\n")
+            % src.printcolors.printcLabel(config.APPLICATION.name),
+            2,
+        )
+        src.printcolors.print_value(
+            logger, _("Target"), src.printcolors.printcInfo(out_dir), 3
+        )
         logger.write("\n", 3, False)
-    
+
     shells_list = []
     all_shells = C_ALL_SHELL
     if "all" in shells:
@@ -89,42 +117,43 @@ def write_all_source_files(config,
             logger.write(_("Unknown shell: %s\n") % shell, 2)
         else:
             shells_list.append(src.environment.Shell(shell, C_SHELLS[shell]))
-    
-    writer = src.environment.FileEnvWriter(config,
-                                           logger,
-                                           out_dir,
-                                           src_root,
-                                           env_info)
+
+    writer = src.environment.FileEnvWriter(config, logger, out_dir, src_root, env_info)
     writer.silent = silent
     files = []
     for_build = True
     for_launch = False
     for shell in shells_list:
-        if shell.name=="tcl":
-            files.append(writer.write_tcl_files(for_launch,
-                                                shell.name))
+        if shell.name == "tcl":
+            files.append(writer.write_tcl_files(for_launch, shell.name))
         else:
-            files.append(writer.write_env_file("%s_launch.%s" %
-                                               (prefix, shell.extension),
-                                               for_launch,
-                                               shell.name))
-            files.append(writer.write_env_file("%s_build.%s" %
-                                               (prefix, shell.extension),
-                                               for_build,
-                                               shell.name))
+            files.append(
+                writer.write_env_file(
+                    "%s_launch.%s" % (prefix, shell.extension), for_launch, shell.name
+                )
+            )
+            files.append(
+                writer.write_env_file(
+                    "%s_build.%s" % (prefix, shell.extension), for_build, shell.name
+                )
+            )
 
     for f in files:
         if f:
-            logger.write("    "+f+"\n", 3)
+            logger.write("    " + f + "\n", 3)
     return files
 
+
 ##################################################
 
 ##
 # Describes the command
 def description():
-    return _("The environ command generates the environment files of your "
-             "application.\n\nexample:\nsat environ SALOME-master")
+    return _(
+        "The environ command generates the environment files of your "
+        "application.\n\nexample:\nsat environ SALOME-master"
+    )
+
 
 ##
 # Runs the command.
@@ -132,28 +161,34 @@ def run(args, runner, logger):
     (options, args) = parser.parse_args(args)
 
     # check that the command was called with an application
-    src.check_config_has_application( runner.cfg )
-    
+    src.check_config_has_application(runner.cfg)
+
     if options.products is None:
         environ_info = None
     else:
-        # add products specified by user (only products 
+        # add products specified by user (only products
         # included in the application)
-        environ_info = filter(lambda l:
-                              l in runner.cfg.APPLICATION.products.keys(),
-                              options.products)
-    
+        environ_info = filter(
+            lambda l: l in runner.cfg.APPLICATION.products.keys(), options.products
+        )
+
     if options.shell == []:
         shell = ["bash"]
         if src.architecture.is_windows():
             shell = ["bat"]
     else:
         shell = options.shell
-    
+
     out_dir = options.out_dir
     if out_dir:
         out_dir = os.path.abspath(out_dir)
-    
-    write_all_source_files(runner.cfg, logger, out_dir=out_dir, shells=shell,
-                           prefix=options.prefix, env_info=environ_info)
+
+    write_all_source_files(
+        runner.cfg,
+        logger,
+        out_dir=out_dir,
+        shells=shell,
+        prefix=options.prefix,
+        env_info=environ_info,
+    )
     logger.write("\n", 3, False)
index 56e7143135c2284c43020c2dcdfba717d1948aed..084219d937ae1e8898bfb634dbb32b31f066c5af 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,169 +22,208 @@ import src
 
 # create a parser for command line options
 parser = src.options.Options()
-parser.add_option("s",
-                  "sources",
-                  "boolean",
-                  "sources",
-                  _("Search the duplicate files in the SOURCES directory."))
-parser.add_option("p",
-                  "path",
-                  "list2",
-                  "path",
-                  _("Optional: Search the duplicate files in the given "
-                    "directory paths."))
-parser.add_option("",
-                  "exclude-file",
-                  "list2",
-                  "exclude_file",
-                  _("Optional: Override the default list of filtered files."))
-parser.add_option("",
-                  "exclude-extension",
-                  "list2",
-                  "exclude_extension",
-                  _("Optional: Override the default list of filtered "
-                    "extensions."))
-parser.add_option("",
-                  "exclude-path",
-                  "list2",
-                  "exclude_path",
-                  _("Optional: Override the default list of filtered paths."))
-
-default_extension_ignored = ['html', 'png', 'txt', 'js', 'xml', 'cmake', 'gif', 
-                     'm4', 'in', 'pyo', 'pyc', 'doctree', 'css']
-default_files_ignored = ['__init__.py', 'Makefile.am', 'VERSION',
-                         'build_configure', 
-                         'README', 'AUTHORS', 'NEWS', 'COPYING', 'ChangeLog']
+parser.add_option(
+    "s",
+    "sources",
+    "boolean",
+    "sources",
+    _("Search the duplicate files in the SOURCES directory."),
+)
+parser.add_option(
+    "p",
+    "path",
+    "list2",
+    "path",
+    _("Optional: Search the duplicate files in the given " "directory paths."),
+)
+parser.add_option(
+    "",
+    "exclude-file",
+    "list2",
+    "exclude_file",
+    _("Optional: Override the default list of filtered files."),
+)
+parser.add_option(
+    "",
+    "exclude-extension",
+    "list2",
+    "exclude_extension",
+    _("Optional: Override the default list of filtered " "extensions."),
+)
+parser.add_option(
+    "",
+    "exclude-path",
+    "list2",
+    "exclude_path",
+    _("Optional: Override the default list of filtered paths."),
+)
+
+default_extension_ignored = [
+    "html",
+    "png",
+    "txt",
+    "js",
+    "xml",
+    "cmake",
+    "gif",
+    "m4",
+    "in",
+    "pyo",
+    "pyc",
+    "doctree",
+    "css",
+]
+default_files_ignored = [
+    "__init__.py",
+    "Makefile.am",
+    "VERSION",
+    "build_configure",
+    "README",
+    "AUTHORS",
+    "NEWS",
+    "COPYING",
+    "ChangeLog",
+]
 default_directories_ignored = []
 
+
 def list_directory(lpath, extension_ignored, files_ignored, directories_ignored):
-    '''Make the list of all files and paths that are not filtered 
-    
-    :param lpath List: The list of path to of the directories where to 
+    """Make the list of all files and paths that are not filtered
+
+    :param lpath List: The list of paths to the directories where to
                        search for duplicates
     :param extension_ignored List: The list of extensions to ignore
     :param files_ignored List: The list of files to ignore
     :param directories_ignored List: The list of directory paths to ignore
-    :return: files_arb_out is the list of [file, path] 
+    :return: files_arb_out is the list of [file, path]
              and files_out is the list of files
     :rtype: List, List
-    '''
+    """
     files_out = []
-    files_arb_out=[]
+    files_arb_out = []
     for path in lpath:
-        for root, __, files in os.walk(path):  
+        for root, __, files in os.walk(path):
             for fic in files:
-                extension = fic.split('.')[-1]   
-                if (extension not in extension_ignored and 
-                                                      fic not in files_ignored):
+                extension = fic.split(".")[-1]
+                if extension not in extension_ignored and fic not in files_ignored:
                     in_ignored_dir = False
                     for rep in directories_ignored:
                         if rep in root:
-                            in_ignored_dir = True                
+                            in_ignored_dir = True
                     if not in_ignored_dir:
-                        files_out.append([fic])              
+                        files_out.append([fic])
                         files_arb_out.append([fic, root])
     return files_arb_out, files_out
 
+
 def format_list_of_str(l_str):
-    '''Make a list from a string
-    
+    """Make a list from a string
+
     :param l_str List or Str: The variable to format
     :return: the formatted variable
     :rtype: Str
-    '''
+    """
     if not isinstance(l_str, list):
         return l_str
     return ",".join(l_str)
 
+
 def print_info(logger, info, level=2):
-    '''Format a display
-    
+    """Format a display
+
     :param logger Logger: The logger instance
     :param info List: the list of tuple to display
     :param valMax float: the maximum value of the variable
     :param level int: the verbose level that will be used
-    '''
+    """
     smax = max(map(lambda l: len(l[0]), info))
     for i in info:
         sp = " " * (smax - len(i[0]))
-        src.printcolors.print_value(logger,
-                                    sp + i[0],
-                                    format_list_of_str(i[1]),
-                                    2)
+        src.printcolors.print_value(logger, sp + i[0], format_list_of_str(i[1]), 2)
     logger.write("\n", level)
 
+
 class Progress_bar:
     "Create a progress bar in the terminal"
-    
-    def __init__(self, name, valMin, valMax, logger, length = 50):
-        '''Initialization of the progress bar.
-        
+
+    def __init__(self, name, valMin, valMax, logger, length=50):
+        """Initialization of the progress bar.
+
         :param name str: The name of the progress bar
         :param valMin float: the minimum value of the variable
         :param valMax float: the maximum value of the variable
         :param logger Logger: the logger instance
         :param length int: the length of the progress bar
-        '''
+        """
         self.name = name
         self.valMin = valMin
         self.valMax = valMax
         self.length = length
         self.logger = logger
         if (self.valMax - self.valMin) <= 0 or length <= 0:
-            out_err = _('ERROR: Wrong init values for the progress bar\n')
+            out_err = _("ERROR: Wrong init values for the progress bar\n")
             raise src.SatException(out_err)
-        
-    def display_value_progression(self,val):
-        '''Display the progress bar.
-        
+
+    def display_value_progression(self, val):
+        """Display the progress bar.
+
         :param val float: val must be between valMin and valMax.
-        '''
+        """
         if val < self.valMin or val > self.valMax:
-            self.logger.write(src.printcolors.printcWarning(_(
-                           'WARNING : wrong value for the progress bar.\n')), 3)
+            self.logger.write(
+                src.printcolors.printcWarning(
+                    _("WARNING : wrong value for the progress bar.\n")
+                ),
+                3,
+            )
         else:
-            perc = (float(val-self.valMin) / (self.valMax - self.valMin)) * 100.
+            perc = (float(val - self.valMin) / (self.valMax - self.valMin)) * 100.0
             nb_equals = int(perc * self.length / 100)
-            out = '\r %s : %3d %% [%s%s]' % (self.name, perc, nb_equals*'=',
-                                             (self.length - nb_equals)*' ' )
+            out = "\r %s : %3d %% [%s%s]" % (
+                self.name,
+                perc,
+                nb_equals * "=",
+                (self.length - nb_equals) * " ",
+            )
             self.logger.write(out, 3)
             self.logger.flush()
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the find_duplicates command description.
     :rtype: str
-    '''
-    return _("The find_duplicates command search recursively for all duplicates"
-             " files in a the INSTALL directory (or the optionally given "
-             "directory) and prints the found files to the terminal.\n\n"
-             "example:\nsat find_duplicates --path /tmp")
+    """
+    return _(
+        "The find_duplicates command search recursively for all duplicates"
+        " files in a the INSTALL directory (or the optionally given "
+        "directory) and prints the found files to the terminal.\n\n"
+        "example:\nsat find_duplicates --path /tmp"
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with find_duplicates 
-       parameter.
-    '''
+    """method that is called when salomeTools is called with find_duplicates
+    parameter.
+    """
     # parse the arguments
     (options, args) = parser.parse_args(args)
-    
-    # Determine the directory path where to search 
+
+    # Determine the directory path where to search
     # for duplicates files regarding the options
     if options.path:
         l_dir_path = options.path
     else:
         src.check_config_has_application(runner.cfg)
         if options.sources:
-            l_dir_path = [os.path.join(runner.cfg.APPLICATION.workdir,
-                                       "SOURCES")]
+            l_dir_path = [os.path.join(runner.cfg.APPLICATION.workdir, "SOURCES")]
         else:
             # find all installation paths
             all_products = runner.cfg.APPLICATION.products.keys()
-            l_product_cfg = src.product.get_products_infos(all_products,
-                                                           runner.cfg)
+            l_product_cfg = src.product.get_products_infos(all_products, runner.cfg)
             l_dir_path = [pi.install_dir for __, pi in l_product_cfg]
-    
+
     # Get the files to ignore during the searching
     files_ignored = default_files_ignored
     if options.exclude_file:
@@ -199,48 +238,47 @@ def run(args, runner, logger):
     directories_ignored = default_directories_ignored
     if options.exclude_path:
         directories_ignored = options.exclude_path
-    
+
     # Check the directories
     l_path = src.deepcopy_list(l_dir_path)
     l_dir_path = []
     for dir_path in l_path:
-        if not(os.path.isdir(dir_path)):
-            msg = _("%s does not exists or is not a directory path: "
-                    "it will be ignored" % dir_path)
+        if not (os.path.isdir(dir_path)):
+            msg = _(
+                "%s does not exists or is not a directory path: "
+                "it will be ignored" % dir_path
+            )
             logger.write("%s\n" % src.printcolors.printcWarning(msg), 3)
             continue
         l_dir_path.append(dir_path)
-            
-    
+
     # Display some information
-    info = [(_("Directories"), "\n".join(l_dir_path)),
-            (_("Ignored files"), files_ignored),
-            (_("Ignored extensions"), extension_ignored),
-            (_("Ignored directories"), directories_ignored)
-           ]
+    info = [
+        (_("Directories"), "\n".join(l_dir_path)),
+        (_("Ignored files"), files_ignored),
+        (_("Ignored extensions"), extension_ignored),
+        (_("Ignored directories"), directories_ignored),
+    ]
     print_info(logger, info)
-    
+
     # Get all the files and paths
     logger.write(_("Store all file paths ... "), 3)
     logger.flush()
-    dic, fic = list_directory(l_dir_path,
-                              extension_ignored,
-                              files_ignored,
-                              directories_ignored)  
-    logger.write(src.printcolors.printcSuccess('OK\n'), 3)
-    
+    dic, fic = list_directory(
+        l_dir_path, extension_ignored, files_ignored, directories_ignored
+    )
+    logger.write(src.printcolors.printcSuccess("OK\n"), 3)
+
     # Eliminate all the singletons
     len_fic = len(fic)
-    range_fic = range(0,len_fic)
+    range_fic = range(0, len_fic)
     range_fic.reverse()
-    my_bar = Progress_bar(_('Eliminate the files that are not duplicated'),
-                          0,
-                          len_fic,
-                          logger,
-                          length = 50)
+    my_bar = Progress_bar(
+        _("Eliminate the files that are not duplicated"), 0, len_fic, logger, length=50
+    )
     for i in range_fic:
         my_bar.display_value_progression(len_fic - i)
-        if fic.count(fic[i])==1:
+        if fic.count(fic[i]) == 1:
             fic.remove(fic[i])
             dic.remove(dic[i])
 
@@ -248,10 +286,10 @@ def run(args, runner, logger):
     logger.write(_("\n\nCompute the dict {files : [list of pathes]} ... "), 3)
     fic.sort()
     len_fic = len(fic)
-    rg_fic = range(0,len_fic)
+    rg_fic = range(0, len_fic)
     rg_fic.reverse()
     for i in rg_fic:
-        if fic[i-1] != fic[i]:
+        if fic[i - 1] != fic[i]:
             fic.remove(fic[i])
 
     dic_fic_paths = {}
@@ -262,8 +300,8 @@ def run(args, runner, logger):
             if fic_path[0] == the_file:
                 l_path.append(fic_path[1])
         dic_fic_paths[the_file] = l_path
-    
-    logger.write(src.printcolors.printcSuccess('OK\n'), 3)
+
+    logger.write(src.printcolors.printcSuccess("OK\n"), 3)
 
     # End the execution if no duplicates were found
     if len(dic_fic_paths) == 0:
@@ -272,13 +310,13 @@ def run(args, runner, logger):
 
     # Check that there are no singletons in the result (it would be a bug)
     for elem in dic_fic_paths:
-        if len(dic_fic_paths[elem])<2:
-            logger.write(_("Warning : element %s has not more than"
-                         " two paths.\n") % elem, 3)
-
+        if len(dic_fic_paths[elem]) < 2:
+            logger.write(
+                _("Warning : element %s has not more than" " two paths.\n") % elem, 3
+            )
 
     # Display the results
-    logger.write(src.printcolors.printcInfo(_('\nResults:\n\n')), 3)
+    logger.write(src.printcolors.printcInfo(_("\nResults:\n\n")), 3)
     max_file_name_lenght = max(map(lambda l: len(l), dic_fic_paths.keys()))
     for fich in dic_fic_paths:
         logger.write(src.printcolors.printcLabel(fich), 1)
index fbd08aa5e57271f44df9b46c69f58eab60b02141..b4af11454a5587bcc79a97dcae9371e0fff5e0d0 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -23,36 +23,48 @@ import imp
 import subprocess
 
 import src
-from  src.versionMinorMajorPatch import MinorMajorPatch as MMP
+from src.versionMinorMajorPatch import MinorMajorPatch as MMP
 import src.debug as DBG
 
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-                  _("Optional: the list of products to generate"))
-parser.add_option('', 'yacsgen', 'string', 'yacsgen',
-                  _("Optional: path to YACSGEN's module_generator package"))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: the list of products to generate"),
+)
+parser.add_option(
+    "",
+    "yacsgen",
+    "string",
+    "yacsgen",
+    _("Optional: path to YACSGEN's module_generator package"),
+)
+
 
 def generate_component_list(config, product_name, product_info, context, logger):
     res = "?"
     logger.write("\n", 3)
     for compo in src.product.get_product_components(product_info):
-        header = "  %s %s " % (src.printcolors.printcLabel(compo),
-                               "." * (20 - len(compo)))
-        res = generate_component(config,
-                                 compo,
-                                 product_name,
-                                 product_info,
-                                 context,
-                                 header,
-                                 logger)
+        header = "  %s %s " % (
+            src.printcolors.printcLabel(compo),
+            "." * (20 - len(compo)),
+        )
+        res = generate_component(
+            config, compo, product_name, product_info, context, header, logger
+        )
         if config.USER.output_verbose_level == 3:
             logger.write("\r%s%s\r%s" % (header, " " * 20, header), 3)
         logger.write(src.printcolors.printc(res), 3, False)
         logger.write("\n", 3, False)
     return res
 
-def generate_component(config, compo, product_name, product_info, context, header, logger):
-#   get from config include file name and librairy name, or take default value
+
+def generate_component(
+    config, compo, product_name, product_info, context, header, logger
+):
+    # get the include file name and library name from config, or take default values
     if "hxxfile" in product_info:
         hxxfile = product_info.hxxfile
     else:
@@ -79,13 +91,15 @@ def generate_component(config, compo, product_name, product_info, context, heade
     compo_info.install_dir = os.path.join(install_dir, compo)
     compo_info.build_dir = os.path.join(build_dir, compo)
     compo_info.depend = product_info.depend
-    compo_info.depend.append(product_info.name, "") # add cpp module
+    compo_info.depend.append(product_info.name, "")  # add cpp module
     compo_info.opt_depend = product_info.opt_depend
 
     config.PRODUCTS.addMapping(compo, src.pyconf.Mapping(config), "")
     config.PRODUCTS[compo].default = compo_info
 
-    builder = src.compilation.Builder(config, logger, product_name, compo_info, check_src=False)
+    builder = src.compilation.Builder(
+        config, logger, product_name, compo_info, check_src=False
+    )
     builder.header = header
 
     # generate the component
@@ -104,28 +118,33 @@ def generate_component(config, compo, product_name, product_info, context, heade
 
     # inline class to override bootstrap method
     import module_generator
+
     class sat_generator(module_generator.Generator):
         # old bootstrap for automake (used if salome version <= 7.4)
         def bootstrap(self, source_dir, log_file):
             # replace call to default bootstrap() by using subprocess call (cleaner)
             command = "sh autogen.sh"
-            ier = subprocess.call(command, shell=True, cwd=source_dir,
-                                  stdout=log_file, stderr=subprocess.STDOUT)
+            ier = subprocess.call(
+                command,
+                shell=True,
+                cwd=source_dir,
+                stdout=log_file,
+                stderr=subprocess.STDOUT,
+            )
             if ier != 0:
                 raise src.SatException("bootstrap has ended in error")
 
-
     # determine salome version
     VersionSalome = src.get_salome_version(config)
-    if VersionSalome >= MMP([7,5,0]) :
-        use_autotools=False
-        builder.log('USE CMAKE', 3)
+    if VersionSalome >= MMP([7, 5, 0]):
+        use_autotools = False
+        builder.log("USE CMAKE", 3)
     else:
-        use_autotools=True
-        builder.log('USE AUTOTOOLS', 3)
+        use_autotools = True
+        builder.log("USE AUTOTOOLS", 3)
 
     result = "GENERATE"
-    builder.log('GENERATE', 3)
+    builder.log("GENERATE", 3)
 
     prevstdout = sys.stdout
     prevstderr = sys.stderr
@@ -135,31 +154,32 @@ def generate_component(config, compo, product_name, product_info, context, heade
         sys.stderr = logger.logTxtFile
 
         if src.product.product_is_mpi(product_info):
-            salome_compo = module_generator.HXX2SALOMEParaComponent(hxxfile,
-                                                                    cpplib,
-                                                                    cpp_path)
+            salome_compo = module_generator.HXX2SALOMEParaComponent(
+                hxxfile, cpplib, cpp_path
+            )
         else:
-            salome_compo = module_generator.HXX2SALOMEComponent(hxxfile,
-                                                                cpplib,
-                                                                cpp_path)
+            salome_compo = module_generator.HXX2SALOMEComponent(
+                hxxfile, cpplib, cpp_path
+            )
 
         if src.product.product_has_salome_gui(product_info):
             # get files to build a template GUI
-            try: # try new yacsgen api
+            try:  # try new yacsgen api
                 gui_files = salome_compo.getGUIfilesTemplate(compo)
             except:  # use old yacsgen api
                 gui_files = salome_compo.getGUIfilesTemplate()
         else:
             gui_files = None
 
-        mg = module_generator.Module(compo, components=[salome_compo],
-                                     prefix=generate_dir, gui=gui_files)
+        mg = module_generator.Module(
+            compo, components=[salome_compo], prefix=generate_dir, gui=gui_files
+        )
         g = sat_generator(mg, context)
         g.generate()
 
         if use_autotools:
             result = "BUID_CONFIGURE"
-            builder.log('BUID_CONFIGURE (no bootstrap)', 3)
+            builder.log("BUID_CONFIGURE (no bootstrap)", 3)
             g.bootstrap(compo_info.source_dir, logger.logTxtFile)
 
         result = src.OK_STATUS
@@ -171,32 +191,43 @@ def generate_component(config, compo, product_name, product_info, context, heade
     os.chdir(curdir)
 
     # do the compilation using the builder object
-    if builder.prepare()!= 0: return "Error in prepare"
+    if builder.prepare() != 0:
+        return "Error in prepare"
     if use_autotools:
-        if builder.configure()!= 0: return "Error in configure"
+        if builder.configure() != 0:
+            return "Error in configure"
     else:
-        if builder.cmake()!= 0: return "Error in cmake"
+        if builder.cmake() != 0:
+            return "Error in cmake"
 
-    if builder.make(config.VARS.nb_proc, "")!=0: return "Error in make"
-    if builder.install()!=0: return "Error in make install"
+    if builder.make(config.VARS.nb_proc, "") != 0:
+        return "Error in make"
+    if builder.install() != 0:
+        return "Error in make install"
 
     # copy specified logo in generated component install directory
     # rem : logo is not copied in source dir because this would require
     #       to modify the generated makefile
     logo_path = src.product.product_has_logo(product_info)
     if logo_path:
-        destlogo = os.path.join(compo_info.install_dir, "share", "salome",
-            "resources", compo.lower(), compo + ".png")
+        destlogo = os.path.join(
+            compo_info.install_dir,
+            "share",
+            "salome",
+            "resources",
+            compo.lower(),
+            compo + ".png",
+        )
         src.Path(logo_path).copyfile(destlogo)
 
     return result
 
+
 def build_context(config, logger):
-    products_list = [ 'KERNEL', 'GUI' ]
-    ctxenv = src.environment.SalomeEnviron(config,
-                                           src.environment.Environ(dict(
-                                                                   os.environ)),
-                                           True)
+    products_list = ["KERNEL", "GUI"]
+    ctxenv = src.environment.SalomeEnviron(
+        config, src.environment.Environ(dict(os.environ)), True
+    )
     ctxenv.silent = True
     ctxenv.set_full_environ(logger, config.APPLICATION.products.keys())
 
@@ -206,9 +237,10 @@ def build_context(config, logger):
         val = os.getenv(prod_env)
         if os.getenv(prod_env) is None:
             if p not in config.APPLICATION.products:
-                warn = _("product %(product)s is not defined. Include it in the"
-                         " application or define $%(env)s.") % \
-                    { "product": p, "env": prod_env}
+                warn = _(
+                    "product %(product)s is not defined. Include it in the"
+                    " application or define $%(env)s."
+                ) % {"product": p, "env": prod_env}
                 logger.write(src.printcolors.printcWarning(warn), 1)
                 logger.write("\n", 3, False)
                 val = ""
@@ -221,15 +253,16 @@ def build_context(config, logger):
         "update": 1,
         "makeflags": "-j2",
         "kernel": dicdir["KERNEL"],
-        "gui":    dicdir["GUI"],
-        "yacs":   "",
-        "med":    "",
-        "mesh":   "",
-        "visu":   "",
-        "geom":   "",
+        "gui": dicdir["GUI"],
+        "yacs": "",
+        "med": "",
+        "mesh": "",
+        "visu": "",
+        "geom": "",
     }
     return context
 
+
 def check_module_generator(directory=None):
     """Check if module_generator is available.
 
@@ -244,7 +277,7 @@ def check_module_generator(directory=None):
 
     res = None
     try:
-        #import module_generator
+        # import module_generator
         info = imp.find_module("module_generator")
         res = info[1]
     except ImportError:
@@ -254,6 +287,7 @@ def check_module_generator(directory=None):
 
     return res
 
+
 def check_yacsgen(config, directory, logger):
     """Check if YACSGEN is available.
 
@@ -269,8 +303,8 @@ def check_yacsgen(config, directory, logger):
     if directory is not None:
         yacsgen_dir = directory
         yacs_src = _("Using YACSGEN from command line")
-    elif 'YACSGEN' in config.APPLICATION.products:
-        yacsgen_info = src.product.get_product_config(config, 'YACSGEN')
+    elif "YACSGEN" in config.APPLICATION.products:
+        yacsgen_info = src.product.get_product_config(config, "YACSGEN")
         yacsgen_dir = yacsgen_info.install_dir
         yacs_src = _("Using YACSGEN from application")
     elif "YACSGEN_ROOT_DIR" in os.environ:
@@ -295,38 +329,40 @@ def check_yacsgen(config, directory, logger):
     pv = os.getenv("PYTHON_VERSION")
     if pv is None:
         python_info = src.product.get_product_config(config, "Python")
-        pv = '.'.join(python_info.version.split('.')[:2])
+        pv = ".".join(python_info.version.split(".")[:2])
     assert pv is not None, "$PYTHON_VERSION not defined"
-    yacsgen_dir = os.path.join(yacsgen_dir, "lib", "python%s" % pv,
-                               "site-packages")
+    yacsgen_dir = os.path.join(yacsgen_dir, "lib", "python%s" % pv, "site-packages")
     c = check_module_generator(yacsgen_dir)
     if c is not None:
         return c
 
-    return (False,
-            _("The python module module_generator was not found in YACSGEN"))
+    return (False, _("The python module module_generator was not found in YACSGEN"))
 
 
 def description():
-    '''method that is called when salomeTools is called with --help option.
+    """method that is called when salomeTools is called with --help option.
 
     :return: The text to display for the generate command description.
     :rtype: str
-    '''
-    return _("The generate command generates SALOME modules from 'pure cpp' "
-             "products.\nWARNING this command NEEDS YACSGEN to run!\n\nexample:"
-             "\nsat generate SALOME-master --products FLICACPP")
+    """
+    return _(
+        "The generate command generates SALOME modules from 'pure cpp' "
+        "products.\nWARNING this command NEEDS YACSGEN to run!\n\nexample:"
+        "\nsat generate SALOME-master --products FLICACPP"
+    )
 
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with generate parameter.
-    '''
+    """method that is called when salomeTools is called with generate parameter."""
 
     # Check that the command has been called with an application
     src.check_config_has_application(runner.cfg)
 
-    logger.write(_('Generation of SALOME modules for application %s\n') % \
-        src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+    logger.write(
+        _("Generation of SALOME modules for application %s\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
 
     (options, args) = parser.parse_args(args)
 
@@ -374,19 +410,15 @@ def run(args, runner, logger):
         logger.write(_("\nCleaning generated directories\n"), 3, False)
         # clean source, build and install directories of the generated product
         # no verbosity to avoid warning at the first generation, for which dirs don't exist
-        runner.clean(runner.cfg.VARS.application +
-                  " --products " + pi.name +
-                  " --generated",
-                  batch=True,
-                  verbose=0,
-                  logger_add_link = logger)
+        runner.clean(
+            runner.cfg.VARS.application + " --products " + pi.name + " --generated",
+            batch=True,
+            verbose=0,
+            logger_add_link=logger,
+        )
         nbgen += 1
         try:
-            result = generate_component_list(runner.cfg,
-                                             product,
-                                             pi,
-                                             context,
-                                             logger)
+            result = generate_component_list(runner.cfg, product, pi, context, logger)
         except Exception as exc:
             result = str(exc)
 
@@ -396,7 +428,7 @@ def run(args, runner, logger):
 
     if len(details) == 0:
         status = src.OK_STATUS
-    else: #if config.USER.output_level != 3:
+    else:  # if config.USER.output_level != 3:
         logger.write("\n", 2, False)
         logger.write(_("The following modules were not generated correctly:\n"), 2)
         for d in details:
index 592843a5163a208d0f754ec4266f64efb84f6e15..b5ea7060d2e5b27b892b966fa6523ed0b5c443ed 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,30 +22,66 @@ import src
 
 # Define all possible option for the init command :  sat init <options>
 parser = src.options.Options()
-parser.add_option('b', 'base', 'string', 'base', 
-                  _('Optional: The path to the products base'))
-parser.add_option('w', 'workdir', 'string', 'workdir', 
-                  _('Optional: The path to the working directory '
-                    '(where to install the applications'))
-parser.add_option('a', 'archive_dir', 'string', 'archive_dir', 
-                  _('Optional: The path to the local archive directory '
-                    '(where to install local source archives'))
-parser.add_option('', 'add_project ', 'string', 'add_project', 
-                  _('Optional: The path of the project to add'))
-parser.add_option('', 'reset_projects', 'boolean', 'reset_projects', 
-                  _('Optional: Reset the list of projects'))
-parser.add_option('v', 'VCS', 'string', 'VCS', 
-                  _('Optional: The address of the repository of SAT '
-                    '(only informative)'))
-parser.add_option('t', 'tag', 'string', 'tag', 
-                  _('Optional: The tag of SAT (only informative)'))
-parser.add_option('l', 'log_dir', 'string', 'log_dir', 
-                  _('Optional: The directory where to put all the logs of SAT'))
+parser.add_option(
+    "b", "base", "string", "base", _("Optional: The path to the products base")
+)
+parser.add_option(
+    "w",
+    "workdir",
+    "string",
+    "workdir",
+    _(
+        "Optional: The path to the working directory "
+        "(where to install the applications"
+    ),
+)
+parser.add_option(
+    "a",
+    "archive_dir",
+    "string",
+    "archive_dir",
+    _(
+        "Optional: The path to the local archive directory "
+        "(where to install local source archives"
+    ),
+)
+parser.add_option(
+    "",
+    "add_project ",
+    "string",
+    "add_project",
+    _("Optional: The path of the project to add"),
+)
+parser.add_option(
+    "",
+    "reset_projects",
+    "boolean",
+    "reset_projects",
+    _("Optional: Reset the list of projects"),
+)
+parser.add_option(
+    "v",
+    "VCS",
+    "string",
+    "VCS",
+    _("Optional: The address of the repository of SAT " "(only informative)"),
+)
+parser.add_option(
+    "t", "tag", "string", "tag", _("Optional: The tag of SAT (only informative)")
+)
+parser.add_option(
+    "l",
+    "log_dir",
+    "string",
+    "log_dir",
+    _("Optional: The directory where to put all the logs of SAT"),
+)
+
 
 def set_local_value(config, key, value, logger):
-    """ Edit the site.pyconf file and change a value.
+    """Edit the site.pyconf file and change a value.
 
-    :param config Config: The global configuration.    
+    :param config Config: The global configuration.
     :param key Str: The key from which to change the value.
     :param value Str: The path to change.
     :param logger Logger: The logger instance.
@@ -57,7 +93,7 @@ def set_local_value(config, key, value, logger):
     try:
         local_cfg = src.pyconf.Config(local_file_path)
         local_cfg.LOCAL[key] = value
-        ff = open(local_file_path, 'w')
+        ff = open(local_file_path, "w")
         local_cfg.__save__(ff, 1)
         ff.close()
         if key != "log_dir":
@@ -67,20 +103,25 @@ def set_local_value(config, key, value, logger):
         msg = _("Unable to update the local.pyconf file: %s\n" % err)
         logger.write(msg, 1)
         return 1
-    
+
     return 0
-    
+
+
 def add_local_project(config, project_file, logger):
-    """ Add a project in local configuration (file data/local.pyconf).
+    """Add a project in local configuration (file data/local.pyconf).
 
-    :param config Config: The global configuration.    
+    :param config Config: The global configuration.
     :param new_project Str: The project pyconf file to add in local config.
     :param logger Logger: The logger instance.
     :return: 0 if all is OK, else 1
     :rtype: int
     """
     if not os.path.isfile(project_file):
-        logger.write("Unable to add a project in local configuration, project file %s does not exist\n" % project_file, 1)
+        logger.write(
+            "Unable to add a project in local configuration, project file %s does not exist\n"
+            % project_file,
+            1,
+        )
         return 1
 
     # check that the project file exists
@@ -90,7 +131,7 @@ def add_local_project(config, project_file, logger):
     try:
         local_cfg = src.pyconf.Config(local_file_path)
         local_cfg.PROJECTS.project_file_paths.append(project_file, "")
-        ff = open(local_file_path, 'w')
+        ff = open(local_file_path, "w")
         local_cfg.__save__(ff, 1)
         ff.close()
         config.PROJECTS.project_file_paths.append(project_file, "")
@@ -105,9 +146,9 @@ def add_local_project(config, project_file, logger):
 
 
 def reset_local_projects(config, logger):
-    """ Reinitialise the list of projects in local configuration (file data/local.pyconf).
+    """Reinitialise the list of projects in local configuration (file data/local.pyconf).
 
-    :param config Config: The global configuration.    
+    :param config Config: The global configuration.
     :param logger Logger: The logger instance.
     :return: 0 if all is OK, else 1
     :rtype: int
@@ -117,11 +158,11 @@ def reset_local_projects(config, logger):
     # Update the local.pyconf file
     try:
         local_cfg = src.pyconf.Config(local_file_path)
-        local_cfg.PROJECTS.project_file_paths=src.pyconf.Sequence(local_cfg.PROJECTS)
-        ff = open(local_file_path, 'w')
+        local_cfg.PROJECTS.project_file_paths = src.pyconf.Sequence(local_cfg.PROJECTS)
+        ff = open(local_file_path, "w")
         local_cfg.__save__(ff, 1)
         ff.close()
-        config.PROJECTS.project_file_paths=src.pyconf.Sequence(config.PROJECTS)
+        config.PROJECTS.project_file_paths = src.pyconf.Sequence(config.PROJECTS)
 
     except Exception as e:
         err = str(e)
@@ -133,81 +174,89 @@ def reset_local_projects(config, logger):
 
 
 def display_local_values(config, logger):
-    """ Display the base path
+    """Display the base path
 
     :param config Config: The global configuration.
     :param key Str: The key from which to change the value.
     :param logger Logger: The logger instance.
     """
-    info = [("base", config.LOCAL.base),
-            ("workdir", config.LOCAL.workdir),
-            ("log_dir", config.LOCAL.log_dir),
-            ("archive_dir", config.LOCAL.archive_dir),
-            ("VCS", config.LOCAL.VCS),
-            ("tag", config.LOCAL.tag),
-            ("projects", config.PROJECTS.project_file_paths)]
+    info = [
+        ("base", config.LOCAL.base),
+        ("workdir", config.LOCAL.workdir),
+        ("log_dir", config.LOCAL.log_dir),
+        ("archive_dir", config.LOCAL.archive_dir),
+        ("VCS", config.LOCAL.VCS),
+        ("tag", config.LOCAL.tag),
+        ("projects", config.PROJECTS.project_file_paths),
+    ]
     src.print_info(logger, info)
 
     return 0
 
+
 def check_path(path_to_check, logger):
-    """ Verify that the given path is not a file and can be created.
-    
+    """Verify that the given path is not a file and can be created.
+
     :param path_to_check Str: The path to check.
     :param logger Logger: The logger instance.
     """
     if path_to_check == "default":
         return 0
-    
+
     # Get the path
     path = src.Path(path_to_check)
-    
+
     # If it is a file, do nothing and return error
     if path.isfile():
-        msg = _("Error: The given path is a file. Please provide a path to "
-                "a directory")
+        msg = _(
+            "Error: The given path is a file. Please provide a path to " "a directory"
+        )
         logger.write(src.printcolors.printcError(msg), 1)
         return 1
-      
+
     # Try to create the given path
     try:
         src.ensure_path_exists(str(path))
     except Exception as e:
         err = src.printcolors.printcError(str(e))
-        msg = _("Unable to create the directory %s: %s\n" % (str(path),
-                                                             err))
+        msg = _("Unable to create the directory %s: %s\n" % (str(path), err))
         logger.write(msg, 1)
         return 1
-    
+
     return 0
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the init command description.
     :rtype: str
-    '''
+    """
     return _("The init command Changes the local settings of SAT.")
-  
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with init parameter.
-    '''
-    
+    """method that is called when salomeTools is called with init parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
-    
+
     # Print some information
-    logger.write(_('Local Settings of SAT %s\n\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.salometoolsway), 1)
+    logger.write(
+        _("Local Settings of SAT %s\n\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.salometoolsway),
+        1,
+    )
 
     res = 0
-    
 
     # Set the options corresponding to a directory
-    for opt in [("base" , options.base),
-                ("workdir", options.workdir),
-                ("log_dir", options.log_dir),
-                ("archive_dir", options.archive_dir)]:
+    for opt in [
+        ("base", options.base),
+        ("workdir", options.workdir),
+        ("log_dir", options.log_dir),
+        ("archive_dir", options.archive_dir),
+    ]:
         key, value = opt
         if value:
             res_check = check_path(value, logger)
@@ -218,20 +267,20 @@ def run(args, runner, logger):
 
     # set the options corresponding to projects file names
     if options.add_project:
-        res_add=add_local_project(runner.cfg, options.add_project, logger)
+        res_add = add_local_project(runner.cfg, options.add_project, logger)
         res += res_add
 
     if options.reset_projects:
-        res_rem=reset_local_projects(runner.cfg, logger)
+        res_rem = reset_local_projects(runner.cfg, logger)
         res += res_rem
 
-    # Set the options corresponding to an informative value            
+    # Set the options corresponding to an informative value
     for opt in [("VCS", options.VCS), ("tag", options.tag)]:
         key, value = opt
         if value:
             res_set = set_local_value(runner.cfg, key, value, logger)
             res += res_set
-    
+
     display_local_values(runner.cfg, logger)
-    
+
     return res
index 2811a6af8c4dd7648c2f48034a48f26335f17930..92302797be2be49a279451543442940d5a698bb1 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,83 +25,102 @@ import src
 import prepare
 import src.debug as DBG
 
-PACKAGE_EXT=".tar.gz" # the extension we use for the packages
+PACKAGE_EXT = ".tar.gz"  # the extension we use for the packages
 
 # Define all possible option for patch command :  sat patch <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products from which to get the sources. This option accepts a comma separated list.'))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _(
+        "Optional: products from which to get the sources. This option accepts a comma separated list."
+    ),
+)
 
 
 def get_binary_from_archive(config, product_name, product_info, install_dir, logger):
-    '''The method get the binary of the product from an archive
-    
+    """The method get the binary of the product from an archive
+
     :param config Config: The global configuration
     :param product_name : The name of the product
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param install_dir Path: The Path instance corresponding to the 
+    :param install_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param logger Logger: The logger instance to use for the display and logging
     :return: True if it succeed, else False
     :rtype: boolean
-    '''
-
+    """
 
     # check archive exists
 
     # the expected name of the bin archive, as produced by sat package --bin_products
-    archive_name = product_name + '-' + product_info.version + "-" + config.VARS.dist + PACKAGE_EXT
+    archive_name = (
+        product_name + "-" + product_info.version + "-" + config.VARS.dist + PACKAGE_EXT
+    )
     # we search this archive in bin directory
-    bin_arch_name = os.path.join("bin",archive_name)
+    bin_arch_name = os.path.join("bin", archive_name)
     # search in the config.PATHS.ARCHIVEPATH
     arch_path = src.find_file_in_lpath(archive_name, config.PATHS.ARCHIVEPATH, "bin")
     if not arch_path:
         # bin archive was not found locally in ARCHIVEPATH
         # search on ftp site
-        logger.write("\n   The bin archive is not found on local file system, we try ftp\n", 3)
-        ret=src.find_file_in_ftppath(archive_name, config.PATHS.ARCHIVEFTP, 
-                                     config.LOCAL.archive_dir, logger, "bin")
-        
+        logger.write(
+            "\n   The bin archive is not found on local file system, we try ftp\n", 3
+        )
+        ret = src.find_file_in_ftppath(
+            archive_name,
+            config.PATHS.ARCHIVEFTP,
+            config.LOCAL.archive_dir,
+            logger,
+            "bin",
+        )
+
         if ret:
             # archive was found on ftp and stored in ret
             arch_path = ret
         else:
-            logger.write('%s  ' % src.printcolors.printc(src.OK_STATUS), 3, False) 
-            msg = _("Archive not found in ARCHIVEPATH, nor on ARCHIVEFTP: '%s'") % bin_arch_name
+            logger.write("%s  " % src.printcolors.printc(src.OK_STATUS), 3, False)
+            msg = (
+                _("Archive not found in ARCHIVEPATH, nor on ARCHIVEFTP: '%s'")
+                % bin_arch_name
+            )
             logger.write(msg, 3)
             return 1
 
-    logger.write('arc:%s ... ' % 
-                 src.printcolors.printcInfo(archive_name),
-                 3, 
-                 False)
+    logger.write("arc:%s ... " % src.printcolors.printcInfo(archive_name), 3, False)
     logger.flush()
     # Call the system function that do the extraction in archive mode
-    retcode, NameExtractedDirectory = src.system.archive_extract(arch_path,
-                                      install_dir.dir(), logger)
-    
-    # Rename the source directory if 
+    retcode, NameExtractedDirectory = src.system.archive_extract(
+        arch_path, install_dir.dir(), logger
+    )
+
+    # Rename the source directory if
     # it does not match with product_info.source_dir
-    if (NameExtractedDirectory.replace('/', '') != 
-            os.path.basename(product_info.install_dir)):
-        shutil.move(os.path.join(os.path.dirname(product_info.install_dir), 
-                                 NameExtractedDirectory), 
-                    product_info.install_dir)
-    
-    return retcode
+    if NameExtractedDirectory.replace("/", "") != os.path.basename(
+        product_info.install_dir
+    ):
+        shutil.move(
+            os.path.join(
+                os.path.dirname(product_info.install_dir), NameExtractedDirectory
+            ),
+            product_info.install_dir,
+        )
 
+    return retcode
 
 
 def get_all_product_binaries(config, products, logger):
-    '''Get all the product sources.
-    
+    """Get all the product sources.
+
     :param config Config: The global configuration
     :param products List: The list of tuples (product name, product informations)
     :param logger Logger: The logger instance to be used for the logging
     :return: the tuple (number of success, dictionary product_name/success_fail)
     :rtype: (int,dict)
-    '''
+    """
 
     # Initialize the variables that will count the fails and success
     results = dict()
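
For reference, the archive lookup performed by get_binary_from_archive() above boils down to composing a <product>-<version>-<dist>.tar.gz name and probing the bin/ subdirectory of each archive path before falling back to the FTP server. The standalone sketch below illustrates that convention; the helper names are hypothetical, and the real code delegates the search to src.find_file_in_lpath and src.find_file_in_ftppath.

    # Illustration of the binary-archive naming and local lookup (assumed helpers).
    import os

    PACKAGE_EXT = ".tar.gz"

    def expected_bin_archive(product_name, version, dist):
        # e.g. expected_bin_archive("GEOM", "9.7.0", "CO7") -> "GEOM-9.7.0-CO7.tar.gz"
        return product_name + "-" + version + "-" + dist + PACKAGE_EXT

    def find_local_bin_archive(archive_name, archive_paths):
        # Probe <path>/bin/<archive_name> for every configured archive path.
        for root in archive_paths:
            candidate = os.path.join(root, "bin", archive_name)
            if os.path.exists(candidate):
                return candidate
        return None  # the command then falls back to the ARCHIVEFTP search
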
@@ -111,60 +130,78 @@ def get_all_product_binaries(config, products, logger):
     max_product_name_len = 1
     if len(products) > 0:
         max_product_name_len = max(map(lambda l: len(l), products[0])) + 4
-    
+
     # The loop on all the products from which to get the binaries
     for product_name, product_info in products:
         # display and log
-        logger.write('%s: ' % src.printcolors.printcLabel(product_name), 3)
-        logger.write(' ' * (max_product_name_len - len(product_name)), 3, False)
+        logger.write("%s: " % src.printcolors.printcLabel(product_name), 3)
+        logger.write(" " * (max_product_name_len - len(product_name)), 3, False)
         logger.write("\n", 4, False)
         #
-        do_install_prod=True
+        do_install_prod = True
         # check if there is something to do!
         if src.product.product_is_fixed(product_info):
-            do_install_prod=False
-            msg = _("INFO : Not doing anything because the products %s is fixed\n") % product_name
+            do_install_prod = False
+            msg = (
+                _("INFO : Not doing anything because the products %s is fixed\n")
+                % product_name
+            )
         elif src.product.product_is_native(product_info):
-            do_install_prod=False
-            msg = _("INFO : Not doing anything because the products %s is native\n") % product_name
-        elif src.appli_test_property(config,"pip", "yes") and \
-             src.product.product_test_property(product_info,"pip", "yes"):
-            do_install_prod=False
-            msg = _("INFO : Not doing anything because the products %s is managed by pip\n") % product_name
+            do_install_prod = False
+            msg = (
+                _("INFO : Not doing anything because the products %s is native\n")
+                % product_name
+            )
+        elif src.appli_test_property(
+            config, "pip", "yes"
+        ) and src.product.product_test_property(product_info, "pip", "yes"):
+            do_install_prod = False
+            msg = (
+                _(
+                    "INFO : Not doing anything because the products %s is managed by pip\n"
+                )
+                % product_name
+            )
         else:
-            install_dir=src.Path(product_info.install_dir) 
+            install_dir = src.Path(product_info.install_dir)
             if install_dir.exists():
-                do_install_prod=False 
-                msg = _("INFO : Not doing anything because the install directory already exists:\n    %s\n") % install_dir
+                do_install_prod = False
+                msg = (
+                    _(
+                        "INFO : Not doing anything because the install directory already exists:\n    %s\n"
+                    )
+                    % install_dir
+                )
 
         if not do_install_prod:
-            logger.write('%s  ' % src.printcolors.printc(src.OK_STATUS), 3, False) 
+            logger.write("%s  " % src.printcolors.printc(src.OK_STATUS), 3, False)
             logger.write(msg, 3)
-            good_result = good_result + 1  
+            good_result = good_result + 1
             # Do not get the binaries and go to next product
             continue
 
         # we need to install binaries for the product
-        retcode = get_binary_from_archive(config, product_name, product_info, install_dir, logger)
+        retcode = get_binary_from_archive(
+            config, product_name, product_info, install_dir, logger
+        )
 
         # Check that the sources are correctly get using the files to be tested
         # in product information
         if retcode:
             pass
             # CNC TODO check md5sum
-            #check_OK, wrong_path = check_sources(product_info, logger)
-            #if not check_OK:
+            # check_OK, wrong_path = check_sources(product_info, logger)
+            # if not check_OK:
             #    # Print the missing file path
             #    msg = _("The required file %s does not exists. " % wrong_path)
             #    logger.write(src.printcolors.printcError("\nERROR: ") + msg, 3)
             #    retcode = False
-# does post install substitutions
-#for f in $(grep -RIl -e /volatile/salome/jenkins/workspace/Salome_master_CO7/SALOME-9.7.0-CO7/INSTALL INSTALL); do
-#     sed -i "
-#        s?/volatile/salome/jenkins/workspace/Salome_master_CO7/SALOME-9.7.0-CO7/INSTALL?$(pwd)/INSTALL?g
-#            " $f
-#done
-
+        # does post install substitutions
+        # for f in $(grep -RIl -e /volatile/salome/jenkins/workspace/Salome_master_CO7/SALOME-9.7.0-CO7/INSTALL INSTALL); do
+        #     sed -i "
+        #        s?/volatile/salome/jenkins/workspace/Salome_master_CO7/SALOME-9.7.0-CO7/INSTALL?$(pwd)/INSTALL?g
+        #            " $f
+        # done
 
         # show results
         results[product_name] = retcode
@@ -175,28 +212,28 @@ def get_all_product_binaries(config, products, logger):
         else:
             # The case where it failed
             res = src.KO_STATUS
-        
+
         # print the result
         if do_install_prod:
-            logger.write('%s\n' % src.printcolors.printc(res), 3, False)
+            logger.write("%s\n" % src.printcolors.printc(res), 3, False)
 
     return good_result, results
 
+
 def check_sources(product_info, logger):
-    '''Check that the sources are correctly get, using the files to be tested
+    """Check that the sources are correctly get, using the files to be tested
        in product information
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                 the product to be prepared
     :return: True if the files exists (or no files to test is provided).
     :rtype: boolean
-    '''
+    """
     # Get the files to test if there is any
-    if ("present_files" in product_info and 
-        "source" in product_info.present_files):
+    if "present_files" in product_info and "source" in product_info.present_files:
         l_files_to_be_tested = product_info.present_files.source
         for file_path in l_files_to_be_tested:
-            # The path to test is the source directory 
+            # The path to test is the source directory
             # of the product joined the file path provided
             path_to_test = os.path.join(product_info.source_dir, file_path)
             logger.write(_("\nTesting existence of file: \n"), 5)
@@ -206,73 +243,78 @@ def check_sources(product_info, logger):
             logger.write(src.printcolors.printcSuccess(" OK\n"), 5)
     return True, ""
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the source command description.
     :rtype: str
-    '''
-    return _("The install command gets the binaries of the application products "
-             "from local (ARCHIVEPATH) or ftp server.\n\nexample:"
-             "\nsat install SALOME-master --products GEOM,SMESH")
-  
+    """
+    return _(
+        "The install command gets the binaries of the application products "
+        "from local (ARCHIVEPATH) or ftp server.\n\nexample:"
+        "\nsat install SALOME-master --products GEOM,SMESH"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with install parameter.
-    '''
+    """method that is called when salomeTools is called with install parameter."""
     DBG.write("install.run()", args)
     # Parse the options
     (options, args) = parser.parse_args(args)
-    
+
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Print some informations
-    logger.write(_('Getting binaries of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    src.printcolors.print_value(logger, 'workdir', 
-                                runner.cfg.APPLICATION.workdir, 2)
+    logger.write(
+        _("Getting binaries of the application %s\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+    src.printcolors.print_value(logger, "workdir", runner.cfg.APPLICATION.workdir, 2)
     logger.write("\n", 2, False)
-       
 
     # Get the list of all application products, and create its dependency graph
-    all_products_infos = src.product.get_products_infos(runner.cfg.APPLICATION.products,
-                                                        runner.cfg)
-    from compile import get_dependencies_graph,depth_search_graph
-    all_products_graph=get_dependencies_graph(all_products_infos)
-    #logger.write("Dependency graph of all application products : %s\n" % all_products_graph, 6)
+    all_products_infos = src.product.get_products_infos(
+        runner.cfg.APPLICATION.products, runner.cfg
+    )
+    from compile import get_dependencies_graph, depth_search_graph
+
+    all_products_graph = get_dependencies_graph(all_products_infos)
+    # logger.write("Dependency graph of all application products : %s\n" % all_products_graph, 6)
     DBG.write("Dependency graph of all application products : ", all_products_graph)
 
-    products_infos=[]
+    products_infos = []
     if options.products is None:
-        #implicit selection of all products
+        # implicit selection of all products
         products_infos = all_products_infos
     else:
         # a list of products is specified
-        products_list=options.products
+        products_list = options.products
         # we evaluate the complete list including dependencies (~ to the --with-fathers of sat compile)
 
         # Extend the list with all recursive dependencies of the given products
-        visited=[]
+        visited = []
         for p_name in products_list:
-            visited=depth_search_graph(all_products_graph, p_name, visited)
+            visited = depth_search_graph(all_products_graph, p_name, visited)
         products_list = visited
-        logger.write("Product we have to compile (as specified by user) : %s\n" % products_list, 5)
+        logger.write(
+            "Product we have to compile (as specified by user) : %s\n" % products_list,
+            5,
+        )
 
         #  Create a dict of all products to facilitate products_infos sorting
-        all_products_dict={}
-        for (pname,pinfo) in all_products_infos:
-            all_products_dict[pname]=(pname,pinfo)
+        all_products_dict = {}
+        for (pname, pinfo) in all_products_infos:
+            all_products_dict[pname] = (pname, pinfo)
 
         # build products_infos for the products we have to install
         for product in products_list:
             products_infos.append(all_products_dict[product])
 
-
-    
     # Call to the function that gets all the sources
-    good_result, results = get_all_product_binaries(runner.cfg, 
-                                                    products_infos,
-                                                    logger)
+    good_result, results = get_all_product_binaries(runner.cfg, products_infos, logger)
 
     # Display the results (how much passed, how much failed, etc...)
     status = src.OK_STATUS
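
The product selection above relies on depth_search_graph() imported from commands/compile.py to extend the user's product list with all of its recursive dependencies. A minimal standalone sketch of that closure is given below, assuming (for illustration only) a dict-of-lists adjacency representation and hypothetical product names.

    # Depth-first closure over a product dependency graph (sketch, not the real helper).
    def depth_search(graph, node, visited=None):
        if visited is None:
            visited = []
        if node in visited:
            return visited
        visited.append(node)
        for child in graph.get(node, []):
            depth_search(graph, child, visited)
        return visited

    graph = {"SMESH": ["GEOM"], "GEOM": ["OCCT"], "OCCT": []}
    selection = []
    for name in ["SMESH"]:  # products given on the command line
        selection = depth_search(graph, name, selection)
    print(selection)  # ['SMESH', 'GEOM', 'OCCT'] -> binaries fetched for all three
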
index f5805feaa557dcaf65007cdf09ed75c8a5f1ef6d..db03bbd83fb51c225ee91e8acc8320490400cfcc 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -23,74 +23,89 @@ import src.salomeTools
 
 # Define all possible option for the make command :  sat make <options>
 parser = src.options.Options()
-parser.add_option('j', 'jobs_config', 'string', 'jobs_cfg', 
-                  _('Mandatory: The name of the config file that contains'
-                  ' the jobs configuration'))
-parser.add_option('', 'name', 'string', 'job',
-    _('Mandatory: The job name from which to execute commands.'), "")
+parser.add_option(
+    "j",
+    "jobs_config",
+    "string",
+    "jobs_cfg",
+    _("Mandatory: The name of the config file that contains" " the jobs configuration"),
+)
+parser.add_option(
+    "",
+    "name",
+    "string",
+    "job",
+    _("Mandatory: The job name from which to execute commands."),
+    "",
+)
+
 
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the job command description.
     :rtype: str
-    '''
-    return _("""\
+    """
+    return _(
+        """\
 The job command executes the commands of the job defined in the jobs configuration file
 
 example:
 >> sat job --jobs_config my_jobs --name my_job
-""")
-  
+"""
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with job parameter.
-    '''
-    
+    """method that is called when salomeTools is called with job parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
-         
+
     l_cfg_dir = runner.cfg.PATHS.JOBPATH
-    
+
     # Make sure the jobs_config option has been called
     if not options.jobs_cfg:
-        message = _("The option --jobs_config is required\n")      
+        message = _("The option --jobs_config is required\n")
         logger.write(src.printcolors.printcError(message))
         return 1
-    
+
     # Make sure the name option has been called
     if not options.job:
-        message = _("The option --name is required\n")      
+        message = _("The option --name is required\n")
         logger.write(src.printcolors.printcError(message))
         return 1
-    
+
     # Find the file in the directories
     found = False
     for cfg_dir in l_cfg_dir:
         file_jobs_cfg = os.path.join(cfg_dir, options.jobs_cfg)
-        if not file_jobs_cfg.endswith('.pyconf'):
-            file_jobs_cfg += '.pyconf'
-        
+        if not file_jobs_cfg.endswith(".pyconf"):
+            file_jobs_cfg += ".pyconf"
+
         if not os.path.exists(file_jobs_cfg):
             continue
         else:
             found = True
             break
-    
+
     if not found:
-        msg = _("The file configuration %(name_file)s was not found."
-                "\nUse the --list option to get the possible files.")
+        msg = _(
+            "The file configuration %(name_file)s was not found."
+            "\nUse the --list option to get the possible files."
+        )
         src.printcolors.printcError(msg)
         return 1
-    
+
     info = [
-    (_("Platform"), runner.cfg.VARS.dist),
-    (_("File containing the jobs configuration"), file_jobs_cfg)
+        (_("Platform"), runner.cfg.VARS.dist),
+        (_("File containing the jobs configuration"), file_jobs_cfg),
     ]
     src.print_info(logger, info)
-    
+
     # Read the config that is in the file
     config_jobs = src.read_config_from_a_file(file_jobs_cfg)
-    
+
     # Find the job and its commands
     found = False
     for job in config_jobs.jobs:
@@ -99,26 +114,28 @@ def run(args, runner, logger):
             found = True
             break
     if not found:
-        msg = _("Impossible to find the job \"%(job_name)s\" in "
-                "%(jobs_config_file)s" % {"job_name" : options.job,
-                                          "jobs_config_file" : file_jobs_cfg})
+        msg = _(
+            'Impossible to find the job "%(job_name)s" in '
+            "%(jobs_config_file)s"
+            % {"job_name": options.job, "jobs_config_file": file_jobs_cfg}
+        )
         logger.write(src.printcolors.printcError(msg) + "\n")
         return 1
-    
+
     # Find the maximum length of the commands in order to format the display
     len_max_command = max([len(cmd) for cmd in commands])
-    
+
     # Loop over the commands and execute it
     res = 0
     nb_pass = 0
     for command in commands:
         specific_option = False
         # Determine if it is a sat command or a shell command
-        cmd_exe = command.split(" ")[0] # first part
+        cmd_exe = command.split(" ")[0]  # first part
         if cmd_exe == "sat":
             # use the salomeTools parser to get the options of the command
             sat_parser = src.salomeTools.parser
-            input_parser = src.remove_item_from_list(command.split(' ')[1:], "")
+            input_parser = src.remove_item_from_list(command.split(" ")[1:], "")
             (options, argus) = sat_parser.parse_args(input_parser)
             # Verify if there is a changed option
             for attr in dir(options):
@@ -132,47 +149,48 @@ def run(args, runner, logger):
             sat_command_name = "shell"
             end_cmd = ["--command", command]
         # Do not change the options if no option was called in the command
-        if not(specific_option):
+        if not (specific_option):
             options = None
 
         # Get dynamically the command function to call
         sat_command = runner.__getattr__(sat_command_name)
 
-        logger.write("Executing " + 
-                     src.printcolors.printcLabel(command) + " ", 3)
+        logger.write("Executing " + src.printcolors.printcLabel(command) + " ", 3)
         logger.write("." * (len_max_command - len(command)) + " ", 3)
         logger.flush()
-        
+
         error = ""
         stack = ""
         # Execute the command
-        code = sat_command(end_cmd,
-                           options = options,
-                           batch = True,
-                           verbose = 0,
-                           logger_add_link = logger)
-            
+        code = sat_command(
+            end_cmd, options=options, batch=True, verbose=0, logger_add_link=logger
+        )
+
         # Print the status of the command
         if code == 0:
             nb_pass += 1
-            logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+            logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 3)
         else:
             if sat_command_name != "test":
                 res = 1
-            logger.write('%s %s\n' % (src.printcolors.printc(src.KO_STATUS),
-                                      error), 3)
+            logger.write("%s %s\n" % (src.printcolors.printc(src.KO_STATUS), error), 3)
             if len(stack) > 0:
-                logger.write('stack: %s\n' % stack, 3)
-    
+                logger.write("stack: %s\n" % stack, 3)
+
     # Print the final state
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nCommands: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_pass,
-          'nb_products': len(commands) }, 3)
-    
-    return res
\ No newline at end of file
+
+    logger.write(
+        _("\nCommands: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_pass,
+            "nb_products": len(commands),
+        },
+        3,
+    )
+
+    return res
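
For reference, the dispatch loop above treats each entry of the job's command list either as a salomeTools command (first token "sat") or as a raw shell command forwarded to "sat shell --command". The sketch below shows only that classification step; the sample commands are hypothetical, and the real loop additionally strips empty tokens and re-parses the sat options.

    # Per-command dispatch performed by `sat job` (illustrative sketch).
    def classify(command):
        parts = command.split(" ")
        if parts[0] == "sat":
            return parts[1], parts[2:]          # sat sub-command and its arguments
        return "shell", ["--command", command]  # anything else runs through sat shell

    for cmd in ["sat compile SALOME-master", "make -j8"]:
        print(classify(cmd))
    # ('compile', ['SALOME-master'])
    # ('shell', ['--command', 'make -j8'])
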
index c043d7048e094d8463f6f40b6f9f26b9a6c3c7dc..7c9347d9a716fd9082cdbcfbcb273fc9a0a81119 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -29,10 +29,10 @@ import re
 
 # generate problem
 try:
-  import paramiko
+    import paramiko
 except:
-  paramiko = "import paramiko impossible"
-  pass
+    paramiko = "import paramiko impossible"
+    pass
 
 import src
 
@@ -47,98 +47,125 @@ CSV_DELIMITER = ";"
 
 parser = src.options.Options()
 
-parser.add_option('n', 'name', 'list2', 'jobs_cfg', 
-                  _('Mandatory: The name of the config file that contains'
-                  ' the jobs configuration. Can be a list.'))
-parser.add_option('o', 'only_jobs', 'list2', 'only_jobs',
-                  _('Optional: the list of jobs to launch, by their name. '))
-parser.add_option('l', 'list', 'boolean', 'list', 
-                  _('Optional: list all available config files.'))
-parser.add_option('t', 'test_connection', 'boolean', 'test_connection',
-                  _("Optional: try to connect to the machines. "
-                    "Not executing the jobs."),
-                  False)
-parser.add_option('p', 'publish', 'boolean', 'publish',
-                  _("Optional: generate an xml file that can be read in a "
-                    "browser to display the jobs status."),
-                  False)
-parser.add_option('i', 'input_boards', 'string', 'input_boards', _("Optional: "
-                                "the path to csv file that contain "
-                                "the expected boards."),"")
-parser.add_option('', 'completion', 'boolean', 'no_label',
-                  _("Optional (internal use): do not print labels, Works only "
-                    "with --list."),
-                  False)
+parser.add_option(
+    "n",
+    "name",
+    "list2",
+    "jobs_cfg",
+    _(
+        "Mandatory: The name of the config file that contains"
+        " the jobs configuration. Can be a list."
+    ),
+)
+parser.add_option(
+    "o",
+    "only_jobs",
+    "list2",
+    "only_jobs",
+    _("Optional: the list of jobs to launch, by their name. "),
+)
+parser.add_option(
+    "l", "list", "boolean", "list", _("Optional: list all available config files.")
+)
+parser.add_option(
+    "t",
+    "test_connection",
+    "boolean",
+    "test_connection",
+    _("Optional: try to connect to the machines. " "Not executing the jobs."),
+    False,
+)
+parser.add_option(
+    "p",
+    "publish",
+    "boolean",
+    "publish",
+    _(
+        "Optional: generate an xml file that can be read in a "
+        "browser to display the jobs status."
+    ),
+    False,
+)
+parser.add_option(
+    "i",
+    "input_boards",
+    "string",
+    "input_boards",
+    _("Optional: " "the path to csv file that contain " "the expected boards."),
+    "",
+)
+parser.add_option(
+    "",
+    "completion",
+    "boolean",
+    "no_label",
+    _("Optional (internal use): do not print labels, Works only " "with --list."),
+    False,
+)
+
 
 class Machine(object):
-    '''Class to manage a ssh connection on a machine
-    '''
-    def __init__(self,
-                 name,
-                 host,
-                 user,
-                 port=22,
-                 passwd=None,
-                 sat_path="salomeTools"):
+    """Class to manage a ssh connection on a machine"""
+
+    def __init__(self, name, host, user, port=22, passwd=None, sat_path="salomeTools"):
         self.name = name
         self.host = host
         self.port = port
-        self.distribution = None # Will be filled after copying SAT on the machine
+        self.distribution = None  # Will be filled after copying SAT on the machine
         self.user = user
         self.password = passwd
         self.sat_path = sat_path
         self.ssh = paramiko.SSHClient()
         self._connection_successful = None
-    
+
     def connect(self, logger):
-        '''Initiate the ssh connection to the remote machine
-        
-        :param logger src.logger.Logger: The logger instance 
+        """Initiate the ssh connection to the remote machine
+
+        :param logger src.logger.Logger: The logger instance
         :return: Nothing
         :rtype: N\A
-        '''
+        """
 
         self._connection_successful = False
         self.ssh.load_system_host_keys()
         self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         try:
-            self.ssh.connect(self.host,
-                             port=self.port,
-                             username=self.user,
-                             password = self.password)
+            self.ssh.connect(
+                self.host, port=self.port, username=self.user, password=self.password
+            )
         except paramiko.AuthenticationException:
             message = src.KO_STATUS + _("Authentication failed")
         except paramiko.BadHostKeyException:
-            message = (src.KO_STATUS + 
-                       _("The server's host key could not be verified"))
+            message = src.KO_STATUS + _("The server's host key could not be verified")
         except paramiko.SSHException:
-            message = ( _("SSHException error connecting or "
-                          "establishing an SSH session"))            
+            message = _(
+                "SSHException error connecting or " "establishing an SSH session"
+            )
         except:
-            message = ( _("Error connecting or establishing an SSH session"))
+            message = _("Error connecting or establishing an SSH session")
         else:
             self._connection_successful = True
             message = ""
         return message
-    
+
     def successfully_connected(self, logger):
-        '''Verify if the connection to the remote machine has succeed
-        
-        :param logger src.logger.Logger: The logger instance 
+        """Verify if the connection to the remote machine has succeed
+
+        :param logger src.logger.Logger: The logger instance
         :return: True if the connection has succeed, False if not
         :rtype: bool
-        '''
+        """
         if self._connection_successful == None:
-            message = _("Warning : trying to ask if the connection to "
-            "(name: %s host: %s, port: %s, user: %s) is OK whereas there were"
-            " no connection request" % 
-                        (self.name, self.host, self.port, self.user))
-            logger.write( src.printcolors.printcWarning(message))
+            message = _(
+                "Warning : trying to ask if the connection to "
+                "(name: %s host: %s, port: %s, user: %s) is OK whereas there were"
+                " no connection request" % (self.name, self.host, self.port, self.user)
+            )
+            logger.write(src.printcolors.printcWarning(message))
         return self._connection_successful
 
     def copy_sat(self, sat_local_path, job_file):
-        '''Copy salomeTools to the remote machine in self.sat_path
-        '''
+        """Copy salomeTools to the remote machine in self.sat_path"""
         res = 0
         try:
             # open a sftp connection
@@ -146,23 +173,22 @@ class Machine(object):
             # Create the sat directory on remote machine if it is not existing
             self.mkdir(self.sat_path, ignore_existing=True)
             # Put sat
-            self.put_dir(sat_local_path, self.sat_path, filters = ['.git'])
-            # put the job configuration file in order to make it reachable 
+            self.put_dir(sat_local_path, self.sat_path, filters=[".git"])
+            # put the job configuration file in order to make it reachable
             # on the remote machine
             remote_job_file_name = ".%s" % os.path.basename(job_file)
-            self.sftp.put(job_file, os.path.join(self.sat_path,
-                                                 remote_job_file_name))
+            self.sftp.put(job_file, os.path.join(self.sat_path, remote_job_file_name))
         except Exception as e:
             res = str(e)
             self._connection_successful = False
-        
+
         return res
-        
-    def put_dir(self, source, target, filters = []):
-        ''' Uploads the contents of the source directory to the target path. The
-            target directory needs to exists. All sub-directories in source are 
-            created under target.
-        '''
+
+    def put_dir(self, source, target, filters=[]):
+        """Uploads the contents of the source directory to the target path. The
+        target directory needs to exist. All sub-directories in source are
+        created under target.
+        """
         for item in os.listdir(source):
             if item in filters:
                 continue
@@ -172,70 +198,67 @@ class Machine(object):
                 linkto = os.readlink(source_path)
                 try:
                     self.sftp.symlink(linkto, destination_path)
-                    self.sftp.chmod(destination_path,
-                                    os.stat(source_path).st_mode)
+                    self.sftp.chmod(destination_path, os.stat(source_path).st_mode)
                 except IOError:
                     pass
             else:
                 if os.path.isfile(source_path):
                     self.sftp.put(source_path, destination_path)
-                    self.sftp.chmod(destination_path,
-                                    os.stat(source_path).st_mode)
+                    self.sftp.chmod(destination_path, os.stat(source_path).st_mode)
                 else:
                     self.mkdir(destination_path, ignore_existing=True)
                     self.put_dir(source_path, destination_path)
 
     def mkdir(self, path, mode=511, ignore_existing=False):
-        ''' Augments mkdir by adding an option to not fail 
-            if the folder exists 
-        '''
+        """Augments mkdir by adding an option to not fail
+        if the folder exists
+        """
         try:
             self.sftp.mkdir(path, mode)
         except IOError:
             if ignore_existing:
                 pass
             else:
-                raise       
-    
+                raise
+
     def exec_command(self, command, logger):
-        '''Execute the command on the remote machine
-        
+        """Execute the command on the remote machine
+
         :param command str: The command to be run
-        :param logger src.logger.Logger: The logger instance 
+        :param logger src.logger.Logger: The logger instance
         :return: the stdin, stdout, and stderr of the executing command,
                  as a 3-tuple
         :rtype: (paramiko.channel.ChannelFile, paramiko.channel.ChannelFile,
                 paramiko.channel.ChannelFile)
-        '''
-        try:        
+        """
+        try:
             # Does not wait the end of the command
             (stdin, stdout, stderr) = self.ssh.exec_command(command)
         except paramiko.SSHException:
-            message = src.KO_STATUS + _(
-                            ": the server failed to execute the command\n")
-            logger.write( src.printcolors.printcError(message))
+            message = src.KO_STATUS + _(": the server failed to execute the command\n")
+            logger.write(src.printcolors.printcError(message))
             return (None, None, None)
         except:
-            logger.write( src.printcolors.printcError(src.KO_STATUS + '\n'))
+            logger.write(src.printcolors.printcError(src.KO_STATUS + "\n"))
             return (None, None, None)
         else:
             return (stdin, stdout, stderr)
 
     def close(self):
-        '''Close the ssh connection
-        
+        """Close the ssh connection
+
         :rtype: N\A
-        '''
+        """
         self.ssh.close()
-     
+
     def write_info(self, logger):
-        '''Prints the informations relative to the machine in the logger 
+        """Prints the informations relative to the machine in the logger
            (terminal traces and log file)
-        
+
         :param logger src.logger.Logger: The logger instance
         :return: Nothing
         :rtype: N\A
-        '''
+        """
         logger.write("host : " + self.host + "\n")
         logger.write("port : " + str(self.port) + "\n")
         logger.write("user : " + str(self.user) + "\n")
@@ -243,24 +266,26 @@ class Machine(object):
             status = src.OK_STATUS
         else:
             status = src.KO_STATUS
-        logger.write("Connection : " + status + "\n\n") 
+        logger.write("Connection : " + status + "\n\n")
 
 
 class Job(object):
-    '''Class to manage one job
-    '''
-    def __init__(self,
-                 name,
-                 machine,
-                 application,
-                 board, 
-                 commands,
-                 timeout,
-                 config,
-                 job_file_path,
-                 logger,
-                 after=None,
-                 prefix=None):
+    """Class to manage one job"""
+
+    def __init__(
+        self,
+        name,
+        machine,
+        application,
+        board,
+        commands,
+        timeout,
+        config,
+        job_file_path,
+        logger,
+        after=None,
+        prefix=None,
+    ):
 
         self.name = name
         self.machine = machine
@@ -270,97 +295,97 @@ class Job(object):
         self.board = board
         self.config = config
         self.logger = logger
-        # The list of log files to download from the remote machine 
+        # The list of log files to download from the remote machine
         self.remote_log_files = []
-        
+
         # The remote command status
-        # -1 means that it has not been launched, 
+        # -1 means that it has not been launched,
         # 0 means success and 1 means fail
         self.res_job = "-1"
         self.cancelled = False
-        
+
         self._T0 = -1
         self._Tf = -1
         self._has_begun = False
         self._has_finished = False
         self._has_timouted = False
-        self._stdin = None # Store the command inputs field
-        self._stdout = None # Store the command outputs field
-        self._stderr = None # Store the command errors field
+        self._stdin = None  # Store the command inputs field
+        self._stdout = None  # Store the command outputs field
+        self._stderr = None  # Store the command errors field
 
         self.out = ""
         self.err = ""
-        
+
         self.name_remote_jobs_pyconf = ".%s" % os.path.basename(job_file_path)
         self.commands = commands
-        self.command = (os.path.join(self.machine.sat_path, "sat") +
-                        " -l " +
-                        os.path.join(self.machine.sat_path,
-                                     "list_log_files.txt") +
-                        " job --jobs_config " + 
-                        os.path.join(self.machine.sat_path,
-                                     self.name_remote_jobs_pyconf) +
-                        " --name " + self.name)
+        self.command = (
+            os.path.join(self.machine.sat_path, "sat")
+            + " -l "
+            + os.path.join(self.machine.sat_path, "list_log_files.txt")
+            + " job --jobs_config "
+            + os.path.join(self.machine.sat_path, self.name_remote_jobs_pyconf)
+            + " --name "
+            + self.name
+        )
         if prefix:
-            self.command = prefix + ' "' + self.command +'"'
-    
+            self.command = prefix + ' "' + self.command + '"'
+
     def get_pids(self):
-        """ Get the pid(s) corresponding to the command that have been launched
+        """Get the pid(s) corresponding to the command that have been launched
             On the remote machine
-        
+
         :return: The list of integers corresponding to the found pids
         :rtype: List
         """
         pids = []
-        cmd_pid = 'ps aux | grep "' + self.command + '" | awk \'{print $2}\''
+        cmd_pid = 'ps aux | grep "' + self.command + "\" | awk '{print $2}'"
         (_, out_pid, _) = self.machine.exec_command(cmd_pid, self.logger)
         pids_cmd = out_pid.readlines()
         pids_cmd = [str(src.only_numbers(pid)) for pid in pids_cmd]
-        pids+=pids_cmd
+        pids += pids_cmd
         return pids
-    
+
     def kill_remote_process(self, wait=1):
-        '''Kills the process on the remote machine.
-        
+        """Kills the process on the remote machine.
+
         :return: (the output of the kill, the error of the kill)
         :rtype: (str, str)
-        '''
+        """
         try:
             pids = self.get_pids()
         except:
             return ("Unable to get the pid of the command.", "")
-            
+
         cmd_kill = " ; ".join([("kill -2 " + pid) for pid in pids])
-        (_, out_kill, err_kill) = self.machine.exec_command(cmd_kill, 
-                                                            self.logger)
+        (_, out_kill, err_kill) = self.machine.exec_command(cmd_kill, self.logger)
         time.sleep(wait)
         return (out_kill.read().decode(), err_kill.read().decode())
-            
+
     def has_begun(self):
-        '''Returns True if the job has already begun
-        
+        """Returns True if the job has already begun
+
         :return: True if the job has already begun
         :rtype: bool
-        '''
+        """
         return self._has_begun
-    
+
     def has_finished(self):
-        '''Returns True if the job has already finished 
+        """Returns True if the job has already finished
            (i.e. all the commands have been executed)
            If it is finished, the outputs are stored in the fields out and err.
-        
+
         :return: True if the job has already finished
         :rtype: bool
-        '''
-        
+        """
+
         # If the method has already been called and returned True
         if self._has_finished:
             return True
-        
+
         # If the job has not begun yet
         if not self.has_begun():
             return False
-        
+
         if self._stdout.channel.closed:
             self._has_finished = True
             # Store the result outputs
@@ -373,27 +398,25 @@ class Job(object):
                 self.get_log_files()
             except Exception as e:
                 self.err += _("Unable to get remote log files: %s" % e)
-        
+
         return self._has_finished
-          
+
     def get_log_files(self):
-        """Get the log files produced by the command launched 
-           on the remote machine, and put it in the log directory of the user,
-           so they can be accessible from 
+        """Get the log files produced by the command launched
+        on the remote machine, and put it in the log directory of the user,
+        so they can be accessible from
         """
         # Do not get the files if the command is not finished
         if not self.has_finished():
             msg = _("Trying to get log files whereas the job is not finished.")
             self.logger.write(src.printcolors.printcWarning(msg))
             return
-        
+
         # First get the file that contains the list of log files to get
         tmp_file_path = src.get_tmp_filename(self.config, "list_log_files.txt")
         remote_path = os.path.join(self.machine.sat_path, "list_log_files.txt")
-        self.machine.sftp.get(
-                    remote_path,
-                    tmp_file_path)
-        
+        self.machine.sftp.get(remote_path, tmp_file_path)
+
         # Read the file and get the result of the command and all the log files
         # to get
         fstream_tmp = open(tmp_file_path, "r")
@@ -401,65 +424,72 @@ class Job(object):
         file_lines = [line.replace("\n", "") for line in file_lines]
         fstream_tmp.close()
         os.remove(tmp_file_path)
-        
-        try :
+
+        try:
             # The first line is the result of the command (0 success or 1 fail)
             self.res_job = file_lines[0]
         except Exception as e:
-            self.err += _("Unable to get status from remote file %s: %s" % 
-                                                    (remote_path, str(e)))
+            self.err += _(
+                "Unable to get status from remote file %s: %s" % (remote_path, str(e))
+            )
 
         for i, job_path_remote in enumerate(file_lines[1:]):
             try:
                 # For each command, there is two files to get :
-                # 1- The xml file describing the command and giving the 
+                # 1- The xml file describing the command and giving the
                 # internal traces.
-                # 2- The txt file containing the system command traces (like 
+                # 2- The txt file containing the system command traces (like
                 # traces produced by the "make" command)
                 # 3- In case of the test command, there is another file to get :
                 # the xml board that contain the test results
                 dirname = os.path.basename(os.path.dirname(job_path_remote))
-                if dirname != 'OUT' and dirname != 'TEST':
+                if dirname != "OUT" and dirname != "TEST":
                     # Case 1-
-                    local_path = os.path.join(os.path.dirname(
-                                                        self.logger.logFilePath),
-                                              os.path.basename(job_path_remote))
-                    if i==0: # The first is the job command
-                        self.logger.add_link(os.path.basename(job_path_remote),
-                                             "job",
-                                             self.res_job,
-                                             self.command) 
-                elif dirname == 'OUT':
+                    local_path = os.path.join(
+                        os.path.dirname(self.logger.logFilePath),
+                        os.path.basename(job_path_remote),
+                    )
+                    if i == 0:  # The first is the job command
+                        self.logger.add_link(
+                            os.path.basename(job_path_remote),
+                            "job",
+                            self.res_job,
+                            self.command,
+                        )
+                elif dirname == "OUT":
                     # Case 2-
-                    local_path = os.path.join(os.path.dirname(
-                                                        self.logger.logFilePath),
-                                              'OUT',
-                                              os.path.basename(job_path_remote))
-                elif dirname == 'TEST':
+                    local_path = os.path.join(
+                        os.path.dirname(self.logger.logFilePath),
+                        "OUT",
+                        os.path.basename(job_path_remote),
+                    )
+                elif dirname == "TEST":
                     # Case 3-
-                    local_path = os.path.join(os.path.dirname(
-                                                        self.logger.logFilePath),
-                                              'TEST',
-                                              os.path.basename(job_path_remote))
-                
+                    local_path = os.path.join(
+                        os.path.dirname(self.logger.logFilePath),
+                        "TEST",
+                        os.path.basename(job_path_remote),
+                    )
+
                 # Get the file
                 if not os.path.exists(local_path):
                     self.machine.sftp.get(job_path_remote, local_path)
                 self.remote_log_files.append(local_path)
             except Exception as e:
-                self.err += _("Unable to get %s log file from remote: %s" % 
-                                                    (str(job_path_remote),
-                                                     str(e)))
+                self.err += _(
+                    "Unable to get %s log file from remote: %s"
+                    % (str(job_path_remote), str(e))
+                )
 
     def has_failed(self):
-        '''Returns True if the job has failed. 
+        """Returns True if the job has failed.
            A job is considered as failed if the machine could not be reached,
-           if the remote command failed, 
+           if the remote command failed,
            or if the job finished with a time out.
-        
+
         :return: True if the job has failed
         :rtype: bool
-        '''
+        """
         if not self.has_finished():
             return False
         if not self.machine.successfully_connected(self.logger):
@@ -469,10 +499,10 @@ class Job(object):
         if self.res_job == "1":
             return True
         return False
-    
+
     def cancel(self):
-        """In case of a failing job, one has to cancel every job that depend 
-           on it. This method put the job as failed and will not be executed.
+        """In case of a failing job, one has to cancel every job that depend
+        on it. This method put the job as failed and will not be executed.
         """
         if self.cancelled:
             return
@@ -483,24 +513,24 @@ class Job(object):
         self.err += _("This job was not launched because its father has failed.")
 
     def is_running(self):
-        '''Returns True if the job commands are running 
-        
+        """Returns True if the job commands are running
+
         :return: True if the job is running
         :rtype: bool
-        '''
+        """
         return self.has_begun() and not self.has_finished()
 
     def is_timeout(self):
-        '''Returns True if the job commands has finished with timeout 
-        
+        """Returns True if the job commands has finished with timeout
+
         :return: True if the job has finished with timeout
         :rtype: bool
-        '''
+        """
         return self._has_timouted
 
     def time_elapsed(self):
         """Get the time elapsed since the job launching
-        
+
         :return: The number of seconds
         :rtype: int
         """
@@ -508,10 +538,10 @@ class Job(object):
             return -1
         T_now = time.time()
         return T_now - self._T0
-    
+
     def check_time(self):
         """Verify that the job has not exceeded its timeout.
-           If it has, kill the remote command and consider the job as finished.
+        If it has, kill the remote command and consider the job as finished.
         """
         if not self.has_begun():
             return
@@ -526,78 +556,84 @@ class Job(object):
                 self.get_log_files()
             except Exception as e:
                 self.err += _("Unable to get remote log files!\n%s\n" % str(e))
-            
+
     def total_duration(self):
         """Give the total duration of the job
-        
+
         :return: the total duration of the job in seconds
         :rtype: int
         """
         return self._Tf - self._T0
-        
+
     def run(self):
-        """Launch the job by executing the remote command.
-        """
-        
+        """Launch the job by executing the remote command."""
+
         # Prevent multiple run
         if self.has_begun():
             msg = _("Warning: A job can only be launched one time")
-            msg2 = _("Trying to launch the job \"%s\" whereas it has "
-                     "already been launched." % self.name)
-            self.logger.write(src.printcolors.printcWarning("%s\n%s\n" % (msg,
-                                                                        msg2)))
+            msg2 = _(
+                'Trying to launch the job "%s" whereas it has '
+                "already been launched." % self.name
+            )
+            self.logger.write(src.printcolors.printcWarning("%s\n%s\n" % (msg, msg2)))
             return
-        
+
         # Do not execute the command if the machine could not be reached
         if not self.machine.successfully_connected(self.logger):
             self._has_finished = True
             self.out = "N\A"
-            self.err += ("Connection to machine (name : %s, host: %s, port:"
-                        " %s, user: %s) has failed\nUse the log command "
-                        "to get more information."
-                        % (self.machine.name,
-                           self.machine.host,
-                           self.machine.port,
-                           self.machine.user))
+            self.err += (
+                "Connection to machine (name : %s, host: %s, port:"
+                " %s, user: %s) has failed\nUse the log command "
+                "to get more information."
+                % (
+                    self.machine.name,
+                    self.machine.host,
+                    self.machine.port,
+                    self.machine.user,
+                )
+            )
         else:
             # Usual case : Launch the command on remote machine
             self._T0 = time.time()
             self._stdin, self._stdout, self._stderr = self.machine.exec_command(
-                                                                  self.command,
-                                                                  self.logger)
+                self.command, self.logger
+            )
             # If the results are not initialized, finish the job
             if (self._stdin, self._stdout, self._stderr) == (None, None, None):
                 self._has_finished = True
                 self._Tf = time.time()
                 self.out += "N\A"
                 self.err += "The server failed to execute the command"
-        
+
         # Put the beginning flag to true.
         self._has_begun = True
-    
+
     def write_results(self):
-        """Display on the terminal all the job's information
-        """
+        """Display on the terminal all the job's information"""
         self.logger.write("name : " + self.name + "\n")
         if self.after:
             self.logger.write("after : %s\n" % self.after)
-        self.logger.write("Time elapsed : %4imin %2is \n" % 
-                     (self.total_duration()//60 , self.total_duration()%60))
+        self.logger.write(
+            "Time elapsed : %4imin %2is \n"
+            % (self.total_duration() // 60, self.total_duration() % 60)
+        )
         if self._T0 != -1:
-            self.logger.write("Begin time : %s\n" % 
-                         time.strftime('%Y-%m-%d %H:%M:%S', 
-                                       time.localtime(self._T0)) )
+            self.logger.write(
+                "Begin time : %s\n"
+                % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self._T0))
+            )
         if self._Tf != -1:
-            self.logger.write("End time   : %s\n\n" % 
-                         time.strftime('%Y-%m-%d %H:%M:%S', 
-                                       time.localtime(self._Tf)) )
-        
+            self.logger.write(
+                "End time   : %s\n\n"
+                % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self._Tf))
+            )
+
         machine_head = "Informations about connection :\n"
         underline = (len(machine_head) - 2) * "-"
-        self.logger.write(src.printcolors.printcInfo(
-                                                machine_head+underline+"\n"))
+        self.logger.write(src.printcolors.printcInfo(machine_head + underline + "\n"))
         self.machine.write_info(self.logger)
-        
+
         self.logger.write(src.printcolors.printcInfo("out : \n"))
         if self.out == "":
             self.logger.write("Unable to get output\n")
@@ -605,10 +641,10 @@ class Job(object):
             self.logger.write(self.out + "\n")
         self.logger.write(src.printcolors.printcInfo("err : \n"))
         self.logger.write(self.err + "\n")
-        
+
     def get_status(self):
         """Get the status of the job (used by the Gui for xml display)
-        
+
         :return: The current status of the job
         :rtype: String
         """
@@ -619,271 +655,295 @@ class Job(object):
         if self.cancelled:
             return "Cancelled"
         if self.is_running():
-            return "running since " + time.strftime('%Y-%m-%d %H:%M:%S',
-                                                    time.localtime(self._T0))        
+            return "running since " + time.strftime(
+                "%Y-%m-%d %H:%M:%S", time.localtime(self._T0)
+            )
         if self.has_finished():
             if self.is_timeout():
-                return "Timeout since " + time.strftime('%Y-%m-%d %H:%M:%S',
-                                                    time.localtime(self._Tf))
-            return "Finished since " + time.strftime('%Y-%m-%d %H:%M:%S',
-                                                     time.localtime(self._Tf))
-    
+                return "Timeout since " + time.strftime(
+                    "%Y-%m-%d %H:%M:%S", time.localtime(self._Tf)
+                )
+            return "Finished since " + time.strftime(
+                "%Y-%m-%d %H:%M:%S", time.localtime(self._Tf)
+            )
+
+
 class Jobs(object):
-    '''Class to manage the jobs to be run
-    '''
-    def __init__(self,
-                 runner,
-                 logger,
-                 job_file_path,
-                 config_jobs,
-                 lenght_columns = 20):
+    """Class to manage the jobs to be run"""
+
+    def __init__(self, runner, logger, job_file_path, config_jobs, lenght_columns=20):
         # The jobs configuration
         self.cfg_jobs = config_jobs
         self.job_file_path = job_file_path
         # The machine that will be used today
         self.lmachines = []
-        # The list of machine (hosts, port) that will be used today 
-        # (a same host can have several machine instances since there 
-        # can be several ssh parameters) 
+        # The list of machines (host, port) that will be used today
+        # (the same host can have several machine instances since there
+        # can be several ssh parameters)
         self.lhosts = []
-        # The jobs to be launched today 
+        # The jobs to be launched today
         self.ljobs = []
         # The jobs that will not be launched today
         self.ljobs_not_today = []
         self.runner = runner
         self.logger = logger
         self.len_columns = lenght_columns
-        
+
         # the list of jobs that have not been run yet
         self._l_jobs_not_started = []
-        # the list of jobs that have already ran 
+        # the list of jobs that have already run
         self._l_jobs_finished = []
-        # the list of jobs that are running 
-        self._l_jobs_running = [] 
-                
+        # the list of jobs that are running
+        self._l_jobs_running = []
+
         self.determine_jobs_and_machines()
-    
+
     def define_job(self, job_def, machine):
-        '''Takes a pyconf job definition and a machine (from class machine)
+        """Takes a pyconf job definition and a machine (from class machine)
            and returns the job instance corresponding to the definition.
-        
-        :param job_def src.config.Mapping: a job definition 
+
+        :param job_def src.config.Mapping: a job definition
         :param machine machine: the machine on which the job will run
         :return: The corresponding job in a job class instance
         :rtype: job
-        '''
+        """
         name = job_def.name
         cmmnds = job_def.commands
         if not "timeout" in job_def:
-            timeout = 4*60*60 # default timeout = 4h
+            timeout = 4 * 60 * 60  # default timeout = 4h
         else:
             timeout = job_def.timeout
         after = None
-        if 'after' in job_def:
+        if "after" in job_def:
             after = job_def.after
         application = None
-        if 'application' in job_def:
+        if "application" in job_def:
             application = job_def.application
         board = None
-        if 'board' in job_def:
+        if "board" in job_def:
             board = job_def.board
         prefix = None
         if "prefix" in job_def:
             prefix = job_def.prefix
-            
-        return Job(name,
-                   machine,
-                   application,
-                   board,
-                   cmmnds,
-                   timeout,
-                   self.runner.cfg,
-                   self.job_file_path,
-                   self.logger,
-                   after = after,
-                   prefix = prefix)
-    
+
+        return Job(
+            name,
+            machine,
+            application,
+            board,
+            cmmnds,
+            timeout,
+            self.runner.cfg,
+            self.job_file_path,
+            self.logger,
+            after=after,
+            prefix=prefix,
+        )
+
     def determine_jobs_and_machines(self):
-        '''Function that reads the pyconf jobs definition and instantiates all
+        """Function that reads the pyconf jobs definition and instantiates all
            the machines and jobs to be done today.
 
         :return: Nothing
         :rtype: N\A
-        '''
+        """
         today = datetime.date.weekday(datetime.date.today())
         host_list = []
-               
-        for job_def in self.cfg_jobs.jobs :
-                
+
+        for job_def in self.cfg_jobs.jobs:
+
             if not "machine" in job_def:
-                msg = _('WARNING: The job "%s" do not have the key '
-                       '"machine", this job is ignored.\n\n' % job_def.name)
+                msg = _(
+                    'WARNING: The job "%s" do not have the key '
+                    '"machine", this job is ignored.\n\n' % job_def.name
+                )
                 self.logger.write(src.printcolors.printcWarning(msg))
                 continue
             name_machine = job_def.machine
-            
+
             a_machine = None
             for mach in self.lmachines:
                 if mach.name == name_machine:
                     a_machine = mach
                     break
-            
+
             if a_machine == None:
                 for machine_def in self.cfg_jobs.machines:
                     if machine_def.name == name_machine:
-                        if 'host' not in machine_def:
+                        if "host" not in machine_def:
                             host = self.runner.cfg.VARS.hostname
                         else:
                             host = machine_def.host
 
-                        if 'user' not in machine_def:
+                        if "user" not in machine_def:
                             user = self.runner.cfg.VARS.user
                         else:
                             user = machine_def.user
 
-                        if 'port' not in machine_def:
+                        if "port" not in machine_def:
                             port = 22
                         else:
                             port = machine_def.port
-            
-                        if 'password' not in machine_def:
+
+                        if "password" not in machine_def:
                             passwd = None
                         else:
-                            passwd = machine_def.password    
-                            
-                        if 'sat_path' not in machine_def:
+                            passwd = machine_def.password
+
+                        if "sat_path" not in machine_def:
                             sat_path = "salomeTools"
                         else:
                             sat_path = machine_def.sat_path
-                        
+
                         a_machine = Machine(
-                                            machine_def.name,
-                                            host,
-                                            user,
-                                            port=port,
-                                            passwd=passwd,
-                                            sat_path=sat_path
-                                            )
-                        
+                            machine_def.name,
+                            host,
+                            user,
+                            port=port,
+                            passwd=passwd,
+                            sat_path=sat_path,
+                        )
+
                         self.lmachines.append(a_machine)
                         if (host, port) not in host_list:
                             host_list.append((host, port))
-                
+
                 if a_machine == None:
-                    msg = _("WARNING: The job \"%(job_name)s\" requires the "
-                            "machine \"%(machine_name)s\" but this machine "
-                            "is not defined in the configuration file.\n"
-                            "The job will not be launched\n")
-                    self.logger.write(src.printcolors.printcWarning(
-                                        msg % {"job_name" : job_def.name,
-                                               "machine_name" : name_machine}))
+                    msg = _(
+                        'WARNING: The job "%(job_name)s" requires the '
+                        'machine "%(machine_name)s" but this machine '
+                        "is not defined in the configuration file.\n"
+                        "The job will not be launched\n"
+                    )
+                    self.logger.write(
+                        src.printcolors.printcWarning(
+                            msg
+                            % {"job_name": job_def.name, "machine_name": name_machine}
+                        )
+                    )
                     continue
-                                  
+
             a_job = self.define_job(job_def, a_machine)
-                
-            if today in job_def.when:    
+
+            if today in job_def.when:
                 self.ljobs.append(a_job)
-            else: # today in job_def.when
+            else:  # today not in job_def.when
                 self.ljobs_not_today.append(a_job)
-               
+
         self.lhosts = host_list
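
A short aside on the day filter used just above: datetime.date.weekday() returns 0 for Monday through 6 for Sunday, and a job is kept for today only if that index appears in its pyconf "when" list. A minimal standalone sketch (the "when" values below are hypothetical, not taken from any real job file):

    import datetime

    # weekday() is 0 for Monday ... 6 for Sunday
    today = datetime.date.weekday(datetime.date.today())
    when = [0, 2, 4]  # hypothetical: job scheduled on Monday, Wednesday, Friday
    print("runs today" if today in when else "kept for another day")
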
-        
+
     def ssh_connection_all_machines(self, pad=50):
-        '''Function that do the ssh connection to every machine 
+        """Function that do the ssh connection to every machine
            to be used today.
 
         :return: Nothing
         :rtype: N\A
-        '''
-        self.logger.write(src.printcolors.printcInfo((
-                        "Establishing connection with all the machines :\n")))
+        """
+        self.logger.write(
+            src.printcolors.printcInfo(
+                ("Establishing connection with all the machines :\n")
+            )
+        )
         for machine in self.lmachines:
             # little algorithm in order to display traces
-            begin_line = (_("Connection to %s: " % machine.name))
+            begin_line = _("Connection to %s: " % machine.name)
             if pad - len(begin_line) < 0:
                 endline = " "
             else:
                 endline = (pad - len(begin_line)) * "." + " "
-            
+
             step = "SSH connection"
-            self.logger.write( begin_line + endline + step)
+            self.logger.write(begin_line + endline + step)
             self.logger.flush()
             # the call to the method that initiate the ssh connection
             msg = machine.connect(self.logger)
-            
+
             # Copy salomeTools to the remote machine
             if machine.successfully_connected(self.logger):
                 step = _("Remove SAT")
-                self.logger.write('\r%s%s%s' % (begin_line, endline, 20 * " "),3)
-                self.logger.write('\r%s%s%s' % (begin_line, endline, step), 3)
+                self.logger.write("\r%s%s%s" % (begin_line, endline, 20 * " "), 3)
+                self.logger.write("\r%s%s%s" % (begin_line, endline, step), 3)
                 (__, out_dist, __) = machine.exec_command(
-                                                "rm -rf %s" % machine.sat_path,
-                                                self.logger)
+                    "rm -rf %s" % machine.sat_path, self.logger
+                )
                 out_dist.read()
-                
+
                 self.logger.flush()
                 step = _("Copy SAT")
-                self.logger.write('\r%s%s%s' % (begin_line, endline, 20 * " "),3)
-                self.logger.write('\r%s%s%s' % (begin_line, endline, step), 3)
+                self.logger.write("\r%s%s%s" % (begin_line, endline, 20 * " "), 3)
+                self.logger.write("\r%s%s%s" % (begin_line, endline, step), 3)
                 self.logger.flush()
-                res_copy = machine.copy_sat(self.runner.cfg.VARS.salometoolsway,
-                                            self.job_file_path)
+                res_copy = machine.copy_sat(
+                    self.runner.cfg.VARS.salometoolsway, self.job_file_path
+                )
 
                 # set the local settings of sat on the remote machine using
                 # the init command
                 (__, out_dist, __) = machine.exec_command(
-                                os.path.join(machine.sat_path,
-                                    "sat init --base default --workdir"
-                                    " default --log_dir default"),
-                                self.logger)
-                out_dist.read()    
-                
+                    os.path.join(
+                        machine.sat_path,
+                        "sat init --base default --workdir"
+                        " default --log_dir default",
+                    ),
+                    self.logger,
+                )
+                out_dist.read()
+
                 # get the remote machine distribution using a sat command
                 (__, out_dist, __) = machine.exec_command(
-                                os.path.join(machine.sat_path,
-                                    "sat config --value VARS.dist --no_label"),
-                                self.logger)
-                machine.distribution = out_dist.read().decode().replace("\n",
-                                                                        "")
-                
+                    os.path.join(
+                        machine.sat_path, "sat config --value VARS.dist --no_label"
+                    ),
+                    self.logger,
+                )
+                machine.distribution = out_dist.read().decode().replace("\n", "")
+
                 # Print the status of the copy
                 if res_copy == 0:
-                    self.logger.write('\r%s' % 
-                                ((len(begin_line)+len(endline)+20) * " "), 3)
-                    self.logger.write('\r%s%s%s' % 
-                        (begin_line, 
-                         endline, 
-                         src.printcolors.printc(src.OK_STATUS)), 3)
+                    self.logger.write(
+                        "\r%s" % ((len(begin_line) + len(endline) + 20) * " "), 3
+                    )
+                    self.logger.write(
+                        "\r%s%s%s"
+                        % (begin_line, endline, src.printcolors.printc(src.OK_STATUS)),
+                        3,
+                    )
                 else:
-                    self.logger.write('\r%s' % 
-                            ((len(begin_line)+len(endline)+20) * " "), 3)
-                    self.logger.write('\r%s%s%s %s' % 
-                        (begin_line,
-                         endline,
-                         src.printcolors.printc(src.KO_STATUS),
-                         _("Copy of SAT failed: %s" % res_copy)), 3)
+                    self.logger.write(
+                        "\r%s" % ((len(begin_line) + len(endline) + 20) * " "), 3
+                    )
+                    self.logger.write(
+                        "\r%s%s%s %s"
+                        % (
+                            begin_line,
+                            endline,
+                            src.printcolors.printc(src.KO_STATUS),
+                            _("Copy of SAT failed: %s" % res_copy),
+                        ),
+                        3,
+                    )
             else:
-                self.logger.write('\r%s' % 
-                                  ((len(begin_line)+len(endline)+20) * " "), 3)
-                self.logger.write('\r%s%s%s %s' % 
-                    (begin_line,
-                     endline,
-                     src.printcolors.printc(src.KO_STATUS),
-                     msg), 3)
+                self.logger.write(
+                    "\r%s" % ((len(begin_line) + len(endline) + 20) * " "), 3
+                )
+                self.logger.write(
+                    "\r%s%s%s %s"
+                    % (begin_line, endline, src.printcolors.printc(src.KO_STATUS), msg),
+                    3,
+                )
             self.logger.write("\n", 3)
-                
+
         self.logger.write("\n")
-        
 
     def is_occupied(self, hostname):
-        '''Function that returns True if a job is running on 
+        """Function that returns True if a job is running on
            the machine defined by its host and its port.
-        
+
         :param hostname (str, int): the pair (host, port)
-        :return: the job that is running on the host, 
-                or false if there is no job running on the host. 
+        :return: the job that is running on the host,
+                or false if there is no job running on the host.
         :rtype: job / bool
-        '''
+        """
         host = hostname[0]
         port = hostname[1]
         for jb in self.ljobs:
@@ -891,14 +951,14 @@ class Jobs(object):
                 if jb.is_running():
                     return jb
         return False
-    
+
     def update_jobs_states_list(self):
-        '''Function that updates the lists that store the currently
+        """Function that updates the lists that store the currently
            running jobs and the jobs that have already finished.
-        
-        :return: Nothing. 
+
+        :return: True if at least one new job has finished since the last call.
         :rtype: N\A
-        '''
+        """
         jobs_finished_list = []
         jobs_running_list = []
         for jb in self.ljobs:
@@ -907,128 +967,128 @@ class Jobs(object):
                 jb.check_time()
             if jb.has_finished():
                 jobs_finished_list.append(jb)
-        
+
         nb_job_finished_before = len(self._l_jobs_finished)
         self._l_jobs_finished = jobs_finished_list
         self._l_jobs_running = jobs_running_list
-        
+
         nb_job_finished_now = len(self._l_jobs_finished)
-        
+
         return nb_job_finished_now > nb_job_finished_before
-    
+
     def cancel_dependencies_of_failing_jobs(self):
-        '''Function that cancels all the jobs that depend on a failing one.
-        
-        :return: Nothing. 
+        """Function that cancels all the jobs that depend on a failing one.
+
+        :return: Nothing.
         :rtype: N\A
-        '''
-        
+        """
+
         for job in self.ljobs:
             if job.after is None:
                 continue
             father_job = self.find_job_that_has_name(job.after)
             if father_job is not None and father_job.has_failed():
                 job.cancel()
-    
+
     def find_job_that_has_name(self, name):
-        '''Returns the job by its name.
-        
+        """Returns the job by its name.
+
         :param name str: a job name
-        :return: the job that has the name. 
+        :return: the job that has the name.
         :rtype: job
-        '''
+        """
         for jb in self.ljobs:
             if jb.name == name:
                 return jb
         # the following is executed only if the job was not found
         return None
-    
+
     def str_of_length(self, text, length):
-        '''Takes a string text of any length and returns 
+        """Takes a string text of any length and returns
            the most close string of length "length".
-        
+
         :param text str: any string
         :param length int: a length for the returned string
+        :return: the closest string of length "length"
         :rtype: str
-        '''
+        """
         if len(text) > length:
-            text_out = text[:length-3] + '...'
+            text_out = text[: length - 3] + "..."
         else:
             diff = length - len(text)
-            before = " " * (diff//2)
-            after = " " * (diff//2 + diff%2)
+            before = " " * (diff // 2)
+            after = " " * (diff // 2 + diff % 2)
             text_out = before + text + after
-            
+
         return text_out
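
For readers scanning the reformatted helper above: str_of_length either truncates a label with a trailing ellipsis or centres it in a field of the requested width. An equivalent standalone sketch (illustration only, not part of this commit):

    def str_of_length(text, length):
        # truncate long text, marking the cut with "..."
        if len(text) > length:
            return text[: length - 3] + "..."
        # otherwise centre the text by padding both sides with spaces
        diff = length - len(text)
        return " " * (diff // 2) + text + " " * (diff // 2 + diff % 2)

    assert str_of_length("abc", 7) == "  abc  "
    assert str_of_length("abcdefghij", 7) == "abcd..."
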
-    
+
     def display_status(self, len_col):
-        '''Takes a lenght and construct the display of the current status 
+        """Takes a lenght and construct the display of the current status
            of the jobs in an array that has a column for each host.
-           It displays the job that is currently running on the host 
+           It displays the job that is currently running on the host
            of the column.
-        
-        :param len_col int: the size of the column 
+
+        :param len_col int: the size of the column
         :return: Nothing
         :rtype: N\A
-        '''
-        
+        """
+
         display_line = ""
         for host_port in self.lhosts:
             jb = self.is_occupied(host_port)
-            if not jb: # nothing running on the host
+            if not jb:  # nothing running on the host
                 empty = self.str_of_length("empty", len_col)
-                display_line += "|" + empty 
+                display_line += "|" + empty
             else:
                 display_line += "|" + src.printcolors.printcInfo(
-                                        self.str_of_length(jb.name, len_col))
-        
+                    self.str_of_length(jb.name, len_col)
+                )
+
         self.logger.write("\r" + display_line + "|")
         self.logger.flush()
-    
 
     def run_jobs(self):
-        '''The main method. Runs all the jobs on every host. 
+        """The main method. Runs all the jobs on every host.
            For each host, at a given time, only one job can be running.
            The jobs that have the field after (that contain the job that has
            to be run before it) are run after the previous job.
            This method stops when all the jobs are finished.
-        
+
         :return: Nothing
         :rtype: N\A
-        '''
+        """
 
         # Print header
-        self.logger.write(src.printcolors.printcInfo(
-                                                _('Executing the jobs :\n')))
+        self.logger.write(src.printcolors.printcInfo(_("Executing the jobs :\n")))
         text_line = ""
         for host_port in self.lhosts:
             host = host_port[0]
             port = host_port[1]
-            if port == 22: # default value
+            if port == 22:  # default value
                 text_line += "|" + self.str_of_length(host, self.len_columns)
             else:
                 text_line += "|" + self.str_of_length(
-                                "("+host+", "+str(port)+")", self.len_columns)
-        
-        tiret_line = " " + "-"*(len(text_line)-1) + "\n"
+                    "(" + host + ", " + str(port) + ")", self.len_columns
+                )
+
+        tiret_line = " " + "-" * (len(text_line) - 1) + "\n"
         self.logger.write(tiret_line)
         self.logger.write(text_line + "|\n")
         self.logger.write(tiret_line)
         self.logger.flush()
-        
+
         # The infinite loop that runs the jobs
         l_jobs_not_started = src.deepcopy_list(self.ljobs)
         while len(self._l_jobs_finished) != len(self.ljobs):
             new_job_start = False
             for host_port in self.lhosts:
-                
+
                 if self.is_occupied(host_port):
                     continue
-             
+
                 for jb in l_jobs_not_started:
                     if (jb.machine.host, jb.machine.port) != host_port:
-                        continue 
+                        continue
                     if jb.after == None:
                         jb.run()
                         l_jobs_not_started.remove(jb)
@@ -1038,8 +1098,10 @@ class Jobs(object):
                         jb_before = self.find_job_that_has_name(jb.after)
                         if jb_before is None:
                             jb.cancel()
-                            msg = _("This job was not launched because its "
-                                    "father is not in the jobs list.")
+                            msg = _(
+                                "This job was not launched because its "
+                                "father is not in the jobs list."
+                            )
                             jb.out = msg
                             jb.err = msg
                             break
@@ -1050,81 +1112,79 @@ class Jobs(object):
                             break
             self.cancel_dependencies_of_failing_jobs()
             new_job_finished = self.update_jobs_states_list()
-            
+
             if new_job_start or new_job_finished:
                 if self.gui:
-                    self.gui.update_xml_files(self.ljobs)            
-                # Display the current status     
+                    self.gui.update_xml_files(self.ljobs)
+                # Display the current status
                 self.display_status(self.len_columns)
-            
+
             # Make sure that the proc is not entirely busy
             time.sleep(0.001)
-        
-        self.logger.write("\n")    
-        self.logger.write(tiret_line)                   
+
+        self.logger.write("\n")
+        self.logger.write(tiret_line)
         self.logger.write("\n\n")
-        
+
         if self.gui:
             self.gui.update_xml_files(self.ljobs)
             self.gui.last_update()
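
As a rough illustration of the scheduling policy documented in run_jobs above (one running job per host, "after" dependencies honoured, loop until everything is finished), here is a toy standalone sketch; the tuple-based jobs are invented for the example and are not the Job objects of this module:

    # Toy model: a job is (name, host, after) and "running" is instantaneous.
    def schedule(jobs):
        done, waiting = [], list(jobs)
        while waiting:
            busy_hosts = set()
            started_any = False
            for job in list(waiting):
                name, host, after = job
                if host in busy_hosts:
                    continue  # one job per host at a time
                if after is not None and after not in done:
                    continue  # its father job has not finished yet
                busy_hosts.add(host)
                waiting.remove(job)
                done.append(name)
                started_any = True
            if not started_any:
                break  # the real code cancels jobs whose father is missing or failed
        return done

    print(schedule([("build", "host1", None), ("test", "host1", "build")]))
    # ['build', 'test']
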
 
     def write_all_results(self):
-        '''Display all the jobs outputs.
-        
+        """Display all the jobs outputs.
+
         :return: Nothing
         :rtype: N\A
-        '''
-        
+        """
+
         for jb in self.ljobs:
-            self.logger.write(src.printcolors.printcLabel(
-                        "#------- Results for job %s -------#\n" % jb.name))
+            self.logger.write(
+                src.printcolors.printcLabel(
+                    "#------- Results for job %s -------#\n" % jb.name
+                )
+            )
             jb.write_results()
             self.logger.write("\n\n")
 
+
 class Gui(object):
-    '''Class to manage the the xml data that can be displayed in a browser to
-       see the jobs states
-    '''
-   
-    def __init__(self,
-                 xml_dir_path,
-                 l_jobs,
-                 l_jobs_not_today,
-                 prefix,
-                 logger,
-                 file_boards=""):
-        '''Initialization
-        
-        :param xml_dir_path str: The path to the directory where to put 
+    """Class to manage the the xml data that can be displayed in a browser to
+    see the jobs states
+    """
+
+    def __init__(
+        self, xml_dir_path, l_jobs, l_jobs_not_today, prefix, logger, file_boards=""
+    ):
+        """Initialization
+
+        :param xml_dir_path str: The path to the directory where to put
                                  the xml resulting files
         :param l_jobs List: the list of jobs that run today
         :param l_jobs_not_today List: the list of jobs that do not run today
         :param file_boards str: the file path from which to read the
                                    expected boards
-        '''
+        """
         # The logging instance
         self.logger = logger
-        
+
         # The prefix to add to the xml files : date_hour
         self.prefix = prefix
-        
+
         # The path of the csv files to read to fill the expected boards
         self.file_boards = file_boards
-        
+
         if file_boards != "":
             today = datetime.date.weekday(datetime.date.today())
             self.parse_csv_boards(today)
         else:
             self.d_input_boards = {}
-        
+
         # The path of the global xml file
         self.xml_dir_path = xml_dir_path
         # Initialize the xml files
         self.global_name = "global_report"
-        xml_global_path = os.path.join(self.xml_dir_path,
-                                       self.global_name + ".xml")
-        self.xml_global_file = src.xmlManager.XmlLogFile(xml_global_path,
-                                                         "JobsReport")
+        xml_global_path = os.path.join(self.xml_dir_path, self.global_name + ".xml")
+        self.xml_global_file = src.xmlManager.XmlLogFile(xml_global_path, "JobsReport")
 
         # Find history for each job
         self.history = {}
@@ -1139,84 +1199,84 @@ class Gui(object):
 
         # Write the xml file
         self.update_xml_files(l_jobs)
-    
+
     def add_xml_board(self, name):
-        '''Add a board to the board list   
+        """Add a board to the board list
         :param name str: the board name
-        '''
+        """
         xml_board_path = os.path.join(self.xml_dir_path, name + ".xml")
-        self.d_xml_board_files[name] =  src.xmlManager.XmlLogFile(
-                                                    xml_board_path,
-                                                    "JobsReport")
+        self.d_xml_board_files[name] = src.xmlManager.XmlLogFile(
+            xml_board_path, "JobsReport"
+        )
         self.d_xml_board_files[name].add_simple_node("distributions")
         self.d_xml_board_files[name].add_simple_node("applications")
         self.d_xml_board_files[name].add_simple_node("board", text=name)
-           
+
     def initialize_boards(self, l_jobs, l_jobs_not_today):
-        '''Get all the first information needed for each file and write the 
-           first version of the files   
+        """Get all the first information needed for each file and write the
+           first version of the files
         :param l_jobs List: the list of jobs that run today
         :param l_jobs_not_today List: the list of jobs that do not run today
-        '''
+        """
         # Get the boards to fill and put it in a dictionary
         # {board_name : xml instance corresponding to the board}
         for job in l_jobs + l_jobs_not_today:
             board = job.board
-            if (board is not None and 
-                                board not in self.d_xml_board_files.keys()):
+            if board is not None and board not in self.d_xml_board_files.keys():
                 self.add_xml_board(board)
-        
+
         # Verify that the boards given as input are done
         for board in list(self.d_input_boards.keys()):
             if board not in self.d_xml_board_files:
                 self.add_xml_board(board)
             root_node = self.d_xml_board_files[board].xmlroot
-            src.xmlManager.append_node_attrib(root_node, 
-                                              {"input_file" : self.file_boards})
-        
-        # Loop over all jobs in order to get the lines and columns for each 
+            src.xmlManager.append_node_attrib(
+                root_node, {"input_file": self.file_boards}
+            )
+
+        # Loop over all jobs in order to get the lines and columns for each
         # xml file
         d_dist = {}
         d_application = {}
         for board in self.d_xml_board_files:
             d_dist[board] = []
             d_application[board] = []
-            
+
         l_hosts_ports = []
-            
+
         for job in l_jobs + l_jobs_not_today:
-            
+
             if (job.machine.host, job.machine.port) not in l_hosts_ports:
                 l_hosts_ports.append((job.machine.host, job.machine.port))
-                
+
             distrib = job.machine.distribution
             application = job.application
-            
+
             board_job = job.board
             if board is None:
                 continue
             for board in self.d_xml_board_files:
                 if board_job == board:
-                    if (distrib not in [None, ''] and 
-                                            distrib not in d_dist[board]):
+                    if distrib not in [None, ""] and distrib not in d_dist[board]:
                         d_dist[board].append(distrib)
                         src.xmlManager.add_simple_node(
-                            self.d_xml_board_files[board].xmlroot.find(
-                                                            'distributions'),
-                                                   "dist",
-                                                   attrib={"name" : distrib})
-                    
+                            self.d_xml_board_files[board].xmlroot.find("distributions"),
+                            "dist",
+                            attrib={"name": distrib},
+                        )
+
                 if board_job == board:
-                    if (application not in [None, ''] and 
-                                    application not in d_application[board]):
+                    if (
+                        application not in [None, ""]
+                        and application not in d_application[board]
+                    ):
                         d_application[board].append(application)
                         src.xmlManager.add_simple_node(
-                            self.d_xml_board_files[board].xmlroot.find(
-                                                                'applications'),
-                                                   "application",
-                                                   attrib={
-                                                        "name" : application})
-        
+                            self.d_xml_board_files[board].xmlroot.find("applications"),
+                            "application",
+                            attrib={"name": application},
+                        )
+
         # Verify that there are no missing application or distribution in the
         # xml board files (regarding the input boards)
         for board in self.d_xml_board_files:
@@ -1226,44 +1286,42 @@ class Gui(object):
             for dist in self.d_input_boards[board]["rows"]:
                 if dist not in l_dist:
                     src.xmlManager.add_simple_node(
-                            self.d_xml_board_files[board].xmlroot.find(
-                                                            'distributions'),
-                                                   "dist",
-                                                   attrib={"name" : dist})
+                        self.d_xml_board_files[board].xmlroot.find("distributions"),
+                        "dist",
+                        attrib={"name": dist},
+                    )
             l_appli = d_application[board]
             for appli in self.d_input_boards[board]["columns"]:
                 if appli not in l_appli:
                     src.xmlManager.add_simple_node(
-                            self.d_xml_board_files[board].xmlroot.find(
-                                                                'applications'),
-                                                   "application",
-                                                   attrib={"name" : appli})
-                
+                        self.d_xml_board_files[board].xmlroot.find("applications"),
+                        "application",
+                        attrib={"name": appli},
+                    )
+
         # Initialize the hosts_ports node for the global file
-        self.xmlhosts_ports = self.xml_global_file.add_simple_node(
-                                                                "hosts_ports")
+        self.xmlhosts_ports = self.xml_global_file.add_simple_node("hosts_ports")
         for host, port in l_hosts_ports:
             host_port = "%s:%i" % (host, port)
-            src.xmlManager.add_simple_node(self.xmlhosts_ports,
-                                           "host_port",
-                                           attrib={"name" : host_port})
-        
+            src.xmlManager.add_simple_node(
+                self.xmlhosts_ports, "host_port", attrib={"name": host_port}
+            )
+
         # Initialize the jobs node in all files
-        for xml_file in [self.xml_global_file] + list(
-                                            self.d_xml_board_files.values()):
-            xml_jobs = xml_file.add_simple_node("jobs")      
-            # Get the jobs present in the config file but 
+        for xml_file in [self.xml_global_file] + list(self.d_xml_board_files.values()):
+            xml_jobs = xml_file.add_simple_node("jobs")
+            # Get the jobs present in the config file but
             # that will not be launched today
             self.put_jobs_not_today(l_jobs_not_today, xml_jobs)
-            
+
             # add also the infos node
-            xml_file.add_simple_node("infos",
-                                     attrib={"name" : "last update",
-                                             "JobsCommandStatus" : "running"})
-            
+            xml_file.add_simple_node(
+                "infos", attrib={"name": "last update", "JobsCommandStatus": "running"}
+            )
+
             # and put the history node
             history_node = xml_file.add_simple_node("history")
-            name_board = os.path.basename(xml_file.logFile)[:-len(".xml")]
+            name_board = os.path.basename(xml_file.logFile)[: -len(".xml")]
             # serach for board files
             expression = "^[0-9]{8}_+[0-9]{6}_" + name_board + ".xml$"
             oExpr = re.compile(expression)
@@ -1272,54 +1330,51 @@ class Gui(object):
                 if oExpr.search(file_name):
                     date = os.path.basename(file_name).split("_")[0]
                     file_path = os.path.join(self.xml_dir_path, file_name)
-                    src.xmlManager.add_simple_node(history_node,
-                                                   "link",
-                                                   text=file_path,
-                                                   attrib={"date" : date})      
-            
-                
+                    src.xmlManager.add_simple_node(
+                        history_node, "link", text=file_path, attrib={"date": date}
+                    )
+
         # Find in each board the squares that needs to be filled regarding the
         # input csv files but that are not covered by a today job
         for board in self.d_input_boards.keys():
             xml_root_board = self.d_xml_board_files[board].xmlroot
             # Find the missing jobs for today
-            xml_missing = src.xmlManager.add_simple_node(xml_root_board,
-                                                 "missing_jobs")
+            xml_missing = src.xmlManager.add_simple_node(xml_root_board, "missing_jobs")
             for row, column in self.d_input_boards[board]["jobs"]:
                 found = False
                 for job in l_jobs:
-                    if (job.application == column and 
-                        job.machine.distribution == row):
+                    if job.application == column and job.machine.distribution == row:
                         found = True
                         break
                 if not found:
-                    src.xmlManager.add_simple_node(xml_missing,
-                                            "job",
-                                            attrib={"distribution" : row,
-                                                    "application" : column })
+                    src.xmlManager.add_simple_node(
+                        xml_missing,
+                        "job",
+                        attrib={"distribution": row, "application": column},
+                    )
             # Find the missing jobs not today
             xml_missing_not_today = src.xmlManager.add_simple_node(
-                                                 xml_root_board,
-                                                 "missing_jobs_not_today")
+                xml_root_board, "missing_jobs_not_today"
+            )
             for row, column in self.d_input_boards[board]["jobs_not_today"]:
                 found = False
                 for job in l_jobs_not_today:
-                    if (job.application == column and 
-                        job.machine.distribution == row):
+                    if job.application == column and job.machine.distribution == row:
                         found = True
                         break
                 if not found:
-                    src.xmlManager.add_simple_node(xml_missing_not_today,
-                                            "job",
-                                            attrib={"distribution" : row,
-                                                    "application" : column })
+                    src.xmlManager.add_simple_node(
+                        xml_missing_not_today,
+                        "job",
+                        attrib={"distribution": row, "application": column},
+                    )
 
     def find_history(self, l_jobs, l_jobs_not_today):
-        """find, for each job, in the existent xml boards the results for the 
-           job. Store the results in the dictionnary self.history = {name_job : 
+        """find, for each job, in the existent xml boards the results for the
+           job. Store the results in the dictionnary self.history = {name_job :
            list of (date, status, list links)}
-        
-        :param l_jobs List: the list of jobs to run today   
+
+        :param l_jobs List: the list of jobs to run today
         :param l_jobs_not_today List: the list of jobs that do not run today
         """
         # load the all the history
@@ -1334,22 +1389,21 @@ class Gui(object):
                     global_xml = src.xmlManager.ReadXmlFile(file_path)
                     l_globalxml.append(global_xml)
                 except Exception as e:
-                    msg = _("\nWARNING: the file %s can not be read, it will be "
-                            "ignored\n%s" % (file_path, e))
-                    self.logger.write("%s\n" % src.printcolors.printcWarning(
-                                                                        msg), 5)
-                    
-        # Construct the dictionnary self.history 
+                    msg = _(
+                        "\nWARNING: the file %s can not be read, it will be "
+                        "ignored\n%s" % (file_path, e)
+                    )
+                    self.logger.write("%s\n" % src.printcolors.printcWarning(msg), 5)
+
+        # Construct the dictionary self.history
         for job in l_jobs + l_jobs_not_today:
             l_links = []
             for global_xml in l_globalxml:
                 date = os.path.basename(global_xml.filePath).split("_")[0]
                 global_root_node = global_xml.xmlroot.find("jobs")
                 job_node = src.xmlManager.find_node_by_attrib(
-                                                              global_root_node,
-                                                              "job",
-                                                              "name",
-                                                              job.name)
+                    global_root_node, "job", "name", job.name
+                )
                 if job_node:
                     if job_node.find("remote_log_file_path") is not None:
                         link = job_node.find("remote_log_file_path").text
@@ -1358,77 +1412,78 @@ class Gui(object):
                             l_links.append((date, res_job, link))
             l_links = sorted(l_links, reverse=True)
             self.history[job.name] = l_links
-  
+
     def put_jobs_not_today(self, l_jobs_not_today, xml_node_jobs):
-        '''Get all the first information needed for each file and write the 
-           first version of the files   
+        """Get all the first information needed for each file and write the
+           first version of the files
 
         :param xml_node_jobs etree.Element: the node corresponding to a job
         :param l_jobs_not_today List: the list of jobs that do not run today
-        '''
+        """
         for job in l_jobs_not_today:
-            xmlj = src.xmlManager.add_simple_node(xml_node_jobs,
-                                                 "job",
-                                                 attrib={"name" : job.name})
+            xmlj = src.xmlManager.add_simple_node(
+                xml_node_jobs, "job", attrib={"name": job.name}
+            )
             src.xmlManager.add_simple_node(xmlj, "application", job.application)
-            src.xmlManager.add_simple_node(xmlj,
-                                           "distribution",
-                                           job.machine.distribution)
+            src.xmlManager.add_simple_node(
+                xmlj, "distribution", job.machine.distribution
+            )
             src.xmlManager.add_simple_node(xmlj, "board", job.board)
-            src.xmlManager.add_simple_node(xmlj,
-                                       "commands", " ; ".join(job.commands))
+            src.xmlManager.add_simple_node(xmlj, "commands", " ; ".join(job.commands))
             src.xmlManager.add_simple_node(xmlj, "state", "Not today")
             src.xmlManager.add_simple_node(xmlj, "machine", job.machine.name)
             src.xmlManager.add_simple_node(xmlj, "host", job.machine.host)
             src.xmlManager.add_simple_node(xmlj, "port", str(job.machine.port))
             src.xmlManager.add_simple_node(xmlj, "user", job.machine.user)
-            src.xmlManager.add_simple_node(xmlj, "sat_path",
-                                                        job.machine.sat_path)
+            src.xmlManager.add_simple_node(xmlj, "sat_path", job.machine.sat_path)
             xml_history = src.xmlManager.add_simple_node(xmlj, "history")
             for i, (date, res_job, link) in enumerate(self.history[job.name]):
-                if i==0:
+                if i == 0:
                     # tag the first one (the last one)
-                    src.xmlManager.add_simple_node(xml_history,
-                                                   "link",
-                                                   text=link,
-                                                   attrib={"date" : date,
-                                                           "res" : res_job,
-                                                           "last" : "yes"})
+                    src.xmlManager.add_simple_node(
+                        xml_history,
+                        "link",
+                        text=link,
+                        attrib={"date": date, "res": res_job, "last": "yes"},
+                    )
                 else:
-                    src.xmlManager.add_simple_node(xml_history,
-                                                   "link",
-                                                   text=link,
-                                                   attrib={"date" : date,
-                                                           "res" : res_job,
-                                                           "last" : "no"})
+                    src.xmlManager.add_simple_node(
+                        xml_history,
+                        "link",
+                        text=link,
+                        attrib={"date": date, "res": res_job, "last": "no"},
+                    )
 
     def parse_csv_boards(self, today):
-        """ Parse the csv file that describes the boards to produce and fill 
+        """Parse the csv file that describes the boards to produce and fill
             the dict d_input_boards that contain the csv file contain
-        
-        :param today int: the current day of the week 
+
+        :param today int: the current day of the week
         """
         # open the csv file and read its content
         l_read = []
-        with open(self.file_boards, 'r') as f:
-            reader = csv.reader(f,delimiter=CSV_DELIMITER)
+        with open(self.file_boards, "r") as f:
+            reader = csv.reader(f, delimiter=CSV_DELIMITER)
             for row in reader:
                 l_read.append(row)
         # get the delimiter for the boards (empty line)
-        boards_delimiter = [''] * len(l_read[0])
+        boards_delimiter = [""] * len(l_read[0])
         # Make the list of boards, by splitting with the delimiter
-        l_boards = [list(y) for x, y in itertools.groupby(l_read,
-                                    lambda z: z == boards_delimiter) if not x]
-           
+        l_boards = [
+            list(y)
+            for x, y in itertools.groupby(l_read, lambda z: z == boards_delimiter)
+            if not x
+        ]
+
         # loop over the csv lists of lines and get the rows, columns and jobs
         d_boards = {}
         for input_board in l_boards:
             # get board name
             board_name = input_board[0][0]
-            
+
             # Get columns list
             columns = input_board[0][1:]
-            
+
             rows = []
             jobs = []
             jobs_not_today = []
@@ -1436,112 +1491,110 @@ class Gui(object):
                 row = line[0]
                 rows.append(row)
                 for i, square in enumerate(line[1:]):
-                    if square=='':
+                    if square == "":
                         continue
                     days = square.split(DAYS_SEPARATOR)
                     days = [int(day) for day in days]
                     job = (row, columns[i])
-                    if today in days:                           
+                    if today in days:
                         jobs.append(job)
                     else:
                         jobs_not_today.append(job)
 
-            d_boards[board_name] = {"rows" : rows,
-                                    "columns" : columns,
-                                    "jobs" : jobs,
-                                    "jobs_not_today" : jobs_not_today}
-        
+            d_boards[board_name] = {
+                "rows": rows,
+                "columns": columns,
+                "jobs": jobs,
+                "jobs_not_today": jobs_not_today,
+            }
+
         self.d_input_boards = d_boards
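
The itertools.groupby split in parse_csv_boards above is compact but not obvious at first read: consecutive non-empty csv rows are grouped into one board, and a row made only of empty strings acts as the separator. A self-contained illustration with invented data:

    import itertools

    rows = [
        ["board1", "appA"],
        ["debian10", "0 1"],
        ["", ""],             # blank csv line: separator between boards
        ["board2", "appB"],
        ["fedora", "2"],
    ]
    delimiter = [""] * len(rows[0])
    boards = [
        list(group)
        for is_sep, group in itertools.groupby(rows, lambda r: r == delimiter)
        if not is_sep
    ]
    print(boards)
    # [[['board1', 'appA'], ['debian10', '0 1']], [['board2', 'appB'], ['fedora', '2']]]
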
 
     def update_xml_files(self, l_jobs):
-        '''Write all the xml files with updated information about the jobs   
+        """Write all the xml files with updated information about the jobs
 
         :param l_jobs List: the list of jobs that run today
-        '''
-        for xml_file in [self.xml_global_file] + list(
-                                            self.d_xml_board_files.values()):
+        """
+        for xml_file in [self.xml_global_file] + list(self.d_xml_board_files.values()):
             self.update_xml_file(l_jobs, xml_file)
-            
+
         # Write the file
         self.write_xml_files()
-            
-    def update_xml_file(self, l_jobs, xml_file):      
-        '''update information about the jobs for the file xml_file   
+
+    def update_xml_file(self, l_jobs, xml_file):
+        """update information about the jobs for the file xml_file
 
         :param l_jobs List: the list of jobs that run today
         :param xml_file xmlManager.XmlLogFile: the xml instance to update
-        '''
-        
-        xml_node_jobs = xml_file.xmlroot.find('jobs')
+        """
+
+        xml_node_jobs = xml_file.xmlroot.find("jobs")
         # Update the job names and status node
         for job in l_jobs:
             # Find the node corresponding to the job and delete it
             # in order to recreate it
-            for xmljob in xml_node_jobs.findall('job'):
-                if xmljob.attrib['name'] == job.name:
+            for xmljob in xml_node_jobs.findall("job"):
+                if xmljob.attrib["name"] == job.name:
                     xml_node_jobs.remove(xmljob)
-            
+
             T0 = str(job._T0)
             if T0 != "-1":
-                T0 = time.strftime('%Y-%m-%d %H:%M:%S', 
-                                       time.localtime(job._T0))
+                T0 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(job._T0))
             Tf = str(job._Tf)
             if Tf != "-1":
-                Tf = time.strftime('%Y-%m-%d %H:%M:%S', 
-                                       time.localtime(job._Tf))
-            
+                Tf = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(job._Tf))
+
             # recreate the job node
-            xmlj = src.xmlManager.add_simple_node(xml_node_jobs,
-                                                  "job",
-                                                  attrib={"name" : job.name})
+            xmlj = src.xmlManager.add_simple_node(
+                xml_node_jobs, "job", attrib={"name": job.name}
+            )
             src.xmlManager.add_simple_node(xmlj, "machine", job.machine.name)
             src.xmlManager.add_simple_node(xmlj, "host", job.machine.host)
             src.xmlManager.add_simple_node(xmlj, "port", str(job.machine.port))
             src.xmlManager.add_simple_node(xmlj, "user", job.machine.user)
             xml_history = src.xmlManager.add_simple_node(xmlj, "history")
             for date, res_job, link in self.history[job.name]:
-                src.xmlManager.add_simple_node(xml_history,
-                                               "link",
-                                               text=link,
-                                               attrib={"date" : date,
-                                                       "res" : res_job})
-
-            src.xmlManager.add_simple_node(xmlj, "sat_path",
-                                           job.machine.sat_path)
+                src.xmlManager.add_simple_node(
+                    xml_history,
+                    "link",
+                    text=link,
+                    attrib={"date": date, "res": res_job},
+                )
+
+            src.xmlManager.add_simple_node(xmlj, "sat_path", job.machine.sat_path)
             src.xmlManager.add_simple_node(xmlj, "application", job.application)
-            src.xmlManager.add_simple_node(xmlj, "distribution",
-                                           job.machine.distribution)
+            src.xmlManager.add_simple_node(
+                xmlj, "distribution", job.machine.distribution
+            )
             src.xmlManager.add_simple_node(xmlj, "board", job.board)
             src.xmlManager.add_simple_node(xmlj, "timeout", str(job.timeout))
-            src.xmlManager.add_simple_node(xmlj, "commands",
-                                           " ; ".join(job.commands))
+            src.xmlManager.add_simple_node(xmlj, "commands", " ; ".join(job.commands))
             src.xmlManager.add_simple_node(xmlj, "state", job.get_status())
             src.xmlManager.add_simple_node(xmlj, "begin", T0)
             src.xmlManager.add_simple_node(xmlj, "end", Tf)
-            src.xmlManager.add_simple_node(xmlj, "out",
-                                           src.printcolors.cleancolor(job.out))
-            src.xmlManager.add_simple_node(xmlj, "err",
-                                           src.printcolors.cleancolor(job.err))
+            src.xmlManager.add_simple_node(
+                xmlj, "out", src.printcolors.cleancolor(job.out)
+            )
+            src.xmlManager.add_simple_node(
+                xmlj, "err", src.printcolors.cleancolor(job.err)
+            )
             src.xmlManager.add_simple_node(xmlj, "res", str(job.res_job))
             if len(job.remote_log_files) > 0:
-                src.xmlManager.add_simple_node(xmlj,
-                                               "remote_log_file_path",
-                                               job.remote_log_files[0])
+                src.xmlManager.add_simple_node(
+                    xmlj, "remote_log_file_path", job.remote_log_files[0]
+                )
             else:
-                src.xmlManager.add_simple_node(xmlj,
-                                               "remote_log_file_path",
-                                               "nothing")           
+                src.xmlManager.add_simple_node(xmlj, "remote_log_file_path", "nothing")
             # Search for the test log if there is any
             l_test_log_files = self.find_test_log(job.remote_log_files)
-            xml_test = src.xmlManager.add_simple_node(xmlj,
-                                                      "test_log_file_path")
+            xml_test = src.xmlManager.add_simple_node(xmlj, "test_log_file_path")
             for test_log_path, res_test, nb_fails in l_test_log_files:
-                test_path_node = src.xmlManager.add_simple_node(xml_test,
-                                               "path",
-                                               test_log_path)
+                test_path_node = src.xmlManager.add_simple_node(
+                    xml_test, "path", test_log_path
+                )
                 test_path_node.attrib["res"] = res_test
                 test_path_node.attrib["nb_fails"] = nb_fails
-            
+
             xmlafter = src.xmlManager.add_simple_node(xmlj, "after", job.after)
             # get the job father
             if job.after is not None:
@@ -1549,48 +1602,41 @@ class Gui(object):
                 for jb in l_jobs:
                     if jb.name == job.after:
                         job_father = jb
-                
-                if (job_father is not None and 
-                        len(job_father.remote_log_files) > 0):
+
+                if job_father is not None and len(job_father.remote_log_files) > 0:
                     link = job_father.remote_log_files[0]
                 else:
                     link = "nothing"
-                src.xmlManager.append_node_attrib(xmlafter, {"link" : link})
-            
+                src.xmlManager.append_node_attrib(xmlafter, {"link": link})
+
             # Verify that the job is to be done today regarding the input csv
             # files
             if job.board and job.board in self.d_input_boards.keys():
                 found = False
                 for dist, appli in self.d_input_boards[job.board]["jobs"]:
-                    if (job.machine.distribution == dist 
-                        and job.application == appli):
+                    if job.machine.distribution == dist and job.application == appli:
                         found = True
-                        src.xmlManager.add_simple_node(xmlj,
-                                               "extra_job",
-                                               "no")
+                        src.xmlManager.add_simple_node(xmlj, "extra_job", "no")
                         break
                 if not found:
-                    src.xmlManager.add_simple_node(xmlj,
-                                               "extra_job",
-                                               "yes")
-            
-        
+                    src.xmlManager.add_simple_node(xmlj, "extra_job", "yes")
+
         # Update the date
-        xml_node_infos = xml_file.xmlroot.find('infos')
-        src.xmlManager.append_node_attrib(xml_node_infos,
-                    attrib={"value" : 
-                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
-               
+        xml_node_infos = xml_file.xmlroot.find("infos")
+        src.xmlManager.append_node_attrib(
+            xml_node_infos,
+            attrib={"value": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")},
+        )
 
     def find_test_log(self, l_remote_log_files):
-        '''Find if there is a test log (board) in the remote log files and 
+        """Find if there is a test log (board) in the remote log files and
            the path to it. There can be several test commands, so the result is
            a list.
 
         :param l_remote_log_files List: the list of all remote log files
         :return: the list of (test log file path, result of the test command, number of fails)
         :rtype: List
-        '''
+        """
         res = []
         for file_path in l_remote_log_files:
             dirname = os.path.basename(os.path.dirname(file_path))
@@ -1604,43 +1650,42 @@ class Gui(object):
                 # find the number of fails
                 testbase_node = prod_node.find("tests").find("testbase")
                 nb_fails = int(testbase_node.attrib["failed"])
-                # put the file path, the res of the test command and the number 
+                # put the file path, the res of the test command and the number
                 # of fails in the output
                 res.append((file_path, res_test, nb_fails))
-                
+
         return res
-    
-    def last_update(self, finish_status = "finished"):
-        '''update information about the jobs for the file xml_file   
+
+    def last_update(self, finish_status="finished"):
+        """update information about the jobs for the file xml_file
 
         :param l_jobs List: the list of jobs that run today
         :param xml_file xmlManager.XmlLogFile: the xml instance to update
-        '''
+        """
         for xml_file in [self.xml_global_file] + list(self.d_xml_board_files.values()):
-            xml_node_infos = xml_file.xmlroot.find('infos')
-            src.xmlManager.append_node_attrib(xml_node_infos,
-                        attrib={"JobsCommandStatus" : finish_status})
+            xml_node_infos = xml_file.xmlroot.find("infos")
+            src.xmlManager.append_node_attrib(
+                xml_node_infos, attrib={"JobsCommandStatus": finish_status}
+            )
         # Write the file
         self.write_xml_files()
 
     def write_xml_file(self, xml_file, stylesheet):
-        ''' Write one xml file and the same file with prefix
-        '''
+        """Write one xml file and the same file with prefix"""
         xml_file.write_tree(stylesheet)
         file_path = xml_file.logFile
         file_dir = os.path.dirname(file_path)
         file_name = os.path.basename(file_path)
         file_name_with_prefix = self.prefix + "_" + file_name
-        xml_file.write_tree(stylesheet, os.path.join(file_dir,
-                                                     file_name_with_prefix))
-        
+        xml_file.write_tree(stylesheet, os.path.join(file_dir, file_name_with_prefix))
+
     def write_xml_files(self):
-        ''' Write the xml files   
-        '''
+        """Write the xml files"""
         self.write_xml_file(self.xml_global_file, STYLESHEET_GLOBAL)
         for xml_file in self.d_xml_board_files.values():
             self.write_xml_file(xml_file, STYLESHEET_BOARD)
 
+
 def get_config_file_path(job_config_name, l_cfg_dir):
     found = False
     file_jobs_cfg = None
@@ -1650,9 +1695,9 @@ def get_config_file_path(job_config_name, l_cfg_dir):
     else:
         for cfg_dir in l_cfg_dir:
             file_jobs_cfg = os.path.join(cfg_dir, job_config_name)
-            if not file_jobs_cfg.endswith('.pyconf'):
-                file_jobs_cfg += '.pyconf'
-            
+            if not file_jobs_cfg.endswith(".pyconf"):
+                file_jobs_cfg += ".pyconf"
+
             if not os.path.exists(file_jobs_cfg):
                 continue
             else:
@@ -1660,11 +1705,12 @@ def get_config_file_path(job_config_name, l_cfg_dir):
                 break
     return found, file_jobs_cfg
 
+
 def develop_factorized_jobs(config_jobs):
-    '''update information about the jobs for the file xml_file   
-    
+    """update information about the jobs for the file xml_file
+
     :param config_jobs Config: the config corresponding to the jos description
-    '''
+    """
     developed_jobs_list = []
     for jb in config_jobs.jobs:
         # case where the jobs are not developed
@@ -1678,7 +1724,7 @@ def develop_factorized_jobs(config_jobs):
         for machine in jb.machine:
             new_job = src.pyconf.deepCopyMapping(jb)
             # case where the job on the machine applies to all
-            # days in when variable. 
+            # days in the when variable.
             if type(machine) == type(""):
                 new_job.machine = machine
                 new_job.name = name_job + " / " + machine
@@ -1688,35 +1734,37 @@ def develop_factorized_jobs(config_jobs):
                 new_job.name = name_job + " / " + machine[0]
                 new_job.when = machine[1:]
             developed_jobs_list.append(new_job)
-    
+
     config_jobs.jobs = developed_jobs_list
-            
+
 
 ##
 # Describes the command
 def description():
-    return _("The jobs command launches maintenances that are described"
-             " in the dedicated jobs configuration file.\n\nexample:\nsat "
-             "jobs --name my_jobs --publish")
+    return _(
+        "The jobs command launches maintenances that are described"
+        " in the dedicated jobs configuration file.\n\nexample:\nsat "
+        "jobs --name my_jobs --publish"
+    )
+
 
 ##
 # Runs the command.
 def run(args, runner, logger):
-       
+
     (options, args) = parser.parse_args(args)
-       
+
     l_cfg_dir = runner.cfg.PATHS.JOBPATH
-    
+
     # list option : display all the available config files
     if options.list:
         for cfg_dir in l_cfg_dir:
             if not options.no_label:
-                logger.write("------ %s\n" % 
-                                 src.printcolors.printcHeader(cfg_dir))
+                logger.write("------ %s\n" % src.printcolors.printcHeader(cfg_dir))
             if not os.path.exists(cfg_dir):
                 continue
             for f in sorted(os.listdir(cfg_dir)):
-                if not f.endswith('.pyconf'):
+                if not f.endswith(".pyconf"):
                     continue
                 cfilename = f[:-7]
                 logger.write("%s\n" % cfilename)
@@ -1724,81 +1772,81 @@ def run(args, runner, logger):
 
     # Make sure the jobs_config option has been called
     if not options.jobs_cfg:
-        message = _("The option --jobs_config is required\n")      
+        message = _("The option --jobs_config is required\n")
         src.printcolors.printcError(message)
         return 1
-    
+
     # Find the file in the directories, unless it is a full path
-    # merge all in a config
+    # merge all in a config
     merger = src.pyconf.ConfigMerger()
     config_jobs = src.pyconf.Config()
     l_conf_files_path = []
     for config_file in options.jobs_cfg:
         found, file_jobs_cfg = get_config_file_path(config_file, l_cfg_dir)
         if not found:
-            msg = _("The file configuration %s was not found."
-                    "\nUse the --list option to get the "
-                    "possible files." % config_file)
+            msg = _(
+                "The file configuration %s was not found."
+                "\nUse the --list option to get the "
+                "possible files." % config_file
+            )
             logger.write("%s\n" % src.printcolors.printcError(msg), 1)
             return 1
         l_conf_files_path.append(file_jobs_cfg)
         # Read the config that is in the file
         one_config_jobs = src.read_config_from_a_file(file_jobs_cfg)
         merger.merge(config_jobs, one_config_jobs)
-    
+
     info = [
         (_("Platform"), runner.cfg.VARS.dist),
-        (_("Files containing the jobs configuration"), l_conf_files_path)
-    ]    
+        (_("Files containing the jobs configuration"), l_conf_files_path),
+    ]
     src.print_info(logger, info)
 
     if options.only_jobs:
         l_jb = src.pyconf.Sequence()
         for jb in config_jobs.jobs:
             if jb.name in options.only_jobs:
-                l_jb.append(jb,
-                "Job that was given in only_jobs option parameters\n")
+                l_jb.append(jb, "Job that was given in only_jobs option parameters\n")
         config_jobs.jobs = l_jb
-    
+
     # Parse the config jobs in order to develop all the factorized jobs
     develop_factorized_jobs(config_jobs)
-    
-    # Make a unique file that contain all the jobs in order to use it 
+
+    # Make a unique file that contains all the jobs in order to use it
     # on every machine
-    name_pyconf = "_".join([os.path.basename(path)[:-len('.pyconf')] 
-                            for path in l_conf_files_path]) + ".pyconf"
+    name_pyconf = (
+        "_".join(
+            [os.path.basename(path)[: -len(".pyconf")] for path in l_conf_files_path]
+        )
+        + ".pyconf"
+    )
     path_pyconf = src.get_tmp_filename(runner.cfg, name_pyconf)
-    #Save config
-    f = file( path_pyconf , 'w')
+    # Save config
+    f = file(path_pyconf, "w")
     config_jobs.__save__(f)
-    
-    # log the paramiko problems
+
+    # log the paramiko problems
     log_dir = src.get_log_path(runner.cfg)
     paramiko_log_dir_path = os.path.join(log_dir, "JOBS")
     src.ensure_path_exists(paramiko_log_dir_path)
-    paramiko.util.log_to_file(os.path.join(paramiko_log_dir_path,
-                                           logger.txtFileName))
-    
+    paramiko.util.log_to_file(os.path.join(paramiko_log_dir_path, logger.txtFileName))
+
     # Initialization
-    today_jobs = Jobs(runner,
-                      logger,
-                      path_pyconf,
-                      config_jobs)
-    
+    today_jobs = Jobs(runner, logger, path_pyconf, config_jobs)
+
     # SSH connection to all machines
     today_jobs.ssh_connection_all_machines()
     if options.test_connection:
         return 0
-    
+
     gui = None
     if options.publish:
-        logger.write(src.printcolors.printcInfo(
-                                        _("Initialize the xml boards : ")), 5)
+        logger.write(src.printcolors.printcInfo(_("Initialize the xml boards : ")), 5)
         logger.flush()
-        
-        # Copy the stylesheets in the log directory 
+
+        # Copy the stylesheets in the log directory
         log_dir = log_dir
-        xsl_dir = os.path.join(runner.cfg.VARS.srcDir, 'xsl')
+        xsl_dir = os.path.join(runner.cfg.VARS.srcDir, "xsl")
         files_to_copy = []
         files_to_copy.append(os.path.join(xsl_dir, STYLESHEET_GLOBAL))
         files_to_copy.append(os.path.join(xsl_dir, STYLESHEET_BOARD))
@@ -1808,45 +1856,50 @@ def run(args, runner, logger):
             # OP We use copy instead of copy2 to update the creation date
             #    So we can clean the LOGS directories easily
             shutil.copy(file_path, log_dir)
-        
+
         # Instantiate the Gui in order to produce the xml files that contain all
         # the boards
-        gui = Gui(log_dir,
-                  today_jobs.ljobs,
-                  today_jobs.ljobs_not_today,
-                  runner.cfg.VARS.datehour,
-                  logger,
-                  file_boards = options.input_boards)
-        
+        gui = Gui(
+            log_dir,
+            today_jobs.ljobs,
+            today_jobs.ljobs_not_today,
+            runner.cfg.VARS.datehour,
+            logger,
+            file_boards=options.input_boards,
+        )
+
         logger.write(src.printcolors.printcSuccess("OK"), 5)
         logger.write("\n\n", 5)
         logger.flush()
-        
+
         # Display the list of the xml files
-        logger.write(src.printcolors.printcInfo(("Here is the list of published"
-                                                 " files :\n")), 4)
+        logger.write(
+            src.printcolors.printcInfo(("Here is the list of published" " files :\n")),
+            4,
+        )
         logger.write("%s\n" % gui.xml_global_file.logFile, 4)
         for board in gui.d_xml_board_files.keys():
             file_path = gui.d_xml_board_files[board].logFile
             file_name = os.path.basename(file_path)
             logger.write("%s\n" % file_path, 4)
             logger.add_link(file_name, "board", 0, board)
-              
+
         logger.write("\n", 4)
-        
+
     today_jobs.gui = gui
-    
+
     interruped = False
     try:
         # Run all the jobs contained in config_jobs
         today_jobs.run_jobs()
     except KeyboardInterrupt:
         interruped = True
-        logger.write("\n\n%s\n\n" % 
-                (src.printcolors.printcWarning(_("Forced interruption"))), 1)
+        logger.write(
+            "\n\n%s\n\n" % (src.printcolors.printcWarning(_("Forced interruption"))), 1
+        )
     except Exception as e:
         msg = _("CRITICAL ERROR: The jobs loop has been interrupted\n")
-        logger.write("\n\n%s\n" % src.printcolors.printcError(msg) )
+        logger.write("\n\n%s\n" % src.printcolors.printcError(msg))
         logger.write("%s\n" % str(e))
         # get stack
         __, __, exc_traceback = sys.exc_info()
@@ -1854,16 +1907,17 @@ def run(args, runner, logger):
         traceback.print_tb(exc_traceback, file=fp)
         fp.seek(0)
         stack = fp.read()
-        logger.write("\nTRACEBACK: %s\n" % stack.replace('"',"'"), 1)
-        
+        logger.write("\nTRACEBACK: %s\n" % stack.replace('"', "'"), 1)
+
     finally:
         res = 0
         if interruped:
             res = 1
-            msg = _("Killing the running jobs and trying"
-                    " to get the corresponding logs\n")
+            msg = _(
+                "Killing the running jobs and trying" " to get the corresponding logs\n"
+            )
             logger.write(src.printcolors.printcWarning(msg))
-            
+
         # find the potential not finished jobs and kill them
         for jb in today_jobs.ljobs:
             if not jb.has_finished():
index ea7cc82136738a343d63a2ecde9c1becc34f72f4..35057bf61e89998268208696e56de2a372c75949 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -28,89 +28,134 @@ import src.debug as DBG
 
 parser = src.options.Options()
 
-parser.add_option('n', 'name', 'string', 'name', _('Optional: The name of the'
-                  ' launcher (default is APPLICATION.profile.launcher_name)'))
-parser.add_option('e', 'exe', 'string', 'path_exe', _('Use this option to generate a launcher which sets'
-                  ' the environment and call the executable given as argument'
-                  ' (its relative path to application workdir, or an exe name present in appli PATH)'))
-parser.add_option('p', 'products', 'list2', 'products',
-    _("Optional: Includes only the specified products."))
-parser.add_option('c', 'catalog', 'string', 'catalog',
-                  _('Optional: The resources catalog to use'))
-parser.add_option('', 'gencat', 'string', 'gencat',
-                  _("Optional: Create a resources catalog for the specified machines "
-                  "(separated with ',') \n\tNOTICE: this command will ssh to retrieve"
-                  " information to each machine in the list"))
-parser.add_option('', 'use_mesa', 'boolean', 'use_mesa',
-                  _("Optional: Create a launcher that will use mesa products\n\t"
-                  "It can be usefull whan salome is used on a remote machine through ssh"))
-parser.add_option('', 'no_path_init', 'boolean', 'no_path_init',
-                 _("Optional: Create a launcher that will not reinitilise all path variables\n\t"
-                   "By default only PATH is not reinitialised (its value is inherited from "
-                   "user's environment)\n\tUse no_path_init option to suppress the reinitilisation"
-                   " of every paths (LD_LIBRARY_PATH, PYTHONPATH, ...)"))
-
-
-def generate_launch_file(config,
-                         logger,
-                         launcher_name,
-                         pathlauncher,
-                         path_exe,
-                         env_info,
-                         display=True,
-                         additional_env={},
-                         no_path_init=False):
-    '''Generates the launcher file.
-    
+parser.add_option(
+    "n",
+    "name",
+    "string",
+    "name",
+    _(
+        "Optional: The name of the"
+        " launcher (default is APPLICATION.profile.launcher_name)"
+    ),
+)
+parser.add_option(
+    "e",
+    "exe",
+    "string",
+    "path_exe",
+    _(
+        "Use this option to generate a launcher which sets"
+        " the environment and call the executable given as argument"
+        " (its relative path to application workdir, or an exe name present in appli PATH)"
+    ),
+)
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: Includes only the specified products."),
+)
+parser.add_option(
+    "c", "catalog", "string", "catalog", _("Optional: The resources catalog to use")
+)
+parser.add_option(
+    "",
+    "gencat",
+    "string",
+    "gencat",
+    _(
+        "Optional: Create a resources catalog for the specified machines "
+        "(separated with ',') \n\tNOTICE: this command will ssh to retrieve"
+        " information to each machine in the list"
+    ),
+)
+parser.add_option(
+    "",
+    "use_mesa",
+    "boolean",
+    "use_mesa",
+    _(
+        "Optional: Create a launcher that will use mesa products\n\t"
+        "It can be usefull whan salome is used on a remote machine through ssh"
+    ),
+)
+parser.add_option(
+    "",
+    "no_path_init",
+    "boolean",
+    "no_path_init",
+    _(
+        "Optional: Create a launcher that will not reinitilise all path variables\n\t"
+        "By default only PATH is not reinitialised (its value is inherited from "
+        "user's environment)\n\tUse no_path_init option to suppress the reinitilisation"
+        " of every paths (LD_LIBRARY_PATH, PYTHONPATH, ...)"
+    ),
+)
+
+
+def generate_launch_file(
+    config,
+    logger,
+    launcher_name,
+    pathlauncher,
+    path_exe,
+    env_info,
+    display=True,
+    additional_env={},
+    no_path_init=False,
+):
+    """Generates the launcher file.
+
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :param launcher_name str: The name of the launcher to generate
-    :param path_exe str: The executable to use (either a relative path to 
+    :param path_exe str: The executable to use (either a relative path to
                          application workdir, or an exe name in the path)
     :param pathlauncher str: The path to the launcher to generate
     :param display boolean: If False, do not print anything in the terminal
-    :param additional_env dict: The dict giving additional 
+    :param additional_env dict: The dict giving additional
                                 environment variables
     :param env_info str: The list of products to add in the files.
     :return: The launcher file path.
     :rtype: str
-    '''
+    """
     # build the launcher path, delete it if it exists
     filepath = os.path.join(pathlauncher, launcher_name)
     if os.path.exists(filepath):
         os.remove(filepath)
-    kernel_root_dir=None
-    cmd=None
-    salome_application_name=None
-    app_root_dir=None
+    kernel_root_dir = None
+    cmd = None
+    salome_application_name = None
+    app_root_dir = None
 
     if path_exe:
-        #case of a launcher based upon an executable
-        
+        # case of a launcher based upon an executable
+
         if os.path.basename(path_exe) != path_exe:
-            # for a relative (to workdir) path 
+            # for a relative (to workdir) path
             # build absolute path of exe and check it
-            exepath=os.path.join(config.APPLICATION.workdir, path_exe)
+            exepath = os.path.join(config.APPLICATION.workdir, path_exe)
             if not os.path.exists(exepath):
                 raise src.SatException(_("cannot find executable given : %s" % exepath))
         else:
-            exepath=path_exe 
+            exepath = path_exe
 
         # select the shell for the launcher (bash/bat)
         # and build the command used to launch the exe
         if src.architecture.is_windows():
-            shell="bat"
-            cmd="\n\nrem Launch exe with user arguments\n%s " % exepath + "%*"
+            shell = "bat"
+            cmd = "\n\nrem Launch exe with user arguments\n%s " % exepath + "%*"
         else:
-            shell="bash"
-            cmd='\n\n# Launch exe with user arguments\n%s "$*"' % exepath
+            shell = "bash"
+            cmd = '\n\n# Launch exe with user arguments\n%s "$*"' % exepath
 
     else:
-        #case of a salome python2/3 launcher
-        shell="cfgForPy"
+        # case of a salome python2/3 launcher
+        shell = "cfgForPy"
 
-        # get KERNEL bin installation path 
+        # get KERNEL bin installation path
         # (in order for the launcher to get python salomeContext API)
         kernel_cfg = src.product.get_product_config(config, "KERNEL")
         if not src.product.check_installation(config, kernel_cfg):
@@ -118,86 +163,89 @@ def generate_launch_file(config,
         kernel_root_dir = kernel_cfg.install_dir
         # set kernel bin dir (considering fhs property)
         if src.get_property_in_product_cfg(kernel_cfg, "fhs"):
-            bin_kernel_install_dir = os.path.join(kernel_root_dir,"bin") 
+            bin_kernel_install_dir = os.path.join(kernel_root_dir, "bin")
         else:
-            bin_kernel_install_dir = os.path.join(kernel_root_dir,"bin","salome") 
+            bin_kernel_install_dir = os.path.join(kernel_root_dir, "bin", "salome")
 
-        # Add two sat variables used by fileEnviron to choose the right launcher header 
+        # Add two sat variables used by fileEnviron to choose the right launcher header
         # and do substitutions
-        additional_env['sat_bin_kernel_install_dir'] = bin_kernel_install_dir
+        additional_env["sat_bin_kernel_install_dir"] = bin_kernel_install_dir
         if "python3" in config.APPLICATION and config.APPLICATION.python3 == "yes":
-            additional_env['sat_python_version'] = 3
+            additional_env["sat_python_version"] = 3
         else:
-            additional_env['sat_python_version'] = 2
+            additional_env["sat_python_version"] = 2
 
     # check if the application contains an application module
-    l_product_info = src.product.get_products_infos(config.APPLICATION.products.keys(),
-                                                    config)
+    l_product_info = src.product.get_products_infos(
+        config.APPLICATION.products.keys(), config
+    )
     for prod_name, prod_info in l_product_info:
         # look for a salome application
         if src.get_property_in_product_cfg(prod_info, "is_salome_application") == "yes":
             # if the user chose the -p option (env_info is not None), only set the appli name if the product was selected.
-            if env_info == None or ( prod_name in env_info):
-                salome_application_name=prod_info.install_dir
+            if env_info == None or (prod_name in env_info):
+                salome_application_name = prod_info.install_dir
             continue
 
     # if the application contains an application module, we set ABSOLUTE_APPLI_PATH to it.
     # if not we set it to KERNEL_INSTALL_DIR, which is sufficient, except for salome test
     if salome_application_name:
-        app_root_dir=salome_application_name
+        app_root_dir = salome_application_name
     elif kernel_root_dir:
-        app_root_dir=kernel_root_dir
+        app_root_dir = kernel_root_dir
 
     # Add the APPLI and ABSOLUTE_APPLI_PATH variable
-    additional_env['APPLI'] = filepath
+    additional_env["APPLI"] = filepath
     if app_root_dir:
-        additional_env['ABSOLUTE_APPLI_PATH'] = app_root_dir
+        additional_env["ABSOLUTE_APPLI_PATH"] = app_root_dir
 
     # create an environment file writer
-    writer = src.environment.FileEnvWriter(config,
-                                           logger,
-                                           pathlauncher,
-                                           None,
-                                           env_info)
+    writer = src.environment.FileEnvWriter(config, logger, pathlauncher, None, env_info)
 
     # Display some information
     if display:
         # Write the launcher file
-        logger.write(_("Generating launcher for %s :\n") % 
-                     src.printcolors.printcLabel(config.VARS.application), 1)
+        logger.write(
+            _("Generating launcher for %s :\n")
+            % src.printcolors.printcLabel(config.VARS.application),
+            1,
+        )
         logger.write("  %s\n" % src.printcolors.printcLabel(filepath), 1)
-    
+
     # Write the launcher
-    writer.write_env_file(filepath, 
-                          False,  # for launch
-                          shell,
-                          additional_env=additional_env,
-                          no_path_init=no_path_init)
-    
-
-    # ... and append the launch of the exe 
+    writer.write_env_file(
+        filepath,
+        False,  # for launch
+        shell,
+        additional_env=additional_env,
+        no_path_init=no_path_init,
+    )
+
+    # ... and append the launch of the exe
     if cmd:
         with open(filepath, "a") as exe_launcher:
             exe_launcher.write(cmd)
 
     # change the rights in order to make the file executable for everybody
-    os.chmod(filepath,
-             stat.S_IRUSR |
-             stat.S_IRGRP |
-             stat.S_IROTH |
-             stat.S_IWUSR |
-             stat.S_IXUSR |
-             stat.S_IXGRP |
-             stat.S_IXOTH)
+    os.chmod(
+        filepath,
+        stat.S_IRUSR
+        | stat.S_IRGRP
+        | stat.S_IROTH
+        | stat.S_IWUSR
+        | stat.S_IXUSR
+        | stat.S_IXGRP
+        | stat.S_IXOTH,
+    )
     return filepath
 
 
 def generate_catalog(machines, config, logger):
     """Generates an xml catalog file from a list of machines.
-    
-    :param machines List: The list of machines to add in the catalog   
+
+    :param machines List: The list of machines to add in the catalog
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: The catalog file path.
     :rtype: str
@@ -205,71 +253,77 @@ def generate_catalog(machines, config, logger):
     # remove empty machines
     machines = map(lambda l: l.strip(), machines)
     machines = filter(lambda l: len(l) > 0, machines)
-    
+
     # log something
-    src.printcolors.print_value(logger, _("Generate Resources Catalog"),
-                                ", ".join(machines), 4)
-    
+    src.printcolors.print_value(
+        logger, _("Generate Resources Catalog"), ", ".join(machines), 4
+    )
+
     # The command to execute on each machine in order to get some information
     cmd = '"cat /proc/cpuinfo | grep MHz ; cat /proc/meminfo | grep MemTotal"'
     user = getpass.getuser()
 
     # Create the catalog path
     catfile = src.get_tmp_filename(config, "CatalogResources.xml")
-    with open(catfile, 'w') as catalog:
+    with open(catfile, "w") as catalog:
         # Write into it
         catalog.write("<!DOCTYPE ResourcesCatalog>\n<resources>\n")
         for k in machines:
-            if not src.architecture.is_windows(): 
-                logger.write("    ssh %s " % (k + " ").ljust(20, '.'), 4)
+            if not src.architecture.is_windows():
+                logger.write("    ssh %s " % (k + " ").ljust(20, "."), 4)
                 logger.flush()
 
                 # Verify that the machine is accessible
                 ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s %s' % (k, cmd)
-                p = subprocess.Popen(ssh_cmd, shell=True,
-                        stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
+                p = subprocess.Popen(
+                    ssh_cmd,
+                    shell=True,
+                    stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                )
                 p.wait()
 
-                machine_access = (p.returncode == 0) 
-                if not machine_access: # The machine is not accessible
+                machine_access = p.returncode == 0
+                if not machine_access:  # The machine is not accessible
                     logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 4)
-                    logger.write("    " + 
-                                 src.printcolors.printcWarning(p.stderr.read()), 2)
+                    logger.write(
+                        "    " + src.printcolors.printcWarning(p.stderr.read()), 2
+                    )
                 else:
                     # The machine is accessible, write the corresponding section on
                     # the xml file
                     logger.write(src.printcolors.printc(src.OK_STATUS) + "\n", 4)
                     lines = p.stdout.readlines()
-                    freq = lines[0][:-1].split(':')[-1].split('.')[0].strip()
-                    nb_proc = len(lines) -1
-                    memory = lines[-1].split(':')[-1].split()[0].strip()
+                    freq = lines[0][:-1].split(":")[-1].split(".")[0].strip()
+                    nb_proc = len(lines) - 1
+                    memory = lines[-1].split(":")[-1].split()[0].strip()
                     memory = int(memory) / 1000
 
             catalog.write("    <machine\n")
-            catalog.write("        protocol=\"ssh\"\n")
-            catalog.write("        nbOfNodes=\"1\"\n")
-            catalog.write("        mode=\"interactif\"\n")
-            catalog.write("        OS=\"LINUX\"\n")
-
-            if (not src.architecture.is_windows()) and machine_access :
-                catalog.write("        CPUFreqMHz=\"%s\"\n" % freq)
-                catalog.write("        nbOfProcPerNode=\"%s\"\n" % nb_proc)
-                catalog.write("        memInMB=\"%s\"\n" % memory)
-
-            catalog.write("        userName=\"%s\"\n" % user)
-            catalog.write("        name=\"%s\"\n" % k)
-            catalog.write("        hostname=\"%s\"\n" % k)
+            catalog.write('        protocol="ssh"\n')
+            catalog.write('        nbOfNodes="1"\n')
+            catalog.write('        mode="interactif"\n')
+            catalog.write('        OS="LINUX"\n')
+
+            if (not src.architecture.is_windows()) and machine_access:
+                catalog.write('        CPUFreqMHz="%s"\n' % freq)
+                catalog.write('        nbOfProcPerNode="%s"\n' % nb_proc)
+                catalog.write('        memInMB="%s"\n' % memory)
+
+            catalog.write('        userName="%s"\n' % user)
+            catalog.write('        name="%s"\n' % k)
+            catalog.write('        hostname="%s"\n' % k)
             catalog.write("    >\n")
             catalog.write("    </machine>\n")
 
         catalog.write("</resources>\n")
     return catfile
 
+
 def copy_catalog(config, catalog_path):
     """Copy the xml catalog file into the right location
-    
+
     :param config Config: The global configuration
     :param catalog_path str: the catalog file path
     :return: The environment dictionary corresponding to the file path.
@@ -284,18 +338,20 @@ def copy_catalog(config, catalog_path):
     # Do the copy
     if catalog_path != new_catalog_path:
         shutil.copy(catalog_path, new_catalog_path)
-    additional_environ = {'USER_CATALOG_RESOURCES_FILE' : new_catalog_path}
+    additional_environ = {"USER_CATALOG_RESOURCES_FILE": new_catalog_path}
     return additional_environ
 
 
-
 ##################################################
 
 ##
 # Describes the command
 def description():
-    return _("The launcher command generates a SALOME launcher.\n\nexample:"
-             "\nsat launcher SALOME-master")
+    return _(
+        "The launcher command generates a SALOME launcher.\n\nexample:"
+        "\nsat launcher SALOME-master"
+    )
+
 
 ##
 # Runs the command.
@@ -305,26 +361,26 @@ def run(args, runner, logger):
     (options, args) = parser.parse_args(args)
 
     # Verify that the command was called with an application
-    src.check_config_has_application( runner.cfg )
-    
+    src.check_config_has_application(runner.cfg)
+
     # Determine the launcher name (from option, profile section or by default "salome")
     if options.products is None:
         environ_info = None
     else:
-        # add products specified by user (only products 
+        # add products specified by user (only products
         # included in the application)
-        environ_info = filter(lambda l:
-                              l in runner.cfg.APPLICATION.products.keys(),
-                              options.products)
+        environ_info = filter(
+            lambda l: l in runner.cfg.APPLICATION.products.keys(), options.products
+        )
     if options.name:
         launcher_name = options.name
     else:
         launcher_name = src.get_launcher_name(runner.cfg)
 
-    no_path_initialisation=False
+    no_path_initialisation = False
     if options.no_path_init:
         no_path_initialisation = True
-        
+
     # set the launcher path
     launcher_path = runner.cfg.APPLICATION.workdir
 
@@ -335,9 +391,7 @@ def run(args, runner, logger):
 
     # Generate a catalog of resources if the corresponding option was called
     if options.gencat:
-        catalog_path  = generate_catalog(options.gencat.split(","),
-                                         runner.cfg,
-                                         logger)
+        catalog_path = generate_catalog(options.gencat.split(","), runner.cfg, logger)
         additional_environ = copy_catalog(runner.cfg, catalog_path)
 
     # activate mesa use in the generated launcher
@@ -346,16 +400,18 @@ def run(args, runner, logger):
 
     # option -e has precedence over section profile
     if not options.path_exe and src.get_launcher_exe(runner.cfg):
-        options.path_exe=src.get_launcher_exe(runner.cfg)
+        options.path_exe = src.get_launcher_exe(runner.cfg)
 
     # Generate the launcher
-    generate_launch_file(runner.cfg,
-                         logger,
-                         launcher_name,
-                         launcher_path,
-                         options.path_exe,
-                         additional_env = additional_environ,
-                         env_info=environ_info,
-                         no_path_init = no_path_initialisation )
+    generate_launch_file(
+        runner.cfg,
+        logger,
+        launcher_name,
+        launcher_path,
+        options.path_exe,
+        additional_env=additional_environ,
+        env_info=environ_info,
+        no_path_init=no_path_initialisation,
+    )
 
     return 0
index 22a788570fc8fa85895c4ea82891a55e1b865ba6..6bd3ba4c25b8e4420ae15273731a33a580551fb3 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,36 +25,61 @@ import stat
 
 # Compatibility python 2/3 for input function
 # input stays input for python 3 and input = raw_input for python 2
-try: 
+try:
     input = raw_input
-except NameError: 
+except NameError:
     pass
 
 import src
 
 # Define all possible option for log command :  sat log <options>
 parser = src.options.Options()
-parser.add_option('t', 'terminal', 'boolean', 'terminal',
-                  "Optional: Show the log (in terminal) of a command, with user choice.")
-parser.add_option('l', 'last', 'boolean', 'last',
-                  "Optional: Show the log (in browser) of the last launched command.")
-parser.add_option('', 'last_compile', 'boolean', 'last_compile',
-                  "Optional: Show the log (in terminal) of the last compilation for all products.")
-parser.add_option('f', 'full', 'boolean', 'full',
-                  "Optional: Show the logs of ALL the launched commands.")
-parser.add_option('c', 'clean', 'int', 'clean',
-                  "Erase the n most ancient log files.")
-parser.add_option('n', 'no_browser', 'boolean', 'no_browser',
-                  "Optional: Do not launch the browser at the end of the command. Only update the hat file.")
+parser.add_option(
+    "t",
+    "terminal",
+    "boolean",
+    "terminal",
+    "Optional: Show the log (in terminal) of a command, with user choice.",
+)
+parser.add_option(
+    "l",
+    "last",
+    "boolean",
+    "last",
+    "Optional: Show the log (in browser) of the last launched command.",
+)
+parser.add_option(
+    "",
+    "last_compile",
+    "boolean",
+    "last_compile",
+    "Optional: Show the log (in terminal) of the last compilation for all products.",
+)
+parser.add_option(
+    "f",
+    "full",
+    "boolean",
+    "full",
+    "Optional: Show the logs of ALL the launched commands.",
+)
+parser.add_option("c", "clean", "int", "clean", "Erase the n most ancient log files.")
+parser.add_option(
+    "n",
+    "no_browser",
+    "boolean",
+    "no_browser",
+    "Optional: Do not launch the browser at the end of the command. Only update the hat file.",
+)
+
 
 def get_last_log_file(logDir, notShownCommands):
-    '''Used in case of last option. Get the last log command file path.
-    
+    """Used in case of last option. Get the last log command file path.
+
     :param logDir str: The directory where to search the log files
     :param notShownCommands list: the list of commands to ignore
     :return: the path to the last log file
     :rtype: str
-    '''
+    """
     last = (_, 0)
     for fileName in os.listdir(logDir):
         # YYYYMMDD_HHMMSS_namecmd.xml
@@ -62,7 +87,7 @@ def get_last_log_file(logDir, notShownCommands):
         oExpr = re.compile(sExpr)
         if oExpr.search(fileName):
             # get date and hour and format it
-            date_hour_cmd = fileName.split('_')
+            date_hour_cmd = fileName.split("_")
             datehour = date_hour_cmd[0] + date_hour_cmd[1]
             cmd = date_hour_cmd[2]
             if cmd in notShownCommands:
@@ -71,29 +96,30 @@ def get_last_log_file(logDir, notShownCommands):
                 last = (fileName, int(datehour))
     return os.path.join(logDir, last[0])
 
+
 def remove_log_file(filePath, logger):
-    '''if it exists, print a warning and remove the input file
-    
+    """if it exists, print a warning and remove the input file
+
     :param filePath: the path of the file to delete
-    :param logger Logger: the logger instance to use for the print 
-    '''
+    :param logger Logger: the logger instance to use for the print
+    """
     if os.path.exists(filePath):
-        logger.write(src.printcolors.printcWarning("Removing ")
-                     + filePath + "\n", 5)
+        logger.write(src.printcolors.printcWarning("Removing ") + filePath + "\n", 5)
         os.remove(filePath)
 
+
 def print_log_command_in_terminal(filePath, logger):
-    '''Print the contain of filePath. It contains a command log in xml format.
-    
-    :param filePath: The command xml file from which extract the commands 
+    """Print the contain of filePath. It contains a command log in xml format.
+
+    :param filePath: The command xml file from which extract the commands
                      context and traces
-    :param logger Logger: the logging instance to use in order to print.  
-    '''
+    :param logger Logger: the logging instance to use in order to print.
+    """
     logger.write(_("Reading ") + src.printcolors.printcHeader(filePath) + "\n", 5)
     # Instantiate the ReadXmlFile class that reads xml files
     xmlRead = src.xmlManager.ReadXmlFile(filePath)
     # Get the attributes containing the context (user, OS, time, etc..)
-    dAttrText = xmlRead.get_attrib('Site')
+    dAttrText = xmlRead.get_attrib("Site")
     # format dAttrText and print the context
     lAttrText = []
     for attrib in dAttrText:
@@ -101,17 +127,19 @@ def print_log_command_in_terminal(filePath, logger):
     logger.write("\n", 1)
     src.print_info(logger, lAttrText)
     # Get the traces
-    command_traces = xmlRead.get_node_text('Log')
+    command_traces = xmlRead.get_node_text("Log")
     # Print it if there is any
     if command_traces:
-        logger.write(src.printcolors.printcHeader(
-                                    _("Here are the command traces :\n")), 1)
+        logger.write(
+            src.printcolors.printcHeader(_("Here are the command traces :\n")), 1
+        )
         logger.write(command_traces, 1)
         logger.write("\n", 1)
 
+
 def show_last_logs(logger, config, log_dirs):
     """Show last compilation logs"""
-    log_dir = os.path.join(config.APPLICATION.workdir, 'LOGS')
+    log_dir = os.path.join(config.APPLICATION.workdir, "LOGS")
     sorted_log_dirs = sorted(log_dirs)
     # list the logs
     nb = len(log_dirs)
@@ -122,19 +150,20 @@ def show_last_logs(logger, config, log_dirs):
             k = index + i * col_size
             if k < nb:
                 l = sorted_log_dirs[k]
-                str_indice = src.printcolors.printcLabel("%2d" % (k+1))
+                str_indice = src.printcolors.printcLabel("%2d" % (k + 1))
                 log_name = l
                 logger.write("%s: %-30s" % (str_indice, log_name), 1, False)
         logger.write("\n", 1, False)
 
     # loop till exit
     x = -1
-    while (x < 0):
+    while x < 0:
         x = ask_value(nb)
         if x > 0:
-            product_log_dir = os.path.join(log_dir, sorted_log_dirs[x-1])
+            product_log_dir = os.path.join(log_dir, sorted_log_dirs[x - 1])
             show_product_last_logs(logger, config, product_log_dir)
 
+
 def show_product_last_logs(logger, config, product_log_dir):
     """Show last compilation logs of a product"""
     # sort the files chronologically
@@ -142,35 +171,37 @@ def show_product_last_logs(logger, config, product_log_dir):
     for file_n in os.listdir(product_log_dir):
         my_stat = os.stat(os.path.join(product_log_dir, file_n))
         l_time_file.append(
-              (datetime.datetime.fromtimestamp(my_stat[stat.ST_MTIME]), file_n))
-    
-    # display the available logs
+            (datetime.datetime.fromtimestamp(my_stat[stat.ST_MTIME]), file_n)
+        )
+
+    # display the available logs
     for i, (__, file_name) in enumerate(sorted(l_time_file)):
-        str_indice = src.printcolors.printcLabel("%2d" % (i+1))
+        str_indice = src.printcolors.printcLabel("%2d" % (i + 1))
         opt = []
         my_stat = os.stat(os.path.join(product_log_dir, file_name))
         opt.append(str(datetime.datetime.fromtimestamp(my_stat[stat.ST_MTIME])))
-        
+
         opt.append("(%8.2f)" % (my_stat[stat.ST_SIZE] / 1024.0))
         logger.write(" %-35s" % " ".join(opt), 1, False)
         logger.write("%s: %-30s\n" % (str_indice, file_name), 1, False)
-        
+
     # loop till exit
     x = -1
-    while (x < 0):
+    while x < 0:
         x = ask_value(len(l_time_file))
         if x > 0:
-            (__, file_name) =  sorted(l_time_file)[x-1]
+            (__, file_name) = sorted(l_time_file)[x - 1]
             log_file_path = os.path.join(product_log_dir, file_name)
             src.system.show_in_editor(config.USER.editor, log_file_path, logger)
-        
+
+
 def ask_value(nb):
-    '''Ask for an int n. 0<n<nb
-    
+    """Ask for an int n. 0<n<nb
+
     :param nb int: The maximum value of the value to be returned by the user.
     :return: the value entered by the user. Return -1 if it is not as expected
     :rtype: int
-    '''
+    """
     try:
         # ask for a value
         rep = input(_("Which one (enter or 0 to quit)? "))
@@ -183,25 +214,28 @@ def ask_value(nb):
                 x = -1
     except:
         x = -1
-    
+
     return x
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the log command description.
     :rtype: str
-    '''
-    return _("""\
+    """
+    return _(
+        """\
 The log command gives access to the logs produced by the salomeTools commands.
 
 example:
 >> sat log
-""")
+"""
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with log parameter.
-    '''
+    """method that is called when salomeTools is called with log parameter."""
     # Parse the options
     (options, args) = parser.parse_args(args)
 
@@ -210,17 +244,17 @@ def run(args, runner, logger):
 
     # Print a header
     nb_files_log_dir = len(glob.glob(os.path.join(logDir, "*")))
-    info = [("log directory", logDir), 
-            ("number of log files", nb_files_log_dir)]
+    info = [("log directory", logDir), ("number of log files", nb_files_log_dir)]
     src.print_info(logger, info)
-    
-    # If the clean options is invoked, 
+
+    # If the clean option is invoked,
     # do nothing but deleting the concerned files.
     if options.clean:
         nbClean = options.clean
         # get the list of files to remove
-        lLogs = src.logger.list_log_file(logDir, 
-                                   src.logger.log_all_command_file_expression)
+        lLogs = src.logger.list_log_file(
+            logDir, src.logger.log_all_command_file_expression
+        )
         nbLogFiles = len(lLogs)
         # Delete all if the invoked number is bigger than the number of log files
         if nbClean > nbLogFiles:
@@ -231,21 +265,24 @@ def run(args, runner, logger):
             # remove the xml log file
             remove_log_file(filePath, logger)
             # remove also the corresponding txt file in OUT directory
-            txtFilePath = os.path.join(os.path.dirname(filePath), 
-                            'OUT', 
-                            os.path.basename(filePath)[:-len('.xml')] + '.txt')
+            txtFilePath = os.path.join(
+                os.path.dirname(filePath),
+                "OUT",
+                os.path.basename(filePath)[: -len(".xml")] + ".txt",
+            )
             remove_log_file(txtFilePath, logger)
-            # remove also the corresponding pyconf (do not exist 2016-06) 
+            # also remove the corresponding pyconf (does not exist as of 2016-06)
             # file in OUT directory
-            pyconfFilePath = os.path.join(os.path.dirname(filePath), 
-                            'OUT', 
-                            os.path.basename(filePath)[:-len('.xml')] + '.pyconf')
+            pyconfFilePath = os.path.join(
+                os.path.dirname(filePath),
+                "OUT",
+                os.path.basename(filePath)[: -len(".xml")] + ".pyconf",
+            )
             remove_log_file(pyconfFilePath, logger)
 
-        
         logger.write(src.printcolors.printcSuccess("OK\n"))
         logger.write("%i logs deleted.\n" % nbClean)
-        return 0 
+        return 0
 
     # determine the commands to show in the hat log
     notShownCommands = list(runner.cfg.INTERNAL.log.not_shown_commands)
@@ -253,97 +290,102 @@ def run(args, runner, logger):
         notShownCommands = []
 
     # Find the stylesheets Directory and files
-    xslDir = os.path.join(runner.cfg.VARS.srcDir, 'xsl')
+    xslDir = os.path.join(runner.cfg.VARS.srcDir, "xsl")
     xslCommand = os.path.join(xslDir, "command.xsl")
     xslHat = os.path.join(xslDir, "hat.xsl")
     xsltest = os.path.join(xslDir, "test.xsl")
     imgLogo = os.path.join(xslDir, "LOGO-SAT.png")
-    
+
     # copy the stylesheets in the log directory
     # OP We use copy instead of copy2 to update the creation date
     #    So we can clean the LOGS directories easily
     try:
-      src.ensure_path_exists(logDir)
-      shutil.copy(xslCommand, logDir)
-      shutil.copy(xslHat, logDir)
-      src.ensure_path_exists(os.path.join(logDir, "TEST"))
-      shutil.copy(xsltest, os.path.join(logDir, "TEST"))
-      shutil.copy(imgLogo, logDir)
+        src.ensure_path_exists(logDir)
+        shutil.copy(xslCommand, logDir)
+        shutil.copy(xslHat, logDir)
+        src.ensure_path_exists(os.path.join(logDir, "TEST"))
+        shutil.copy(xsltest, os.path.join(logDir, "TEST"))
+        shutil.copy(imgLogo, logDir)
     except:
-      # we are here  if an user make sat log in jenkins LOGS without write rights
-      # Make a warning and do nothing
-      logger.warning("problem for writing in directory '%s', may be not owner." % logDir)
+        # we get here if a user runs sat log in a jenkins LOGS directory without write rights
+        # Make a warning and do nothing
+        logger.warning(
+            "problem for writing in directory '%s', may be not owner." % logDir
+        )
 
     # If the last_compile option is invoked, show the last compilation logs
     if options.last_compile:
         src.check_config_has_application(runner.cfg)
-        log_dirs = os.listdir(os.path.join(runner.cfg.APPLICATION.workdir, 'LOGS'))
+        log_dirs = os.listdir(os.path.join(runner.cfg.APPLICATION.workdir, "LOGS"))
         show_last_logs(logger, runner.cfg, log_dirs)
         return 0
 
     # If the last option is invoked, just show the last log file
     if options.last:
-        lastLogFilePath = get_last_log_file(logDir,
-                                            notShownCommands + ["config"])
+        lastLogFilePath = get_last_log_file(logDir, notShownCommands + ["config"])
         if options.terminal:
             # Show the log corresponding to the selected command call
             print_log_command_in_terminal(lastLogFilePath, logger)
         else:
             # open the log xml file in the user editor
-            src.system.show_in_editor(runner.cfg.USER.browser, 
-                                      lastLogFilePath, logger)
+            src.system.show_in_editor(runner.cfg.USER.browser, lastLogFilePath, logger)
         return 0
 
     # If the user asks for a terminal display
     if options.terminal:
-        # Parse the log directory in order to find 
+        # Parse the log directory in order to find
         # all the files corresponding to the commands
-        lLogs = src.logger.list_log_file(logDir, 
-                                   src.logger.log_macro_command_file_expression)
+        lLogs = src.logger.list_log_file(
+            logDir, src.logger.log_macro_command_file_expression
+        )
         lLogsFiltered = []
         for filePath, __, date, __, hour, cmd, __ in lLogs:
-            showLog, cmdAppli, __ = src.logger.show_command_log(filePath, cmd, 
-                                runner.cfg.VARS.application, notShownCommands)
+            showLog, cmdAppli, __ = src.logger.show_command_log(
+                filePath, cmd, runner.cfg.VARS.application, notShownCommands
+            )
             if showLog:
                 lLogsFiltered.append((filePath, date, hour, cmd, cmdAppli))
-            
+
         lLogsFiltered = sorted(lLogsFiltered)
         nb_logs = len(lLogsFiltered)
         index = 0
-        # loop on all files and print it with date, time and command name 
-        for __, date, hour, cmd, cmdAppli in lLogsFiltered:          
+        # loop on all files and print it with date, time and command name
+        for __, date, hour, cmd, cmdAppli in lLogsFiltered:
             num = src.printcolors.printcLabel("%2d" % (nb_logs - index))
-            logger.write("%s: %13s %s %s %s\n" % 
-                         (num, cmd, date, hour, cmdAppli), 1, False)
+            logger.write(
+                "%s: %13s %s %s %s\n" % (num, cmd, date, hour, cmdAppli), 1, False
+            )
             index += 1
-        
+
         # ask the user which command log should be displayed
         x = -1
-        while (x < 0):
+        while x < 0:
             x = ask_value(nb_logs)
             if x > 0:
                 index = len(lLogsFiltered) - int(x)
                 # Show the log corresponding to the selected command call
-                print_log_command_in_terminal(lLogsFiltered[index][0], logger)                
+                print_log_command_in_terminal(lLogsFiltered[index][0], logger)
                 x = 0
-        
+
         return 0
-                    
+
     # Create or update the hat xml that gives access to all the commands log files
     logger.write(_("Generating the hat log file (can be long) ... "), 3)
-    xmlHatFilePath = os.path.join(logDir, 'hat.xml')
+    xmlHatFilePath = os.path.join(logDir, "hat.xml")
     try:
-      src.logger.update_hat_xml(logDir,
-                              application = runner.cfg.VARS.application, 
-                              notShownCommands = notShownCommands)
+        src.logger.update_hat_xml(
+            logDir,
+            application=runner.cfg.VARS.application,
+            notShownCommands=notShownCommands,
+        )
 
-      logger.write(src.printcolors.printc("OK"), 3)
+        logger.write(src.printcolors.printc("OK"), 3)
     except:
-      logger.write(src.printcolors.printc("KO"), 3)
-      logger.write(" problem update hat.xml", 3)
+        logger.write(src.printcolors.printc("KO"), 3)
+        logger.write(" problem updating hat.xml", 3)
 
     logger.write("\n", 3)
-    
+
     # open the hat xml in the user editor
     if not options.no_browser:
         logger.write(_("\nOpening the hat log file %s\n" % xmlHatFilePath), 3)
index 39f562a100e94d500bdf68572b9a93e8f8d2b762..6001d237643eee9ee121b8b86a9c42284b784042 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -23,10 +23,21 @@ import src
 
 # Define all possible options for the make command: sat make <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to configure. This option accepts a comma separated list.'))
-parser.add_option('o', 'option', 'string', 'option',
-    _('Optional: Option to add to the make command.'), "")
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to configure. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "o",
+    "option",
+    "string",
+    "option",
+    _("Optional: Option to add to the make command."),
+    "",
+)
 
 
 def log_step(logger, header, step):
@@ -35,6 +46,7 @@ def log_step(logger, header, step):
     logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -43,40 +55,42 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
+
 def make_all_products(config, products_infos, make_option, logger):
-    '''Execute the proper configuration commands 
+    """Execute the proper configuration commands
        in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_info list: List of
                                  (str, Config) => (product_name, product_info)
     :param make_option str: The options to add to the command
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
+    """
     res = 0
     for p_name_info in products_infos:
         res_prod = make_product(p_name_info, make_option, config, logger)
         if res_prod != 0:
-            res += 1 
+            res += 1
     return res
 
+
 def make_product(p_name_info, make_option, config, logger):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param make_option str: The options to add to the command
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-    
+
     # Logging
     logger.write("\n", 4, False)
     logger.write("################ ", 4)
@@ -87,8 +101,11 @@ def make_product(p_name_info, make_option, config, logger):
     logger.flush()
 
     # Do nothing if the product is not compilable
-    if ("properties" in p_info and "compilation" in p_info.properties and 
-                                        p_info.properties.compilation == "no"):
+    if (
+        "properties" in p_info
+        and "compilation" in p_info.properties
+        and p_info.properties.compilation == "no"
+    ):
         log_step(logger, header, "ignored")
         logger.write("\n", 3, False)
         return 0
@@ -96,12 +113,12 @@ def make_product(p_name_info, make_option, config, logger):
     # Instantiate the class that manages all the construction commands
     # like cmake, make, make install, make test, environment management, etc...
     builder = src.compilation.Builder(config, logger, p_name, p_info)
-    
+
     # Prepare the environment
     log_step(logger, header, "PREPARE ENV")
     res_prepare = builder.prepare()
     log_res_step(logger, res_prepare)
-    
+
     # Execute buildconfigure, configure if the product is autotools
     # Execute cmake if the product is cmake
     len_end_line = 20
@@ -113,35 +130,42 @@ def make_product(p_name_info, make_option, config, logger):
     else:
         res = builder.make(nb_proc, make_opt_without_j)
     log_res_step(logger, res)
-    
+
     # Log the result
     if res > 0:
         logger.write("\r%s%s" % (header, " " * len_end_line), 3)
         logger.write("\r" + header + src.printcolors.printcError("KO"))
-        logger.write("==== %(KO)s in make of %(name)s \n" %
-            { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+        logger.write(
+            "==== %(KO)s in make of %(name)s \n"
+            % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+            4,
+        )
         logger.flush()
     else:
         logger.write("\r%s%s" % (header, " " * len_end_line), 3)
         logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
         logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-        logger.write("==== Make of %(name)s %(OK)s \n" %
-            { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+        logger.write(
+            "==== Make of %(name)s %(OK)s \n"
+            % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+            4,
+        )
         logger.flush()
     logger.write("\n", 3, False)
 
     return res
 
+
 def get_nb_proc(product_info, config, make_option):
-    
+
     opt_nb_proc = None
     new_make_option = make_option
     if "-j" in make_option:
         oExpr = re.compile("-j[0-9]+")
         found = oExpr.search(make_option)
-        opt_nb_proc = int(re.findall('\d+', found.group())[0])
+        opt_nb_proc = int(re.findall(r"\d+", found.group())[0])
         new_make_option = make_option.replace(found.group(), "")
-    
+
     nbproc = -1
     if "nb_proc" in product_info:
         # nb proc is specified in module definition
@@ -155,58 +179,72 @@ def get_nb_proc(product_info, config, make_option):
             nbproc = opt_nb_proc
         else:
             nbproc = config.VARS.nb_proc
-    
+
     assert nbproc > 0
     return nbproc, new_make_option
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the make command description.
     :rtype: str
-    '''
-    return _("The make command executes the \"make\" command in"
-             " the build directory.\nexample:\nsat make SALOME-master "
-             "--products Python,KERNEL,GUI")
-  
+    """
+    return _(
+        'The make command executes the "make" command in'
+        " the build directory.\nexample:\nsat make SALOME-master "
+        "--products Python,KERNEL,GUI"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with make parameter.
-    '''
-    
+    """method that is called when salomeTools is called with make parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    
+
     # Print some information
-    logger.write(_('Executing the make command in the build '
-                                'directories of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
-    info = [(_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
+    logger.write(
+        _(
+            "Executing the make command in the build "
+            "directories of the application %s\n"
+        )
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
+    info = [
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD"))
+    ]
     src.print_info(logger, info)
-    
+
     # Call the function that will loop over all the products and execute
     # the right command(s)
     if options.option is None:
         options.option = ""
     res = make_all_products(runner.cfg, products_infos, options.option, logger)
-    
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nMake: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
-    return res 
+
+    logger.write(
+        _("\nMake: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
+    return res
index eea3f85b7615712108646a33b69652c3e7777c53..6a93e273abef30c286ad2ff0d888059c2263d6c6 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,8 +22,13 @@ import src
 
 # Define all possible options for the makeinstall command: sat makeinstall <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to install. This option accepts a comma separated list.'))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to install. This option accepts a comma separated list."),
+)
 
 
 def log_step(logger, header, step):
@@ -32,6 +37,7 @@ def log_step(logger, header, step):
     logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -40,38 +46,40 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
+
 def makeinstall_all_products(config, products_infos, logger):
-    '''Execute the proper configuration commands 
+    """Execute the proper configuration commands
        in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_info list: List of
                                  (str, Config) => (product_name, product_info)
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
+    """
     res = 0
     for p_name_info in products_infos:
         res_prod = makeinstall_product(p_name_info, config, logger)
         if res_prod != 0:
-            res += 1 
+            res += 1
     return res
 
+
 def makeinstall_product(p_name_info, config, logger):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-    
+
     # Logging
     logger.write("\n", 4, False)
     logger.write("################ ", 4)
@@ -82,8 +90,11 @@ def makeinstall_product(p_name_info, config, logger):
     logger.flush()
 
     # Do nothing if the product is not compilable
-    if ("properties" in p_info and "compilation" in p_info.properties and 
-                                        p_info.properties.compilation == "no"):
+    if (
+        "properties" in p_info
+        and "compilation" in p_info.properties
+        and p_info.properties.compilation == "no"
+    ):
         log_step(logger, header, "ignored")
         logger.write("\n", 3, False)
         return 0
@@ -91,12 +102,12 @@ def makeinstall_product(p_name_info, config, logger):
     # Instantiate the class that manages all the construction commands
     # like cmake, make, make install, make test, environment management, etc...
     builder = src.compilation.Builder(config, logger, p_name, p_info)
-    
+
     # Prepare the environment
     log_step(logger, header, "PREPARE ENV")
     res_prepare = builder.prepare()
     log_res_step(logger, res_prepare)
-    
+
     # Execute buildconfigure, configure if the product is autotools
     # Execute cmake if the product is cmake
     res = 0
@@ -105,13 +116,16 @@ def makeinstall_product(p_name_info, config, logger):
         res_m = builder.install()
         log_res_step(logger, res_m)
         res += res_m
-    
+
     # Log the result
     if res > 0:
         logger.write("\r%s%s" % (header, " " * 20), 3)
         logger.write("\r" + header + src.printcolors.printcError("KO"))
-        logger.write("==== %(KO)s in make install of %(name)s \n" %
-            { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+        logger.write(
+            "==== %(KO)s in make install of %(name)s \n"
+            % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+            4,
+        )
         logger.write("\n", 3, False)
         logger.flush()
         return res
@@ -122,8 +136,11 @@ def makeinstall_product(p_name_info, config, logger):
         if res > 0:
             logger.write("\r%s%s" % (header, " " * len_end_line), 3)
             logger.write("\r" + header + src.printcolors.printcError("KO"))
-            logger.write("==== %(KO)s in post script execution of %(name)s \n" %
-                { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+            logger.write(
+                "==== %(KO)s in post script execution of %(name)s \n"
+                % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+                4,
+            )
             logger.write("\n", 3, False)
             logger.flush()
             return res
@@ -131,60 +148,77 @@ def makeinstall_product(p_name_info, config, logger):
     logger.write("\r%s%s" % (header, " " * 20), 3)
     logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
     logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-    logger.write("==== Make install of %(name)s %(OK)s \n" %
-        { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+    logger.write(
+        "==== Make install of %(name)s %(OK)s \n"
+        % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+        4,
+    )
     logger.flush()
     logger.write("\n", 3, False)
 
     return res
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the makeinstall command description.
     :rtype: str
-    '''
-    return _("The makeinstall command executes the \"make install\" command in"
-             " the build directory.\nIn case of  product that is constructed "
-             "using a script (build_source :  \"script\"), then the "
-             "makeinstall command do nothing.\n\nexample:\nsat makeinstall "
-             "SALOME-master --products KERNEL,GUI")
-  
+    """
+    return _(
+        'The makeinstall command executes the "make install" command in'
+        " the build directory.\nFor a product that is built "
+        'using a script (build_source: "script"), the '
+        "makeinstall command does nothing.\n\nexample:\nsat makeinstall "
+        "SALOME-master --products KERNEL,GUI"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with makeinstall parameter.
-    '''
-    
+    """method that is called when salomeTools is called with makeinstall parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
 
     # Print some information
-    logger.write(_('Executing the make install command in the build directories of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
-    info = [(_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
+    logger.write(
+        _(
+            "Executing the make install command in the build directories of the application %s\n"
+        )
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
+    info = [
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD"))
+    ]
     src.print_info(logger, info)
-    
+
     # Call the function that will loop over all the products and execute
     # the right command(s)
     res = makeinstall_all_products(runner.cfg, products_infos, logger)
-    
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nMake install: %(status)s (%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
-    return res 
+
+    logger.write(
+        _("\nMake install: %(status)s (%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
+    return res
index f91476e2d6b6b87c7027b2a4ba37431b0af6de62..383232ae2c30e32b30a5f185e76f7b696f6ef064 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -29,7 +29,7 @@ import sys
 import src
 
 from application import get_SALOME_modules
-from  src.versionMinorMajorPatch import MinorMajorPatch as MMP
+from src.versionMinorMajorPatch import MinorMajorPatch as MMP
 import src.debug as DBG
 
 old_python = sys.version_info[0] == 2 and sys.version_info[1] <= 6
@@ -45,7 +45,7 @@ PROJECT_DIR = "PROJECT"
 IGNORED_DIRS = [".git", ".svn"]
 IGNORED_EXTENSIONS = []
 
-PACKAGE_EXT=".tar.gz" # the extension we use for the packages
+PACKAGE_EXT = ".tar.gz"  # the extension we use for the packages
 
 if src.architecture.is_windows():
     PROJECT_TEMPLATE = """#!/usr/bin/env python
@@ -87,7 +87,8 @@ MACHINEPATH : $project_path + "machines/"
 """
 
 
-LOCAL_TEMPLATE = ("""#!/usr/bin/env python
+LOCAL_TEMPLATE = (
+    """#!/usr/bin/env python
 #-*- coding:utf-8 -*-
 
   LOCAL :
@@ -104,49 +105,128 @@ PROJECTS :
 {
   project_file_paths :
   [
-$LOCAL.workdir + $VARS.sep + \"""" + PROJECT_DIR + """\" + $VARS.sep + "project.pyconf"
+$LOCAL.workdir + $VARS.sep + \""""
+    + PROJECT_DIR
+    + """\" + $VARS.sep + "project.pyconf"
   ]
 }
-""")
+"""
+)
 
 # Define all possible options for the package command: sat package <options>
 parser = src.options.Options()
-parser.add_option('b', 'binaries', 'boolean', 'binaries',
-    _('Optional: Produce a binary package.'), False)
-parser.add_option('f', 'force_creation', 'boolean', 'force_creation',
-    _('Optional: Only binary package: produce the archive even if '
-      'there are some missing products.'), False)
-parser.add_option('s', 'sources', 'boolean', 'sources',
-    _('Optional: Produce a compilable archive of the sources of the '
-      'application.'), False)
-parser.add_option('', 'bin_products', 'boolean', 'bin_products',
-    _('Optional: Create binary archives for all products.'), False)
-parser.add_option('', 'with_vcs', 'boolean', 'with_vcs',
-    _('Optional: Do not make archive for products in VCS mode (git, cvs, svn). '
-      'Sat prepare will use VCS mode instead to retrieve them.'
-      '\n          Also, when combined with "--bin_products" option, restrict the building of product archives to VCS products.'),
-    False)
-parser.add_option('', 'ftp', 'boolean', 'ftp',
-    _('Optional: Do not embed archives for products in archive mode.'
-    'Sat prepare will use ftp instead to retrieve them'),
-    False)
-parser.add_option('e', 'exe', 'string', 'exe',
-    _('Optional: Produce an extra launcher based upon the exe given as argument.'), "")
-parser.add_option('p', 'project', 'string', 'project',
-    _('Optional: Produce an archive that contains a project.'), "")
-parser.add_option('t', 'salometools', 'boolean', 'sat',
-    _('Optional: Produce an archive that contains salomeTools.'), False)
-parser.add_option('n', 'name', 'string', 'name',
-    _('Optional: The name or full path of the archive.'), None)
-parser.add_option('', 'add_files', 'list2', 'add_files',
-    _('Optional: The list of additional files to add to the archive.'), [])
-parser.add_option('', 'without_properties', 'properties', 'without_properties',
-    _('Optional: Filter the products by their properties.\n\tSyntax: '
-      '--without_properties <property>:<value>'))
+parser.add_option(
+    "b",
+    "binaries",
+    "boolean",
+    "binaries",
+    _("Optional: Produce a binary package."),
+    False,
+)
+parser.add_option(
+    "f",
+    "force_creation",
+    "boolean",
+    "force_creation",
+    _(
+        "Optional: Only binary package: produce the archive even if "
+        "there are some missing products."
+    ),
+    False,
+)
+parser.add_option(
+    "s",
+    "sources",
+    "boolean",
+    "sources",
+    _("Optional: Produce a compilable archive of the sources of the " "application."),
+    False,
+)
+parser.add_option(
+    "",
+    "bin_products",
+    "boolean",
+    "bin_products",
+    _("Optional: Create binary archives for all products."),
+    False,
+)
+parser.add_option(
+    "",
+    "with_vcs",
+    "boolean",
+    "with_vcs",
+    _(
+        "Optional: Do not make archive for products in VCS mode (git, cvs, svn). "
+        "Sat prepare will use VCS mode instead to retrieve them."
+        '\n          Also, when combined with "--bin_products" option, restrict the building of product archives to VCS products.'
+    ),
+    False,
+)
+parser.add_option(
+    "",
+    "ftp",
+    "boolean",
+    "ftp",
+    _(
+        "Optional: Do not embed archives for products in archive mode. "
+        "Sat prepare will use ftp instead to retrieve them."
+    ),
+    False,
+)
+parser.add_option(
+    "e",
+    "exe",
+    "string",
+    "exe",
+    _("Optional: Produce an extra launcher based upon the exe given as argument."),
+    "",
+)
+parser.add_option(
+    "p",
+    "project",
+    "string",
+    "project",
+    _("Optional: Produce an archive that contains a project."),
+    "",
+)
+parser.add_option(
+    "t",
+    "salometools",
+    "boolean",
+    "sat",
+    _("Optional: Produce an archive that contains salomeTools."),
+    False,
+)
+parser.add_option(
+    "n",
+    "name",
+    "string",
+    "name",
+    _("Optional: The name or full path of the archive."),
+    None,
+)
+parser.add_option(
+    "",
+    "add_files",
+    "list2",
+    "add_files",
+    _("Optional: The list of additional files to add to the archive."),
+    [],
+)
+parser.add_option(
+    "",
+    "without_properties",
+    "properties",
+    "without_properties",
+    _(
+        "Optional: Filter the products by their properties.\n\tSyntax: "
+        "--without_properties <property>:<value>"
+    ),
+)
 
 
 def add_files(tar, name_archive, d_content, logger, f_exclude=None):
-    '''Create an archive containing all directories and files that are given in
+    """Create an archive containing all directories and files that are given in
        the d_content argument.
 
     :param tar tarfile: The tarfile instance used to make the archive.
@@ -159,7 +239,7 @@ def add_files(tar, name_archive, d_content, logger, f_exclude=None):
     :param f_exclude Function: the function that filters
     :return: 0 if success, 1 if not.
     :rtype: int
-    '''
+    """
     # get the max length of the messages in order to make the display
     max_len = len(max(d_content.keys(), key=len))
 
@@ -169,27 +249,31 @@ def add_files(tar, name_archive, d_content, logger, f_exclude=None):
     DBG.write("add tar names", names)
 
     # used to avoid duplications (for pip install in python, or single_install_dir cases)
-    already_added=set()
+    already_added = set()
     for name in names:
         # display information
         len_points = max_len - len(name) + 3
         local_path, archive_path = d_content[name]
         in_archive = os.path.join(name_archive, archive_path)
-        logger.write(name + " " + len_points * "." + " "+ in_archive + " ", 3)
+        logger.write(name + " " + len_points * "." + " " + in_archive + " ", 3)
         # Get the local path and the path in archive
         # of the directory or file to add
         # Add it in the archive
         try:
-            key=local_path+"->"+in_archive
+            key = local_path + "->" + in_archive
             if key not in already_added:
                 if old_python:
-                    tar.add(local_path,
-                                 arcname=in_archive,
-                                 exclude=exclude_VCS_and_extensions_26)
+                    tar.add(
+                        local_path,
+                        arcname=in_archive,
+                        exclude=exclude_VCS_and_extensions_26,
+                    )
                 else:
-                    tar.add(local_path,
-                                 arcname=in_archive,
-                                 filter=exclude_VCS_and_extensions)
+                    tar.add(
+                        local_path,
+                        arcname=in_archive,
+                        filter=exclude_VCS_and_extensions,
+                    )
                 already_added.add(key)
             logger.write(src.printcolors.printcSuccess(_("OK")), 3)
         except Exception as e:
@@ -201,13 +285,13 @@ def add_files(tar, name_archive, d_content, logger, f_exclude=None):
 
 
 def exclude_VCS_and_extensions_26(filename):
-    ''' The function that is used to exclude from package the link to the
+    """The function that is used to exclude from package the link to the
         VCS repositories (like .git) (only for python 2.6)
 
     :param filename Str: The filename to exclude (or not).
     :return: True if the file has to be exclude
     :rtype: Boolean
-    '''
+    """
     for dir_name in IGNORED_DIRS:
         if dir_name in filename:
             return True
@@ -216,14 +300,15 @@ def exclude_VCS_and_extensions_26(filename):
             return True
     return False
 
+
 def exclude_VCS_and_extensions(tarinfo):
-    ''' The function that is used to exclude from package the link to the
+    """The function that is used to exclude from package the link to the
         VCS repositories (like .git)
 
     :param filename Str: The filename to exclude (or not).
     :return: None if the file has to be exclude
     :rtype: tarinfo or None
-    '''
+    """
     filename = tarinfo.name
     for dir_name in IGNORED_DIRS:
         if dir_name in filename:
@@ -233,12 +318,9 @@ def exclude_VCS_and_extensions(tarinfo):
             return None
     return tarinfo
 
-def produce_relative_launcher(config,
-                              logger,
-                              file_dir,
-                              file_name,
-                              binaries_dir_name):
-    '''Create a specific SALOME launcher for the binary package. This launcher
+
+def produce_relative_launcher(config, logger, file_dir, file_name, binaries_dir_name):
+    """Create a specific SALOME launcher for the binary package. This launcher
        uses relative paths.
 
     :param config Config: The global configuration.
@@ -249,119 +331,130 @@ def produce_relative_launcher(config,
                                   are, in the archive.
     :return: the path of the produced launcher
     :rtype: str
-    '''
+    """
 
     # set base mode to "no" for the archive - save current mode to restore it at the end
     if "base" in config.APPLICATION:
-        base_setting=config.APPLICATION.base
+        base_setting = config.APPLICATION.base
     else:
-        base_setting="maybe"
-    config.APPLICATION.base="no"
+        base_setting = "maybe"
+    config.APPLICATION.base = "no"
 
     # get KERNEL installation path
     kernel_info = src.product.get_product_config(config, "KERNEL")
-    kernel_base_name=os.path.basename(kernel_info.install_dir)
+    kernel_base_name = os.path.basename(kernel_info.install_dir)
     if kernel_info.install_mode == "base":
         # case of kernel installed in base. the kernel install dir name is different in the archive
-        kernel_base_name=os.path.basename(os.path.dirname(kernel_info.install_dir))
+        kernel_base_name = os.path.basename(os.path.dirname(kernel_info.install_dir))
 
     kernel_root_dir = os.path.join(binaries_dir_name, kernel_base_name)
 
     # set kernel bin dir (considering fhs property)
     kernel_cfg = src.product.get_product_config(config, "KERNEL")
     if src.get_property_in_product_cfg(kernel_cfg, "fhs"):
-        bin_kernel_install_dir = os.path.join(kernel_root_dir,"bin")
+        bin_kernel_install_dir = os.path.join(kernel_root_dir, "bin")
     else:
-        bin_kernel_install_dir = os.path.join(kernel_root_dir,"bin","salome")
+        bin_kernel_install_dir = os.path.join(kernel_root_dir, "bin", "salome")
 
     # check if the application contains an application module
     # check also if the application has a distene product,
     # in this case get its licence file name
-    l_product_info = src.product.get_products_infos(config.APPLICATION.products.keys(), config)
-    salome_application_name="Not defined"
-    distene_licence_file_name=False
+    l_product_info = src.product.get_products_infos(
+        config.APPLICATION.products.keys(), config
+    )
+    salome_application_name = "Not defined"
+    distene_licence_file_name = False
     for prod_name, prod_info in l_product_info:
         # look for a "salome application" and a distene product
         if src.get_property_in_product_cfg(prod_info, "is_distene") == "yes":
-            distene_licence_file_name = src.product.product_has_licence(prod_info,
-                                            config.PATHS.LICENCEPATH)
+            distene_licence_file_name = src.product.product_has_licence(
+                prod_info, config.PATHS.LICENCEPATH
+            )
         if src.get_property_in_product_cfg(prod_info, "is_salome_application") == "yes":
-            salome_application_name=prod_info.name
+            salome_application_name = prod_info.name
 
     # if the application contains an application module, we set ABSOLUTE_APPLI_PATH to it
     # if not we set it to KERNEL_INSTALL_DIR, which is sufficient, except for salome test
     if salome_application_name == "Not defined":
-        app_root_dir=kernel_root_dir
+        app_root_dir = kernel_root_dir
     else:
-        app_root_dir=os.path.join(binaries_dir_name, salome_application_name)
+        app_root_dir = os.path.join(binaries_dir_name, salome_application_name)
 
-    additional_env={}
-    additional_env['sat_bin_kernel_install_dir'] = "out_dir_Path + " +\
-                                                   config.VARS.sep + bin_kernel_install_dir
+    additional_env = {}
+    additional_env["sat_bin_kernel_install_dir"] = (
+        "out_dir_Path + " + config.VARS.sep + bin_kernel_install_dir
+    )
     if "python3" in config.APPLICATION and config.APPLICATION.python3 == "yes":
-        additional_env['sat_python_version'] = 3
+        additional_env["sat_python_version"] = 3
     else:
-        additional_env['sat_python_version'] = 2
+        additional_env["sat_python_version"] = 2
 
-    additional_env['ABSOLUTE_APPLI_PATH'] = "out_dir_Path" + config.VARS.sep + app_root_dir
+    additional_env["ABSOLUTE_APPLI_PATH"] = (
+        "out_dir_Path" + config.VARS.sep + app_root_dir
+    )
     launcher_name = src.get_launcher_name(config)
-    additional_env['APPLI'] = "out_dir_Path" + config.VARS.sep + file_name
+    additional_env["APPLI"] = "out_dir_Path" + config.VARS.sep + file_name
 
     # create an environment file writer
-    writer = src.environment.FileEnvWriter(config,
-                                           logger,
-                                           file_dir,
-                                           src_root=None,
-                                           env_info=None)
+    writer = src.environment.FileEnvWriter(
+        config, logger, file_dir, src_root=None, env_info=None
+    )
 
     filepath = os.path.join(file_dir, file_name)
     # Write
-    writer.write_env_file(filepath,
-                          False,  # for launch
-                          "cfgForPy",
-                          additional_env=additional_env,
-                          no_path_init=False,
-                          for_package = binaries_dir_name)
+    writer.write_env_file(
+        filepath,
+        False,  # for launch
+        "cfgForPy",
+        additional_env=additional_env,
+        no_path_init=False,
+        for_package=binaries_dir_name,
+    )
 
     # Little hack to put out_dir_Path outside the strings
-    src.replace_in_file(filepath, 'r"out_dir_Path', 'out_dir_Path + r"' )
-    src.replace_in_file(filepath, "r'out_dir_Path + ", "out_dir_Path + r'" )
+    src.replace_in_file(filepath, 'r"out_dir_Path', 'out_dir_Path + r"')
+    src.replace_in_file(filepath, "r'out_dir_Path + ", "out_dir_Path + r'")
 
     # A hack to put a call to a file for distene licence.
     # It does nothing to an application that has no distene product
     if distene_licence_file_name:
-        logger.write("Application has a distene licence file! We use it in package launcher", 5)
+        logger.write(
+            "Application has a distene licence file! We use it in package launcher", 5
+        )
         hack_for_distene_licence(filepath, distene_licence_file_name)
 
     # change the rights in order to make the file executable for everybody
-    os.chmod(filepath,
-             stat.S_IRUSR |
-             stat.S_IRGRP |
-             stat.S_IROTH |
-             stat.S_IWUSR |
-             stat.S_IXUSR |
-             stat.S_IXGRP |
-             stat.S_IXOTH)
+    os.chmod(
+        filepath,
+        stat.S_IRUSR
+        | stat.S_IRGRP
+        | stat.S_IROTH
+        | stat.S_IWUSR
+        | stat.S_IXUSR
+        | stat.S_IXGRP
+        | stat.S_IXOTH,
+    )
 
     # restore modified setting by its initial value
-    config.APPLICATION.base=base_setting
+    config.APPLICATION.base = base_setting
 
     return filepath
 
+
 def hack_for_distene_licence(filepath, licence_file):
-    '''Replace the distene licence env variable by a call to a file.
+    """Replace the distene licence env variable by a call to a file.
 
     :param filepath Str: The path to the launcher to modify.
-    '''
+    """
     shutil.move(filepath, filepath + "_old")
-    fileout= filepath
+    fileout = filepath
     filein = filepath + "_old"
     fin = open(filein, "r")
     fout = open(fileout, "w")
     text = fin.readlines()
     # Find the Distene section
     num_line = -1
-    for i,line in enumerate(text):
+    for i, line in enumerate(text):
         if "# Set DISTENE License" in line:
             num_line = i
             break
@@ -372,9 +465,10 @@ def hack_for_distene_licence(filepath, licence_file):
             fout.write(line)
         fout.close()
         return
-    del text[num_line +1]
-    del text[num_line +1]
-    text_to_insert ="""    try:
+    del text[num_line + 1]
+    del text[num_line + 1]
+    text_to_insert = (
+        """    try:
         distene_licence_file=r"%s"
         if sys.version_info[0] >= 3 and sys.version_info[1] >= 5:
             import importlib.util
@@ -386,7 +480,9 @@ def hack_for_distene_licence(filepath, licence_file):
             distene = imp.load_source('distene_licence', distene_licence_file)
         distene.set_distene_variables(context)
     except:
-        pass\n"""  % licence_file
+        pass\n"""
+        % licence_file
+    )
     text.insert(num_line + 1, text_to_insert)
     for line in text:
         fout.write(line)
@@ -394,12 +490,11 @@ def hack_for_distene_licence(filepath, licence_file):
     fout.close()
     return
 
-def produce_relative_env_files(config,
-                              logger,
-                              file_dir,
-                              binaries_dir_name,
-                              exe_name=None):
-    '''Create some specific environment files for the binary package. These
+
+def produce_relative_env_files(
+    config, logger, file_dir, binaries_dir_name, exe_name=None
+):
+    """Create some specific environment files for the binary package. These
        files use relative paths.
 
     :param config Config: The global configuration.
@@ -410,76 +505,71 @@ def produce_relative_env_files(config,
     :param exe_name str: if given generate a launcher executing exe_name
     :return: the list of path of the produced environment files
     :rtype: List
-    '''
+    """
 
     # set base mode to "no" for the archive - save current mode to restore it at the end
     if "base" in config.APPLICATION:
-        base_setting=config.APPLICATION.base
+        base_setting = config.APPLICATION.base
     else:
-        base_setting="maybe"
-    config.APPLICATION.base="no"
+        base_setting = "maybe"
+    config.APPLICATION.base = "no"
 
     # create an environment file writer
-    writer = src.environment.FileEnvWriter(config,
-                                           logger,
-                                           file_dir,
-                                           src_root=None)
+    writer = src.environment.FileEnvWriter(config, logger, file_dir, src_root=None)
 
     if src.architecture.is_windows():
-      shell = "bat"
-      filename  = "env_launch.bat"
+        shell = "bat"
+        filename = "env_launch.bat"
     else:
-      shell = "bash"
-      filename  = "env_launch.sh"
+        shell = "bash"
+        filename = "env_launch.sh"
 
     if exe_name:
-        filename=os.path.basename(exe_name)
+        filename = os.path.basename(exe_name)
 
     # Write
-    filepath = writer.write_env_file(filename,
-                          False, # for launch
-                          shell,
-                          for_package = binaries_dir_name)
+    filepath = writer.write_env_file(
+        filename,
+        False,  # for launch
+        shell,
+        for_package=binaries_dir_name,
+    )
 
     # Little hack to put out_dir_Path as environment variable
-    if src.architecture.is_windows() :
-      src.replace_in_file(filepath, '"out_dir_Path', '"%out_dir_Path%' )
-      src.replace_in_file(filepath, '=out_dir_Path', '=%out_dir_Path%' )
-      src.replace_in_file(filepath, ';out_dir_Path', ';%out_dir_Path%' )
+    if src.architecture.is_windows():
+        src.replace_in_file(filepath, '"out_dir_Path', '"%out_dir_Path%')
+        src.replace_in_file(filepath, "=out_dir_Path", "=%out_dir_Path%")
+        src.replace_in_file(filepath, ";out_dir_Path", ";%out_dir_Path%")
     else:
-      src.replace_in_file(filepath, '"out_dir_Path', '"${out_dir_Path}' )
-      src.replace_in_file(filepath, ':out_dir_Path', ':${out_dir_Path}' )
-      src.replace_in_file(filepath, ';out_dir_Path', ';${out_dir_Path}' )
+        src.replace_in_file(filepath, '"out_dir_Path', '"${out_dir_Path}')
+        src.replace_in_file(filepath, ":out_dir_Path", ":${out_dir_Path}")
+        src.replace_in_file(filepath, ";out_dir_Path", ";${out_dir_Path}")
 
     if exe_name:
         if src.architecture.is_windows():
-            cmd="\n\nrem Launch exe with user arguments\n%s " % exe_name + "%*"
+            cmd = "\n\nrem Launch exe with user arguments\n%s " % exe_name + "%*"
         else:
-            cmd='\n\n# Launch exe with user arguments\n%s "$*"' % exe_name
+            cmd = '\n\n# Launch exe with user arguments\n%s "$*"' % exe_name
         with open(filepath, "a") as exe_launcher:
             exe_launcher.write(cmd)
 
     # change the rights in order to make the file executable for everybody
-    os.chmod(filepath,
-             stat.S_IRUSR |
-             stat.S_IRGRP |
-             stat.S_IROTH |
-             stat.S_IWUSR |
-             stat.S_IXUSR |
-             stat.S_IXGRP |
-             stat.S_IXOTH)
+    os.chmod(
+        filepath,
+        stat.S_IRUSR
+        | stat.S_IRGRP
+        | stat.S_IROTH
+        | stat.S_IWUSR
+        | stat.S_IXUSR
+        | stat.S_IXGRP
+        | stat.S_IXOTH,
+    )
 
     # restore modified setting by its initial value
-    config.APPLICATION.base=base_setting
+    config.APPLICATION.base = base_setting
 
     return filepath
 
-def produce_install_bin_file(config,
-                             logger,
-                             file_dir,
-                             d_sub,
-                             file_name):
-    '''Create a bash shell script which do substitutions in BIRARIES dir
+
+def produce_install_bin_file(config, logger, file_dir, d_sub, file_name):
+    """Create a bash shell script which does substitutions in the BINARIES dir
        in order to use it for extra compilations.
 
     :param config Config: The global configuration.
@@ -489,52 +579,52 @@ def produce_install_bin_file(config,
     :param file_name str: the name of the install script file
     :return: the produced file
     :rtype: str
-    '''
+    """
     # Write
     filepath = os.path.join(file_dir, file_name)
     # open the file and write into it
     # use codec utf-8 as sat variables are in unicode
-    with codecs.open(filepath, "w", 'utf-8') as installbin_file:
-        installbin_template_path = os.path.join(config.VARS.internal_dir,
-                                        "INSTALL_BIN.template")
+    with codecs.open(filepath, "w", "utf-8") as installbin_file:
+        installbin_template_path = os.path.join(
+            config.VARS.internal_dir, "INSTALL_BIN.template"
+        )
 
         # build the name of the directory that will contain the binaries
         binaries_dir_name = config.INTERNAL.config.binary_dir + config.VARS.dist
         # build the substitution loop
         loop_cmd = "for f in $(grep -RIl"
         for key in d_sub:
-            loop_cmd += " -e "+ key
-        loop_cmd += ' ' + config.INTERNAL.config.install_dir +\
-                    '); do\n     sed -i "\n'
+            loop_cmd += " -e " + key
+        loop_cmd += " " + config.INTERNAL.config.install_dir + '); do\n     sed -i "\n'
         for key in d_sub:
             loop_cmd += "        s?" + key + "?$(pwd)/" + d_sub[key] + "?g\n"
         loop_cmd += '            " $f\ndone'
 
-        d={}
+        d = {}
         d["BINARIES_DIR"] = binaries_dir_name
-        d["SUBSTITUTION_LOOP"]=loop_cmd
-        d["INSTALL_DIR"]=config.INTERNAL.config.install_dir
+        d["SUBSTITUTION_LOOP"] = loop_cmd
+        d["INSTALL_DIR"] = config.INTERNAL.config.install_dir
 
         # substitute the template and write it in file
-        content=src.template.substitute(installbin_template_path, d)
+        content = src.template.substitute(installbin_template_path, d)
         installbin_file.write(content)
         # change the rights in order to make the file executable for everybody
-        os.chmod(filepath,
-                 stat.S_IRUSR |
-                 stat.S_IRGRP |
-                 stat.S_IROTH |
-                 stat.S_IWUSR |
-                 stat.S_IXUSR |
-                 stat.S_IXGRP |
-                 stat.S_IXOTH)
+        os.chmod(
+            filepath,
+            stat.S_IRUSR
+            | stat.S_IRGRP
+            | stat.S_IROTH
+            | stat.S_IWUSR
+            | stat.S_IXUSR
+            | stat.S_IXGRP
+            | stat.S_IXOTH,
+        )
 
     return filepath
 
-def product_appli_creation_script(config,
-                                  logger,
-                                  file_dir,
-                                  binaries_dir_name):
-    '''Create a script that can produce an application (EDF style) in the binary
+
+def product_appli_creation_script(config, logger, file_dir, binaries_dir_name):
+    """Create a script that can produce an application (EDF style) in the binary
        package.
 
     :param config Config: The global configuration.
@@ -544,12 +634,11 @@ def product_appli_creation_script(config,
                                   are, in the archive.
     :return: the path of the produced script file
     :rtype: Str
-    '''
+    """
     template_name = "create_appli.py.for_bin_packages.template"
     template_path = os.path.join(config.VARS.internal_dir, template_name)
     text_to_fill = open(template_path, "r").read()
-    text_to_fill = text_to_fill.replace("TO BE FILLED 1",
-                                        '"' + binaries_dir_name + '"')
+    text_to_fill = text_to_fill.replace("TO BE FILLED 1", '"' + binaries_dir_name + '"')
 
     text_to_add = ""
     for product_name in get_SALOME_modules(config):
@@ -558,22 +647,20 @@ def product_appli_creation_script(config,
         if src.product.product_is_smesh_plugin(product_info):
             continue
 
-        if 'install_dir' in product_info and bool(product_info.install_dir):
+        if "install_dir" in product_info and bool(product_info.install_dir):
             if src.product.product_is_cpp(product_info):
                 # cpp module
                 for cpp_name in src.product.get_product_components(product_info):
-                    line_to_add = ("<module name=\"" +
-                                   cpp_name +
-                                   "\" gui=\"yes\" path=\"''' + "
-                                   "os.path.join(dir_bin_name, \"" +
-                                   cpp_name + "\") + '''\"/>")
+                    line_to_add = (
+                        '<module name="' + cpp_name + '" gui="yes" path="\'\'\' + '
+                        'os.path.join(dir_bin_name, "' + cpp_name + "\") + '''\"/>"
+                    )
             else:
                 # regular module
-                line_to_add = ("<module name=\"" +
-                               product_name +
-                               "\" gui=\"yes\" path=\"''' + "
-                               "os.path.join(dir_bin_name, \"" +
-                               product_name + "\") + '''\"/>")
+                line_to_add = (
+                    '<module name="' + product_name + '" gui="yes" path="\'\'\' + '
+                    'os.path.join(dir_bin_name, "' + product_name + "\") + '''\"/>"
+                )
             text_to_add += line_to_add + "\n"
 
     filled_text = text_to_fill.replace("TO BE FILLED 2", text_to_add)
@@ -584,23 +671,26 @@ def product_appli_creation_script(config,
     ff.close()
 
     # change the rights in order to make the file executable for everybody
-    os.chmod(tmp_file_path,
-             stat.S_IRUSR |
-             stat.S_IRGRP |
-             stat.S_IROTH |
-             stat.S_IWUSR |
-             stat.S_IXUSR |
-             stat.S_IXGRP |
-             stat.S_IXOTH)
+    os.chmod(
+        tmp_file_path,
+        stat.S_IRUSR
+        | stat.S_IRGRP
+        | stat.S_IROTH
+        | stat.S_IWUSR
+        | stat.S_IXUSR
+        | stat.S_IXGRP
+        | stat.S_IXOTH,
+    )
 
     return tmp_file_path
 
+
 def bin_products_archives(config, logger, only_vcs):
-    '''Prepare binary packages for all products
+    """Prepare binary packages for all products
     :param config Config: The global configuration.
     :return: the error status
     :rtype: bool
-    '''
+    """
 
     logger.write("Make %s binary archives\n" % config.VARS.dist)
     # Get the default directory where to put the packages
@@ -608,17 +698,18 @@ def bin_products_archives(config, logger, only_vcs):
     src.ensure_path_exists(binpackage_path)
     # Get the list of product installation to add to the archive
     l_products_name = sorted(config.APPLICATION.products.keys())
-    l_product_info = src.product.get_products_infos(l_products_name,
-                                                    config)
+    l_product_info = src.product.get_products_infos(l_products_name, config)
     # first loop on products : filter products, analyse properties,
     # and store the information that will be used to create the archive in the second loop
-    l_not_installed=[] # store not installed products for warning at the end
+    l_not_installed = []  # store not installed products for warning at the end
     for prod_name, prod_info in l_product_info:
         # ignore the native and fixed products for install directories
-        if (src.get_property_in_product_cfg(prod_info, "not_in_package") == "yes"
-                or src.product.product_is_native(prod_info)
-                or src.product.product_is_fixed(prod_info)
-                or not src.product.product_compiles(prod_info)):
+        if (
+            src.get_property_in_product_cfg(prod_info, "not_in_package") == "yes"
+            or src.product.product_is_native(prod_info)
+            or src.product.product_is_fixed(prod_info)
+            or not src.product.product_compiles(prod_info)
+        ):
             continue
         if only_vcs and not src.product.product_is_vcs(prod_info):
             continue
@@ -626,24 +717,38 @@ def bin_products_archives(config, logger, only_vcs):
             l_not_installed.append(prod_name)
             continue  # product is not installed, we skip it
         # prepare call to make_bin_archive
-        path_targz_prod = os.path.join(binpackage_path, prod_name + '-' + prod_info.version.replace("/", "_") + "-" + config.VARS.dist + PACKAGE_EXT)
-        targz_prod = tarfile.open(path_targz_prod, mode='w:gz')
+        path_targz_prod = os.path.join(
+            binpackage_path,
+            prod_name
+            + "-"
+            + prod_info.version.replace("/", "_")
+            + "-"
+            + config.VARS.dist
+            + PACKAGE_EXT,
+        )
+        targz_prod = tarfile.open(path_targz_prod, mode="w:gz")
         bin_path = prod_info.install_dir
         targz_prod.add(bin_path)
         targz_prod.close()
         # compute the MD5 checksum of the archive file
         import hashlib
-        with open(path_targz_prod,"rb") as f:
-            bytes = f.read() # read file as bytes
-            readable_hash = hashlib.md5(bytes).hexdigest();
-            with open(path_targz_prod+".md5", "w") as md5sum:
-               md5sum.write("%s  %s" % (readable_hash, os.path.basename(path_targz_prod)))
-            logger.write("   archive : %s   (md5sum = %s)\n" % (path_targz_prod, readable_hash))
+
+        with open(path_targz_prod, "rb") as f:
+            bytes = f.read()  # read file as bytes
+            readable_hash = hashlib.md5(bytes).hexdigest()
+            with open(path_targz_prod + ".md5", "w") as md5sum:
+                md5sum.write(
+                    "%s  %s" % (readable_hash, os.path.basename(path_targz_prod))
+                )
+            logger.write(
+                "   archive : %s   (md5sum = %s)\n" % (path_targz_prod, readable_hash)
+            )
 
     return 0
 
+
 def binary_package(config, logger, options, tmp_working_dir):
-    '''Prepare a dictionary that stores all the needed directories and files to
+    """Prepare a dictionary that stores all the needed directories and files to
        add in a binary package.
 
     :param config Config: The global configuration.
@@ -656,12 +761,11 @@ def binary_package(config, logger, options, tmp_working_dir):
              add in a binary package.
              {label : (path_on_local_machine, path_in_archive)}
     :rtype: dict
-    '''
+    """
 
     # Get the list of product installation to add to the archive
     l_products_name = sorted(config.APPLICATION.products.keys())
-    l_product_info = src.product.get_products_infos(l_products_name,
-                                                    config)
+    l_product_info = src.product.get_products_infos(l_products_name, config)
 
     # suppress compile time products for binaries-only archives
     if not options.sources:
@@ -672,11 +776,13 @@ def binary_package(config, logger, options, tmp_working_dir):
     l_not_installed = []
     l_sources_not_present = []
     generate_mesa_launcher = False  # a flag to know if we generate a mesa launcher
-    if ("APPLICATION" in config  and
-        "properties"  in config.APPLICATION  and
-        "mesa_launcher_in_package"    in config.APPLICATION.properties  and
-        config.APPLICATION.properties.mesa_launcher_in_package == "yes") :
-            generate_mesa_launcher=True
+    if (
+        "APPLICATION" in config
+        and "properties" in config.APPLICATION
+        and "mesa_launcher_in_package" in config.APPLICATION.properties
+        and config.APPLICATION.properties.mesa_launcher_in_package == "yes"
+    ):
+        generate_mesa_launcher = True
 
     # first loop on products : filter products, analyse properties,
     # and store the information that will be used to create the archive in the second loop
@@ -687,25 +793,34 @@ def binary_package(config, logger, options, tmp_working_dir):
 
         # Add the sources of the products that have the property
         # sources_in_package : "yes"
-        if src.get_property_in_product_cfg(prod_info,
-                                           "sources_in_package") == "yes":
+        if src.get_property_in_product_cfg(prod_info, "sources_in_package") == "yes":
             if os.path.exists(prod_info.source_dir):
                 l_source_dir.append((prod_name, prod_info.source_dir))
             else:
                 l_sources_not_present.append(prod_name)
 
         # ignore the native and fixed products for install directories
-        if (src.product.product_is_native(prod_info)
-                or src.product.product_is_fixed(prod_info)
-                or not src.product.product_compiles(prod_info)):
+        if (
+            src.product.product_is_native(prod_info)
+            or src.product.product_is_fixed(prod_info)
+            or not src.product.product_compiles(prod_info)
+        ):
             continue
         #
         # products with single_dir property will be installed in the PRODUCTS directory of the archive
-        is_single_dir=(src.appli_test_property(config,"single_install_dir", "yes") and \
-                       src.product.product_test_property(prod_info,"single_install_dir", "yes"))
+        is_single_dir = src.appli_test_property(
+            config, "single_install_dir", "yes"
+        ) and src.product.product_test_property(prod_info, "single_install_dir", "yes")
         if src.product.check_installation(config, prod_info):
-            l_install_dir.append((prod_name, prod_info.name, prod_info.install_dir,
-                                  is_single_dir, prod_info.install_mode))
+            l_install_dir.append(
+                (
+                    prod_name,
+                    prod_info.name,
+                    prod_info.install_dir,
+                    is_single_dir,
+                    prod_info.install_mode,
+                )
+            )
         else:
             l_not_installed.append(prod_name)
 
@@ -713,21 +828,26 @@ def binary_package(config, logger, options, tmp_working_dir):
         if src.product.product_is_cpp(prod_info):
             # cpp module
             for name_cpp in src.product.get_product_components(prod_info):
-                install_dir = os.path.join(config.APPLICATION.workdir,
-                                           config.INTERNAL.config.install_dir,
-                                           name_cpp)
+                install_dir = os.path.join(
+                    config.APPLICATION.workdir,
+                    config.INTERNAL.config.install_dir,
+                    name_cpp,
+                )
                 if os.path.exists(install_dir):
-                    l_install_dir.append((name_cpp, name_cpp, install_dir, False, "value"))
+                    l_install_dir.append(
+                        (name_cpp, name_cpp, install_dir, False, "value")
+                    )
                 else:
                     l_not_installed.append(name_cpp)
 
     # check the name of the directory that could contain the binaries
     # from previous detar
     binaries_from_detar = os.path.join(
-                              config.APPLICATION.workdir,
-                              config.INTERNAL.config.binary_dir + config.VARS.dist)
+        config.APPLICATION.workdir, config.INTERNAL.config.binary_dir + config.VARS.dist
+    )
     if os.path.exists(binaries_from_detar):
-         logger.write("""
+        logger.write(
+            """
 WARNING: existing binaries directory from previous detar installation:
          %s
          To make new package from this, you have to:
@@ -736,7 +856,9 @@ WARNING: existing binaries directory from previous detar installation:
          2) or recompile everything in INSTALL with "sat compile" command
             this step is long, and requires some linux packages to be installed
             on your system\n
-""" % binaries_from_detar)
+"""
+            % binaries_from_detar
+        )
 
     # Print warning or error if there are some missing products
     if len(l_not_installed) > 0:
@@ -745,15 +867,15 @@ WARNING: existing binaries directory from previous detar installation:
             text_missing_prods += " - " + p_name + "\n"
         if not options.force_creation:
             msg = _("ERROR: there are missing product installations:")
-            logger.write("%s\n%s" % (src.printcolors.printcError(msg),
-                                     text_missing_prods),
-                         1)
+            logger.write(
+                "%s\n%s" % (src.printcolors.printcError(msg), text_missing_prods), 1
+            )
             raise src.SatException(msg)
         else:
             msg = _("WARNING: there are missing products installations:")
-            logger.write("%s\n%s" % (src.printcolors.printcWarning(msg),
-                                     text_missing_prods),
-                         1)
+            logger.write(
+                "%s\n%s" % (src.printcolors.printcWarning(msg), text_missing_prods), 1
+            )
 
     # Do the same for sources
     if len(l_sources_not_present) > 0:
@@ -762,15 +884,15 @@ WARNING: existing binaries directory from previous detar installation:
             text_missing_prods += "-" + p_name + "\n"
         if not options.force_creation:
             msg = _("ERROR: there are missing product sources:")
-            logger.write("%s\n%s" % (src.printcolors.printcError(msg),
-                                     text_missing_prods),
-                         1)
+            logger.write(
+                "%s\n%s" % (src.printcolors.printcError(msg), text_missing_prods), 1
+            )
             raise src.SatException(msg)
         else:
             msg = _("WARNING: there are missing products sources:")
-            logger.write("%s\n%s" % (src.printcolors.printcWarning(msg),
-                                     text_missing_prods),
-                         1)
+            logger.write(
+                "%s\n%s" % (src.printcolors.printcWarning(msg), text_missing_prods), 1
+            )
 
     # construct the name of the directory that will contain the binaries
     if src.architecture.is_windows():
@@ -780,16 +902,22 @@ WARNING: existing binaries directory from previous detar installation:
     # construct the correlation table between the product names, their
     # actual install directories and their install directories in the archive
     d_products = {}
-    for prod_name, prod_info_name, install_dir, is_single_dir, install_mode in l_install_dir:
-        prod_base_name=os.path.basename(install_dir)
+    for (
+        prod_name,
+        prod_info_name,
+        install_dir,
+        is_single_dir,
+        install_mode,
+    ) in l_install_dir:
+        prod_base_name = os.path.basename(install_dir)
         if install_mode == "base":
             # case of products installed in base.
             # because the archive is in base:no mode, the install dir has a different name inside the archive:
             # we set it to the product name, or to PRODUCTS if single-dir
             if is_single_dir:
-                prod_base_name=config.INTERNAL.config.single_install_dir
+                prod_base_name = config.INTERNAL.config.single_install_dir
             else:
-                prod_base_name=prod_info_name
+                prod_base_name = prod_info_name
         path_in_archive = os.path.join(binaries_dir_name, prod_base_name)
         d_products[prod_name + " (bin)"] = (install_dir, path_in_archive)
 
@@ -797,106 +925,112 @@ WARNING: existing binaries directory from previous detar installation:
         path_in_archive = os.path.join("SOURCES", prod_name)
         d_products[prod_name + " (sources)"] = (source_dir, path_in_archive)
 
-    # create an archives of compilation logs, and insert it into the tarball
-    logpath=os.path.join(config.APPLICATION.workdir, "LOGS")
+    # create an archive of the compilation logs and insert it into the tarball
+    logpath = os.path.join(config.APPLICATION.workdir, "LOGS")
     path_targz_logs = os.path.join(tmp_working_dir, "logs.tgz")
-    tar_log = tarfile.open(path_targz_logs, mode='w:gz')
+    tar_log = tarfile.open(path_targz_logs, mode="w:gz")
     tar_log.add(logpath, arcname="LOGS")
     tar_log.close()
     d_products["LOGS"] = (path_targz_logs, "logs.tgz")
 
     # for packages of SALOME applications including KERNEL,
     # we produce a salome launcher or a virtual application (depending on salome version)
-    if 'KERNEL' in config.APPLICATION.products:
+    if "KERNEL" in config.APPLICATION.products:
         VersionSalome = src.get_salome_version(config)
         # Case where SALOME has the launcher that uses the SalomeContext API
-        if VersionSalome >= MMP([7,3,0]):
+        if VersionSalome >= MMP([7, 3, 0]):
             # create the relative launcher and add it to the files to add
             launcher_name = src.get_launcher_name(config)
-            launcher_package = produce_relative_launcher(config,
-                                                 logger,
-                                                 tmp_working_dir,
-                                                 launcher_name,
-                                                 binaries_dir_name)
+            launcher_package = produce_relative_launcher(
+                config, logger, tmp_working_dir, launcher_name, binaries_dir_name
+            )
             d_products["launcher"] = (launcher_package, launcher_name)
 
             # if the application contains mesa products, we generate in addition to the
             # classical salome launcher a launcher using mesa and called mesa_salome
             # (the mesa launcher will be used for remote usage through ssh).
             if generate_mesa_launcher:
-                #if there is one : store the use_mesa property
-                restore_use_mesa_option=None
-                if ('properties' in config.APPLICATION and
-                    'use_mesa' in config.APPLICATION.properties):
+                # if there is one : store the use_mesa property
+                restore_use_mesa_option = None
+                if (
+                    "properties" in config.APPLICATION
+                    and "use_mesa" in config.APPLICATION.properties
+                ):
                     restore_use_mesa_option = config.APPLICATION.properties.use_mesa
 
                 # activate mesa property, and generate a mesa launcher
-                src.activate_mesa_property(config)  #activate use_mesa property
-                launcher_mesa_name="mesa_"+launcher_name
-                launcher_package_mesa = produce_relative_launcher(config,
-                                                     logger,
-                                                     tmp_working_dir,
-                                                     launcher_mesa_name,
-                                                     binaries_dir_name)
-                d_products["launcher (mesa)"] = (launcher_package_mesa, launcher_mesa_name)
+                src.activate_mesa_property(config)  # activate use_mesa property
+                launcher_mesa_name = "mesa_" + launcher_name
+                launcher_package_mesa = produce_relative_launcher(
+                    config,
+                    logger,
+                    tmp_working_dir,
+                    launcher_mesa_name,
+                    binaries_dir_name,
+                )
+                d_products["launcher (mesa)"] = (
+                    launcher_package_mesa,
+                    launcher_mesa_name,
+                )
 
                 # if there was a use_mesa value, we restore it
                 # else we set it to the default value "no"
                 if restore_use_mesa_option != None:
-                    config.APPLICATION.properties.use_mesa=restore_use_mesa_option
+                    config.APPLICATION.properties.use_mesa = restore_use_mesa_option
                 else:
-                    config.APPLICATION.properties.use_mesa="no"
+                    config.APPLICATION.properties.use_mesa = "no"
 
             if options.sources:
                 # if we mix binaries and sources, we add a copy of the launcher,
                 # prefixed with "bin", in order to avoid clashes
-                launcher_copy_name="bin"+launcher_name
-                launcher_package_copy = produce_relative_launcher(config,
-                                                     logger,
-                                                     tmp_working_dir,
-                                                     launcher_copy_name,
-                                                     binaries_dir_name)
-                d_products["launcher (copy)"] = (launcher_package_copy, launcher_copy_name)
+                launcher_copy_name = "bin" + launcher_name
+                launcher_package_copy = produce_relative_launcher(
+                    config,
+                    logger,
+                    tmp_working_dir,
+                    launcher_copy_name,
+                    binaries_dir_name,
+                )
+                d_products["launcher (copy)"] = (
+                    launcher_package_copy,
+                    launcher_copy_name,
+                )
         else:
             # Provide a script for the creation of an application EDF style
-            appli_script = product_appli_creation_script(config,
-                                                        logger,
-                                                        tmp_working_dir,
-                                                        binaries_dir_name)
+            appli_script = product_appli_creation_script(
+                config, logger, tmp_working_dir, binaries_dir_name
+            )
 
             d_products["appli script"] = (appli_script, "create_appli.py")
 
     # Put also the environment file
-    env_file = produce_relative_env_files(config,
-                                           logger,
-                                           tmp_working_dir,
-                                           binaries_dir_name)
+    env_file = produce_relative_env_files(
+        config, logger, tmp_working_dir, binaries_dir_name
+    )
 
     if src.architecture.is_windows():
-      filename  = "env_launch.bat"
+        filename = "env_launch.bat"
     else:
-      filename  = "env_launch.sh"
+        filename = "env_launch.sh"
     d_products["environment file"] = (env_file, filename)
 
     # If option exe, produce an extra launcher based on specified exe
     if options.exe:
-        exe_file = produce_relative_env_files(config,
-                                              logger,
-                                              tmp_working_dir,
-                                              binaries_dir_name,
-                                              options.exe)
+        exe_file = produce_relative_env_files(
+            config, logger, tmp_working_dir, binaries_dir_name, options.exe
+        )
 
         if src.architecture.is_windows():
-          filename  = os.path.basename(options.exe) + ".bat"
+            filename = os.path.basename(options.exe) + ".bat"
         else:
-          filename  = os.path.basename(options.exe) + ".sh"
+            filename = os.path.basename(options.exe) + ".sh"
         d_products["exe file"] = (exe_file, filename)
 
-
     return d_products
 
+
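binary_package (above) returns a {label: (path_on_local_machine, path_in_archive)} mapping. A minimal sketch of how such a mapping could be poured into a gzipped tar with tarfile; the helper name and the sample entry are hypothetical, not sat's actual archiving step:

    import tarfile

    def add_entries_to_archive(archive_path, d_content):
        """Add each (local_path, path_in_archive) pair of the mapping to a tar.gz."""
        with tarfile.open(archive_path, mode="w:gz") as tar:
            for label, (local_path, path_in_archive) in d_content.items():
                tar.add(local_path, arcname=path_in_archive)

    # usage sketch:
    # add_entries_to_archive("package.tgz", {"LOGS": ("/tmp/work/logs.tgz", "logs.tgz")})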
 def source_package(sat, config, logger, options, tmp_working_dir):
-    '''Prepare a dictionary that stores all the needed directories and files to
+    """Prepare a dictionary that stores all the needed directories and files to
        add in a source package.
 
     :param config Config: The global configuration.
@@ -909,9 +1043,9 @@ def source_package(sat, config, logger, options, tmp_working_dir):
              add in a source package.
              {label : (path_on_local_machine, path_in_archive)}
     :rtype: dict
-    '''
+    """
 
-    d_archives={}
+    d_archives = {}
     # Get all the products that are prepared using an archive
     # unless ftp mode is specified (in this case the user of the
     # archive will get the sources through the ftp mode of sat prepare
@@ -925,24 +1059,21 @@ def source_package(sat, config, logger, options, tmp_working_dir):
         # Make archives with the products that are not prepared using an archive
         # (git, cvs, svn, etc)
         logger.write("Construct archives for vcs products ... ")
-        d_archives_vcs = get_archives_vcs(l_pinfo_vcs,
-                                          sat,
-                                          config,
-                                          logger,
-                                          tmp_working_dir)
+        d_archives_vcs = get_archives_vcs(
+            l_pinfo_vcs, sat, config, logger, tmp_working_dir
+        )
         logger.write("Done\n")
 
     # Create a project
     logger.write("Create the project ... ")
-    d_project = create_project_for_src_package(config,
-                                               tmp_working_dir,
-                                               options.with_vcs,
-                                               options.ftp)
+    d_project = create_project_for_src_package(
+        config, tmp_working_dir, options.with_vcs, options.ftp
+    )
     logger.write("Done\n")
 
     # Add salomeTools
     tmp_sat = add_salomeTools(config, tmp_working_dir)
-    d_sat = {"salomeTools" : (tmp_sat, "sat")}
+    d_sat = {"salomeTools": (tmp_sat, "sat")}
 
     # Add a sat symbolic link if not win
     if not src.architecture.is_windows():
@@ -960,14 +1091,17 @@ def source_package(sat, config, logger, options, tmp_working_dir):
         os.symlink("../ARCHIVES", "ARCHIVES")
         os.chdir(t)
 
-        d_sat["sat archive link"] = (os.path.join(tmp_working_dir,"PROJECT", "ARCHIVES"),
-                                     os.path.join("PROJECT", "ARCHIVES"))
+        d_sat["sat archive link"] = (
+            os.path.join(tmp_working_dir, "PROJECT", "ARCHIVES"),
+            os.path.join("PROJECT", "ARCHIVES"),
+        )
 
     d_source = src.merge_dicts(d_archives, d_archives_vcs, d_project, d_sat)
     return d_source
 
+
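source_package (above) combines several {label: (local_path, path_in_archive)} mappings through src.merge_dicts. A plain-dict sketch of that merge, assuming a last-wins policy (sat's own merge_dicts may behave differently):

    def merge_dicts(*dicts):
        """Merge several mappings into a new dict; keys from later mappings win."""
        merged = {}
        for d in dicts:
            merged.update(d)
        return merged

    # usage sketch: d_source = merge_dicts(d_archives, d_archives_vcs, d_project, d_sat)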
 def get_archives(config, logger):
-    '''Find all the products that are get using an archive and all the products
+    """Find all the products that are get using an archive and all the products
        that are get using a vcs (git, cvs, svn) repository.
 
     :param config Config: The global configuration.
@@ -977,11 +1111,10 @@ def get_archives(config, logger):
              and the list of specific configuration corresponding to the vcs
              products
     :rtype: (Dict, List)
-    '''
+    """
     # Get the list of product informations
     l_products_name = config.APPLICATION.products.keys()
-    l_product_info = src.product.get_products_infos(l_products_name,
-                                                    config)
+    l_product_info = src.product.get_products_infos(l_products_name, config)
     d_archives = {}
     l_pinfo_vcs = []
     for p_name, p_info in l_product_info:
@@ -989,39 +1122,56 @@ def get_archives(config, logger):
         if src.get_property_in_product_cfg(p_info, "not_in_package") == "yes":
             continue
         # ignore the native and fixed products
-        if (src.product.product_is_native(p_info)
-                or src.product.product_is_fixed(p_info)):
+        if src.product.product_is_native(p_info) or src.product.product_is_fixed(
+            p_info
+        ):
             continue
         if p_info.get_source == "archive":
             archive_path = p_info.archive_info.archive_name
             archive_name = os.path.basename(archive_path)
-            d_archives[p_name] = (archive_path,
-                                  os.path.join(ARCHIVE_DIR, archive_name))
-            if (src.appli_test_property(config,"pip", "yes") and
-                src.product.product_test_property(p_info,"pip", "yes")):
+            d_archives[p_name] = (archive_path, os.path.join(ARCHIVE_DIR, archive_name))
+            if src.appli_test_property(
+                config, "pip", "yes"
+            ) and src.product.product_test_property(p_info, "pip", "yes"):
                 # if pip mode is activated, and product is managed by pip
-                pip_wheels_dir=os.path.join(config.LOCAL.archive_dir,"wheels")
-                if "archive_prefix" in p_info.archive_info and p_info.archive_info.archive_prefix:
-                    pip_wheel_pattern=os.path.join(pip_wheels_dir,
-                                                   "%s-%s*" % (p_info.archive_info.archive_prefix, p_info.version))
+                pip_wheels_dir = os.path.join(config.LOCAL.archive_dir, "wheels")
+                if (
+                    "archive_prefix" in p_info.archive_info
+                    and p_info.archive_info.archive_prefix
+                ):
+                    pip_wheel_pattern = os.path.join(
+                        pip_wheels_dir,
+                        "%s-%s*" % (p_info.archive_info.archive_prefix, p_info.version),
+                    )
                 else:
-                    pip_wheel_pattern=os.path.join(pip_wheels_dir,
-                                                   "%s-%s*" % (p_info.name, p_info.version))
-                pip_wheel_path=glob.glob(pip_wheel_pattern)
-                msg_pip_not_found="Error in get_archive, pip wheel for "\
-                                  "product %s-%s was not found in %s directory"
-                msg_pip_two_or_more="Error in get_archive, several pip wheels for "\
-                                  "product %s-%s were found in %s directory"
-                if len(pip_wheel_path)==0:
-                    raise src.SatException(msg_pip_not_found %\
-                        (p_info.name, p_info.version, pip_wheels_dir))
-                if len(pip_wheel_path)>1:
-                    raise src.SatException(msg_pip_two_or_more %\
-                        (p_info.name, p_info.version, pip_wheels_dir))
-
-                pip_wheel_name=os.path.basename(pip_wheel_path[0])
-                d_archives[p_name+" (pip wheel)"]=(pip_wheel_path[0],
-                    os.path.join(ARCHIVE_DIR, "wheels", pip_wheel_name))
+                    pip_wheel_pattern = os.path.join(
+                        pip_wheels_dir, "%s-%s*" % (p_info.name, p_info.version)
+                    )
+                pip_wheel_path = glob.glob(pip_wheel_pattern)
+                msg_pip_not_found = (
+                    "Error in get_archive, pip wheel for "
+                    "product %s-%s was not found in %s directory"
+                )
+                msg_pip_two_or_more = (
+                    "Error in get_archive, several pip wheels for "
+                    "product %s-%s were found in %s directory"
+                )
+                if len(pip_wheel_path) == 0:
+                    raise src.SatException(
+                        msg_pip_not_found
+                        % (p_info.name, p_info.version, pip_wheels_dir)
+                    )
+                if len(pip_wheel_path) > 1:
+                    raise src.SatException(
+                        msg_pip_two_or_more
+                        % (p_info.name, p_info.version, pip_wheels_dir)
+                    )
+
+                pip_wheel_name = os.path.basename(pip_wheel_path[0])
+                d_archives[p_name + " (pip wheel)"] = (
+                    pip_wheel_path[0],
+                    os.path.join(ARCHIVE_DIR, "wheels", pip_wheel_name),
+                )
         else:
             # this product is not managed by archive,
             # an archive of the vcs directory will be created by get_archive_vcs
@@ -1029,8 +1179,9 @@ def get_archives(config, logger):
 
     return d_archives, l_pinfo_vcs
 
+
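For pip-managed products, get_archives (above) globs the wheels directory for a single '<name>-<version>*' match and fails on zero or multiple hits. A standalone sketch of that lookup, where RuntimeError stands in for src.SatException and the paths are hypothetical:

    import glob
    import os

    def find_single_wheel(wheels_dir, name, version):
        """Return the unique wheel matching '<name>-<version>*', or raise."""
        pattern = os.path.join(wheels_dir, "%s-%s*" % (name, version))
        matches = glob.glob(pattern)
        if len(matches) == 0:
            raise RuntimeError("no pip wheel for %s-%s in %s" % (name, version, wheels_dir))
        if len(matches) > 1:
            raise RuntimeError("several pip wheels for %s-%s in %s" % (name, version, wheels_dir))
        return matches[0]

    # usage sketch: wheel = find_single_wheel("/path/to/ARCHIVES/wheels", "numpy", "1.24.4")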
 def add_salomeTools(config, tmp_working_dir):
-    '''Prepare a version of salomeTools that has a specific local.pyconf file
+    """Prepare a version of salomeTools that has a specific local.pyconf file
        configured for a source package.
 
     :param config Config: The global configuration.
@@ -1039,7 +1190,7 @@ def add_salomeTools(config, tmp_working_dir):
                                 source package
     :return: The path to the local salomeTools directory to add in the package
     :rtype: str
-    '''
+    """
     # Copy sat in the temporary working directory
     sat_tmp_path = src.Path(os.path.join(tmp_working_dir, "salomeTools"))
     sat_running_path = src.Path(config.VARS.salometoolsway)
@@ -1055,9 +1206,7 @@ def add_salomeTools(config, tmp_working_dir):
     files_or_dir_SAT = os.listdir(os.path.join(tmp_working_dir, "salomeTools"))
     for file_or_dir in files_or_dir_SAT:
         if file_or_dir.endswith(".pyconf") or file_or_dir.endswith(".txt"):
-            file_path = os.path.join(tmp_working_dir,
-                                     "salomeTools",
-                                     file_or_dir)
+            file_path = os.path.join(tmp_working_dir, "salomeTools", file_or_dir)
             os.remove(file_path)
 
     ff = open(local_pyconf_file, "w")
@@ -1066,8 +1215,9 @@ def add_salomeTools(config, tmp_working_dir):
 
     return sat_tmp_path.path
 
+
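add_salomeTools (above) copies the running sat into the temporary directory and removes its top-level .pyconf and .txt files before writing a dedicated local.pyconf. A shutil-based sketch of that copy-then-strip step; the function name and paths are illustrative:

    import os
    import shutil

    def copy_sat_and_strip(sat_dir, dest_dir, extensions=(".pyconf", ".txt")):
        """Copy the sat tree, then drop top-level files with the given extensions."""
        shutil.copytree(sat_dir, dest_dir)
        for entry in os.listdir(dest_dir):
            if entry.endswith(extensions):
                os.remove(os.path.join(dest_dir, entry))

    # usage sketch: copy_sat_and_strip("/path/to/salomeTools", "/tmp/tmp_package/salomeTools")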
 def get_archives_vcs(l_pinfo_vcs, sat, config, logger, tmp_working_dir):
-    '''For sources package that require that all products are get using an
+    """For sources package that require that all products are get using an
        archive, one has to create some archive for the vcs products.
        So this method calls the clean and source command of sat and then create
        the archives.
@@ -1084,46 +1234,49 @@ def get_archives_vcs(l_pinfo_vcs, sat, config, logger, tmp_working_dir):
     :return: the dictionary that stores all the archives to add in the source
              package. {label : (path_on_local_machine, path_in_archive)}
     :rtype: dict
-    '''
+    """
     # clean the source directory of all the vcs products, then use the source
     # command and thus construct an archive that will not contain the patches
     l_prod_names = [pn for pn, __ in l_pinfo_vcs]
-    if False: # clean is dangerous in user/SOURCES, fixed in tmp_local_working_dir
-      logger.write(_("\nclean sources\n"))
-      args_clean = config.VARS.application
-      args_clean += " --sources --products "
-      args_clean += ",".join(l_prod_names)
-      logger.write("WARNING: get_archives_vcs clean\n         '%s'\n" % args_clean, 1)
-      sat.clean(args_clean, batch=True, verbose=0, logger_add_link = logger)
+    if False:  # clean is dangerous in user/SOURCES, fixed in tmp_local_working_dir
+        logger.write(_("\nclean sources\n"))
+        args_clean = config.VARS.application
+        args_clean += " --sources --products "
+        args_clean += ",".join(l_prod_names)
+        logger.write("WARNING: get_archives_vcs clean\n         '%s'\n" % args_clean, 1)
+        sat.clean(args_clean, batch=True, verbose=0, logger_add_link=logger)
     if True:
-      # source
-      logger.write(_("get sources\n"))
-      args_source = config.VARS.application
-      args_source += " --products "
-      args_source += ",".join(l_prod_names)
-      svgDir = sat.cfg.APPLICATION.workdir
-      tmp_local_working_dir = os.path.join(sat.cfg.APPLICATION.workdir, "tmp_package")  # to avoid too much big files in /tmp
-      sat.cfg.APPLICATION.workdir = tmp_local_working_dir
-      # DBG.write("SSS sat config.APPLICATION.workdir", sat.cfg.APPLICATION, True)
-      # DBG.write("sat config id", id(sat.cfg), True)
-      # shit as config is not same id() as for sat.source()
-      # sat.source(args_source, batch=True, verbose=5, logger_add_link = logger)
-      import source
-      source.run(args_source, sat, logger) #use this mode as runner.cfg reference
-
-      # make the new archives
-      d_archives_vcs = {}
-      for pn, pinfo in l_pinfo_vcs:
-          path_archive = make_archive(pn, pinfo, tmp_local_working_dir)
-          logger.write("make archive vcs '%s'\n" % path_archive)
-          d_archives_vcs[pn] = (path_archive,
-                                os.path.join(ARCHIVE_DIR, pn + ".tgz"))
-      sat.cfg.APPLICATION.workdir = svgDir
-      # DBG.write("END sat config", sat.cfg.APPLICATION, True)
+        # source
+        logger.write(_("get sources\n"))
+        args_source = config.VARS.application
+        args_source += " --products "
+        args_source += ",".join(l_prod_names)
+        svgDir = sat.cfg.APPLICATION.workdir
+        tmp_local_working_dir = os.path.join(
+            sat.cfg.APPLICATION.workdir, "tmp_package"
+        )  # to avoid too many big files in /tmp
+        sat.cfg.APPLICATION.workdir = tmp_local_working_dir
+        # DBG.write("SSS sat config.APPLICATION.workdir", sat.cfg.APPLICATION, True)
+        # DBG.write("sat config id", id(sat.cfg), True)
+        # note: this config object does not have the same id() as the one used by sat.source()
+        # sat.source(args_source, batch=True, verbose=5, logger_add_link = logger)
+        import source
+
+        source.run(args_source, sat, logger)  # use this mode as runner.cfg reference
+
+        # make the new archives
+        d_archives_vcs = {}
+        for pn, pinfo in l_pinfo_vcs:
+            path_archive = make_archive(pn, pinfo, tmp_local_working_dir)
+            logger.write("make archive vcs '%s'\n" % path_archive)
+            d_archives_vcs[pn] = (path_archive, os.path.join(ARCHIVE_DIR, pn + ".tgz"))
+        sat.cfg.APPLICATION.workdir = svgDir
+        # DBG.write("END sat config", sat.cfg.APPLICATION, True)
     return d_archives_vcs
 
+
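get_archives_vcs (above) temporarily redirects sat.cfg.APPLICATION.workdir to a tmp_package directory and restores it by hand afterwards. A hedged sketch of the same idea wrapped in a context manager so the restore also happens on error; the names are illustrative, not sat's API:

    from contextlib import contextmanager

    @contextmanager
    def temporary_attr(obj, name, value):
        """Temporarily set obj.<name> to value, restoring the previous value on exit."""
        saved = getattr(obj, name)
        setattr(obj, name, value)
        try:
            yield obj
        finally:
            setattr(obj, name, saved)

    # usage sketch:
    # with temporary_attr(sat.cfg.APPLICATION, "workdir", tmp_local_working_dir):
    #     source.run(args_source, sat, logger)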
 def make_bin_archive(prod_name, prod_info, where):
-    '''Create an archive of a product by searching its source directory.
+    """Create an archive of a product by searching its source directory.
 
     :param prod_name str: The name of the product.
     :param prod_info Config: The specific configuration corresponding to the
@@ -1132,16 +1285,17 @@ def make_bin_archive(prod_name, prod_info, where):
                       archive
     :return: The path of the resulting archive
     :rtype: str
-    '''
+    """
     path_targz_prod = os.path.join(where, prod_name + PACKAGE_EXT)
-    tar_prod = tarfile.open(path_targz_prod, mode='w:gz')
+    tar_prod = tarfile.open(path_targz_prod, mode="w:gz")
     bin_path = prod_info.install_dir
     tar_prod.add(bin_path, arcname=path_targz_prod)
     tar_prod.close()
     return path_targz_prod
 
+
 def make_archive(prod_name, prod_info, where):
-    '''Create an archive of a product by searching its source directory.
+    """Create an archive of a product by searching its source directory.
 
     :param prod_name str: The name of the product.
     :param prod_info Config: The specific configuration corresponding to the
@@ -1150,23 +1304,22 @@ def make_archive(prod_name, prod_info, where):
                       archive
     :return: The path of the resulting archive
     :rtype: str
-    '''
+    """
     path_targz_prod = os.path.join(where, prod_name + PACKAGE_EXT)
-    tar_prod = tarfile.open(path_targz_prod, mode='w:gz')
+    tar_prod = tarfile.open(path_targz_prod, mode="w:gz")
     local_path = prod_info.source_dir
     if old_python:
-        tar_prod.add(local_path,
-                     arcname=prod_name,
-                     exclude=exclude_VCS_and_extensions_26)
+        tar_prod.add(
+            local_path, arcname=prod_name, exclude=exclude_VCS_and_extensions_26
+        )
     else:
-        tar_prod.add(local_path,
-                     arcname=prod_name,
-                     filter=exclude_VCS_and_extensions)
+        tar_prod.add(local_path, arcname=prod_name, filter=exclude_VCS_and_extensions)
     tar_prod.close()
     return path_targz_prod
 
+
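make_archive (above) hands tarfile.add either an exclude= callable (old Python) or a filter= callable so VCS metadata and unwanted extensions stay out of the archive. A minimal sketch of such a filter callable; the exclusion lists below are assumptions, the real exclude_VCS_and_extensions is defined elsewhere in this module:

    EXCLUDED_DIRS = {".git", ".svn", "CVS"}  # assumed, not sat's exact list
    EXCLUDED_EXTS = (".pyc", ".pyo", ".orig")  # assumed, not sat's exact list

    def exclude_vcs_and_extensions(tarinfo):
        """tarfile filter: return None to drop a member, or the member to keep it."""
        parts = tarinfo.name.split("/")
        if any(part in EXCLUDED_DIRS for part in parts):
            return None
        if tarinfo.name.endswith(EXCLUDED_EXTS):
            return None
        return tarinfo

    # usage sketch:
    # tar_prod.add(local_path, arcname=prod_name, filter=exclude_vcs_and_extensions)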
 def create_project_for_src_package(config, tmp_working_dir, with_vcs, with_ftp):
-    '''Create a specific project for a source package.
+    """Create a specific project for a source package.
 
     :param config Config: The global configuration.
     :param tmp_working_dir str: The temporary local directory containing some
@@ -1178,32 +1331,24 @@ def create_project_for_src_package(config, tmp_working_dir, with_vcs, with_ftp):
     :return: The dictionary
              {"project" : (produced project, project path in the archive)}
     :rtype: Dict
-    '''
+    """
 
     # Create in the working temporary directory the full project tree
     project_tmp_dir = os.path.join(tmp_working_dir, PROJECT_DIR)
-    products_pyconf_tmp_dir = os.path.join(project_tmp_dir,
-                                         "products")
-    compil_scripts_tmp_dir = os.path.join(project_tmp_dir,
-                                         "products",
-                                         "compil_scripts")
-    post_scripts_tmp_dir = os.path.join(project_tmp_dir,
-                                         "products",
-                                         "post_scripts")
-    env_scripts_tmp_dir = os.path.join(project_tmp_dir,
-                                         "products",
-                                         "env_scripts")
-    patches_tmp_dir = os.path.join(project_tmp_dir,
-                                         "products",
-                                         "patches")
-    application_tmp_dir = os.path.join(project_tmp_dir,
-                                         "applications")
-    for directory in [project_tmp_dir,
-                      compil_scripts_tmp_dir,
-                      env_scripts_tmp_dir,
-                      post_scripts_tmp_dir,
-                      patches_tmp_dir,
-                      application_tmp_dir]:
+    products_pyconf_tmp_dir = os.path.join(project_tmp_dir, "products")
+    compil_scripts_tmp_dir = os.path.join(project_tmp_dir, "products", "compil_scripts")
+    post_scripts_tmp_dir = os.path.join(project_tmp_dir, "products", "post_scripts")
+    env_scripts_tmp_dir = os.path.join(project_tmp_dir, "products", "env_scripts")
+    patches_tmp_dir = os.path.join(project_tmp_dir, "products", "patches")
+    application_tmp_dir = os.path.join(project_tmp_dir, "applications")
+    for directory in [
+        project_tmp_dir,
+        compil_scripts_tmp_dir,
+        env_scripts_tmp_dir,
+        post_scripts_tmp_dir,
+        patches_tmp_dir,
+        application_tmp_dir,
+    ]:
         src.ensure_path_exists(directory)
 
     # Create the pyconf that contains the information of the project
@@ -1212,22 +1357,21 @@ def create_project_for_src_package(config, tmp_working_dir, with_vcs, with_ftp):
     ff = open(project_pyconf_file, "w")
     ff.write(PROJECT_TEMPLATE)
     if with_ftp and len(config.PATHS.ARCHIVEFTP) > 0:
-        ftp_path='ARCHIVEFTP : "'+config.PATHS.ARCHIVEFTP[0]
+        ftp_path = 'ARCHIVEFTP : "' + config.PATHS.ARCHIVEFTP[0]
         for ftpserver in config.PATHS.ARCHIVEFTP[1:]:
-            ftp_path=ftp_path+":"+ftpserver
-        ftp_path+='"'
+            ftp_path = ftp_path + ":" + ftpserver
+        ftp_path += '"'
         ff.write("# ftp servers where to search for prerequisite archives\n")
         ff.write(ftp_path)
     # add licence paths if any
     if len(config.PATHS.LICENCEPATH) > 0:
-        licence_path='LICENCEPATH : "'+config.PATHS.LICENCEPATH[0]
+        licence_path = 'LICENCEPATH : "' + config.PATHS.LICENCEPATH[0]
         for path in config.PATHS.LICENCEPATH[1:]:
-            licence_path=licence_path+":"+path
-        licence_path+='"'
+            licence_path = licence_path + ":" + path
+        licence_path += '"'
         ff.write("\n# Where to search for licences\n")
         ff.write(licence_path)
 
-
     ff.close()
 
     # Loop over the products to get their pyconf and all the scripts
@@ -1239,34 +1383,39 @@ def create_project_for_src_package(config, tmp_working_dir, with_vcs, with_ftp):
         # skip product with property not_in_package set to yes
         if src.get_property_in_product_cfg(p_info, "not_in_package") == "yes":
             continue
-        find_product_scripts_and_pyconf(p_name,
-                                        p_info,
-                                        config,
-                                        with_vcs,
-                                        compil_scripts_tmp_dir,
-                                        env_scripts_tmp_dir,
-                                        post_scripts_tmp_dir,
-                                        patches_tmp_dir,
-                                        products_pyconf_tmp_dir)
+        find_product_scripts_and_pyconf(
+            p_name,
+            p_info,
+            config,
+            with_vcs,
+            compil_scripts_tmp_dir,
+            env_scripts_tmp_dir,
+            post_scripts_tmp_dir,
+            patches_tmp_dir,
+            products_pyconf_tmp_dir,
+        )
 
     # for the application pyconf, we write directly the config
     # don't search for the original pyconf file
     # to avoid problems with overwrite sections and rm_products key
     write_application_pyconf(config, application_tmp_dir)
 
-    d_project = {"project" : (project_tmp_dir, PROJECT_DIR )}
+    d_project = {"project": (project_tmp_dir, PROJECT_DIR)}
     return d_project
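
create_project_for_src_package (above) builds its products/, compil_scripts/, env_scripts/, post_scripts/, patches/ and applications/ tree through src.ensure_path_exists. A one-function sketch of that helper with os.makedirs; sat's own helper may differ in details:

    import os

    def ensure_path_exists(directory):
        """Create the directory and any missing parents; do nothing if it already exists."""
        if not os.path.exists(directory):
            os.makedirs(directory)

    # usage sketch:
    # for directory in [project_tmp_dir, products_pyconf_tmp_dir, patches_tmp_dir]:
    #     ensure_path_exists(directory)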
 
-def find_product_scripts_and_pyconf(p_name,
-                                    p_info,
-                                    config,
-                                    with_vcs,
-                                    compil_scripts_tmp_dir,
-                                    env_scripts_tmp_dir,
-                                    post_scripts_tmp_dir,
-                                    patches_tmp_dir,
-                                    products_pyconf_tmp_dir):
-    '''Create a specific pyconf file for a given product. Get its environment
+
+def find_product_scripts_and_pyconf(
+    p_name,
+    p_info,
+    config,
+    with_vcs,
+    compil_scripts_tmp_dir,
+    env_scripts_tmp_dir,
+    post_scripts_tmp_dir,
+    patches_tmp_dir,
+    products_pyconf_tmp_dir,
+):
+    """Create a specific pyconf file for a given product. Get its environment
        script, its compilation script and patches and put it in the temporary
        working directory. This method is used in the source package in order to
        construct the specific project.
@@ -1287,7 +1436,7 @@ def find_product_scripts_and_pyconf(p_name,
                                 directory of the project.
     :param products_pyconf_tmp_dir str: The path to the temporary product
                                         scripts directory of the project.
-    '''
+    """
 
     # read the pyconf of the product
     product_pyconf_cfg = src.pyconf.Config(p_info.from_file)
@@ -1319,22 +1468,36 @@ def find_product_scripts_and_pyconf(p_name,
         # in non-vcs mode, if the product is not an archive product, then turn it into one.
 
         # depending upon the incremental mode, select impacted sections
-        if "properties" in p_info and "incremental" in p_info.properties and\
-            p_info.properties.incremental == "yes":
-            sections = ["default", "default_win", p_info.section, p_info.section+"_win"]
+        if (
+            "properties" in p_info
+            and "incremental" in p_info.properties
+            and p_info.properties.incremental == "yes"
+        ):
+            sections = [
+                "default",
+                "default_win",
+                p_info.section,
+                p_info.section + "_win",
+            ]
         else:
             sections = [p_info.section]
         for section in sections:
-            if section in product_pyconf_cfg and "get_source" in product_pyconf_cfg[section]:
-                DBG.write("sat package set archive mode to archive for product %s and section %s" %\
-                          (p_name,section))
+            if (
+                section in product_pyconf_cfg
+                and "get_source" in product_pyconf_cfg[section]
+            ):
+                DBG.write(
+                    "sat package set archive mode to archive for product %s and section %s"
+                    % (p_name, section)
+                )
                 product_pyconf_cfg[section].get_source = "archive"
                 if not "archive_info" in product_pyconf_cfg[section]:
-                    product_pyconf_cfg[section].addMapping("archive_info",
-                                        src.pyconf.Mapping(product_pyconf_cfg),
-                                        "")
-                    product_pyconf_cfg[section].archive_info.archive_name =\
+                    product_pyconf_cfg[section].addMapping(
+                        "archive_info", src.pyconf.Mapping(product_pyconf_cfg), ""
+                    )
+                    product_pyconf_cfg[section].archive_info.archive_name = (
                         p_info.name + ".tgz"
+                    )
 
     # save git repositories for vcs products, even if archive is not in VCS mode
     # in this case the user will be able to change get_source flag and work with git
@@ -1346,30 +1509,32 @@ def find_product_scripts_and_pyconf(p_name,
             if "git_info" in product_pyconf_cfg[section]:
                 for repo in product_pyconf_cfg[section].git_info:
                     if repo in p_info.git_info:
-                        product_pyconf_cfg[section].git_info[repo] =  p_info.git_info[repo]
+                        product_pyconf_cfg[section].git_info[repo] = p_info.git_info[
+                            repo
+                        ]
 
     # write the pyconf file to the temporary project location
-    product_tmp_pyconf_path = os.path.join(products_pyconf_tmp_dir,
-                                           p_name + ".pyconf")
-    ff = open(product_tmp_pyconf_path, 'w')
+    product_tmp_pyconf_path = os.path.join(products_pyconf_tmp_dir, p_name + ".pyconf")
+    ff = open(product_tmp_pyconf_path, "w")
     ff.write("#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n")
     product_pyconf_cfg.__save__(ff, 1)
     ff.close()
 
 
 def write_application_pyconf(config, application_tmp_dir):
-    '''Write the application pyconf file in the specific temporary
+    """Write the application pyconf file in the specific temporary
        directory containing the specific project of a source package.
 
     :param config Config: The global configuration.
     :param application_tmp_dir str: The path to the temporary application
                                     scripts directory of the project.
-    '''
+    """
     application_name = config.VARS.application
     # write the pyconf file to the temporary application location
-    application_tmp_pyconf_path = os.path.join(application_tmp_dir,
-                                               application_name + ".pyconf")
-    with open(application_tmp_pyconf_path, 'w') as f:
+    application_tmp_pyconf_path = os.path.join(
+        application_tmp_dir, application_name + ".pyconf"
+    )
+    with open(application_tmp_pyconf_path, "w") as f:
         f.write("#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n")
         res = src.pyconf.Config()
         app = src.pyconf.deepCopyMapping(config.APPLICATION)
@@ -1378,16 +1543,13 @@ def write_application_pyconf(config, application_tmp_dir):
         app.base = "no"
 
         # Change the workdir
-        app.workdir = src.pyconf.Reference(
-                                 app,
-                                 src.pyconf.DOLLAR,
-                                 'LOCAL.workdir')
+        app.workdir = src.pyconf.Reference(app, src.pyconf.DOLLAR, "LOCAL.workdir")
         res.addMapping("APPLICATION", app, "")
         res.__save__(f, evaluated=False)
 
 
 def sat_package(config, tmp_working_dir, options, logger):
-    '''Prepare a dictionary that stores all the needed directories and files to
+    """Prepare a dictionary that stores all the needed directories and files to
        add in a salomeTool package.
 
     :param tmp_working_dir str: The temporary local working directory
@@ -1396,17 +1558,17 @@ def sat_package(config, tmp_working_dir, options, logger):
              add in a salomeTool package.
              {label : (path_on_local_machine, path_in_archive)}
     :rtype: dict
-    '''
+    """
     d_project = {}
 
     # we include sat itself
-    d_project["all_sat"]=(config.VARS.salometoolsway, "")
+    d_project["all_sat"] = (config.VARS.salometoolsway, "")
 
     # and we overwrite local.pyconf with a clean version.
     local_pyconf_tmp_path = os.path.join(tmp_working_dir, "local.pyconf")
     local_file_path = os.path.join(config.VARS.datadir, "local.pyconf")
     local_cfg = src.pyconf.Config(local_file_path)
-    local_cfg.PROJECTS.project_file_paths=src.pyconf.Sequence(local_cfg.PROJECTS)
+    local_cfg.PROJECTS.project_file_paths = src.pyconf.Sequence(local_cfg.PROJECTS)
     local_cfg.LOCAL["base"] = "default"
     local_cfg.LOCAL["workdir"] = "default"
     local_cfg.LOCAL["log_dir"] = "default"
@@ -1416,19 +1578,28 @@ def sat_package(config, tmp_working_dir, options, logger):
 
     # if the archive contains a project, we write its relative path in local.pyconf
     if options.project:
-        project_arch_path = os.path.join("projects", options.project,
-                                         os.path.basename(options.project_file_path))
+        project_arch_path = os.path.join(
+            "projects", options.project, os.path.basename(options.project_file_path)
+        )
         local_cfg.PROJECTS.project_file_paths.append(project_arch_path, "")
 
-    ff = open(local_pyconf_tmp_path, 'w')
+    ff = open(local_pyconf_tmp_path, "w")
     local_cfg.__save__(ff, 1)
     ff.close()
-    d_project["local.pyconf"]=(local_pyconf_tmp_path, "data/local.pyconf")
+    d_project["local.pyconf"] = (local_pyconf_tmp_path, "data/local.pyconf")
     return d_project
 
 
-def project_package(config, name_project, project_file_path, ftp_mode, tmp_working_dir, embedded_in_sat, logger):
-    '''Prepare a dictionary that stores all the needed directories and files to
+def project_package(
+    config,
+    name_project,
+    project_file_path,
+    ftp_mode,
+    tmp_working_dir,
+    embedded_in_sat,
+    logger,
+):
+    """Prepare a dictionary that stores all the needed directories and files to
        add in a project package.
 
     :param project_file_path str: The path to the local project.
@@ -1441,22 +1612,27 @@ def project_package(config, name_project, project_file_path, ftp_mode, tmp_worki
              add in a project package.
              {label : (path_on_local_machine, path_in_archive)}
     :rtype: dict
-    '''
+    """
     d_project = {}
     # Read the project file and get the directories to add to the package
 
     try:
-      project_pyconf_cfg = config.PROJECTS.projects.__getattr__(name_project)
+        project_pyconf_cfg = config.PROJECTS.projects.__getattr__(name_project)
     except:
-      logger.write("""
-WARNING: inexisting config.PROJECTS.projects.%s, try to read now from:\n%s\n""" % (name_project, project_file_path))
-      project_pyconf_cfg = src.pyconf.Config(project_file_path)
-      project_pyconf_cfg.PWD = os.path.dirname(project_file_path)
-
-    paths = {"APPLICATIONPATH" : "applications",
-             "PRODUCTPATH" : "products",
-             "JOBPATH" : "jobs",
-             "MACHINEPATH" : "machines"}
+        logger.write(
+            """
+WARNING: missing config.PROJECTS.projects.%s, trying to read it now from:\n%s\n"""
+            % (name_project, project_file_path)
+        )
+        project_pyconf_cfg = src.pyconf.Config(project_file_path)
+        project_pyconf_cfg.PWD = os.path.dirname(project_file_path)
+
+    paths = {
+        "APPLICATIONPATH": "applications",
+        "PRODUCTPATH": "products",
+        "JOBPATH": "jobs",
+        "MACHINEPATH": "machines",
+    }
     if not ftp_mode:
         paths["ARCHIVEPATH"] = "archives"
 
@@ -1467,7 +1643,9 @@ WARNING: inexisting config.PROJECTS.projects.%s, try to read now from:\n%s\n"""
             continue
         if embedded_in_sat:
             dest_path = os.path.join("projects", name_project, paths[path])
-            project_file_dest = os.path.join("projects", name_project, project_file_name)
+            project_file_dest = os.path.join(
+                "projects", name_project, project_file_name
+            )
         else:
             dest_path = paths[path]
             project_file_dest = project_file_name
@@ -1477,18 +1655,19 @@ WARNING: inexisting config.PROJECTS.projects.%s, try to read now from:\n%s\n"""
 
         # Modify the value of the path in the package
         project_pyconf_cfg[path] = src.pyconf.Reference(
-                                    project_pyconf_cfg,
-                                    src.pyconf.DOLLAR,
-                                    'project_path + "/' + paths[path] + '"')
+            project_pyconf_cfg,
+            src.pyconf.DOLLAR,
+            'project_path + "/' + paths[path] + '"',
+        )
 
     # Modify some values
     if "project_path" not in project_pyconf_cfg:
-        project_pyconf_cfg.addMapping("project_path",
-                                      src.pyconf.Mapping(project_pyconf_cfg),
-                                      "")
-    project_pyconf_cfg.project_path = src.pyconf.Reference(project_pyconf_cfg,
-                                                           src.pyconf.DOLLAR,
-                                                           'PWD')
+        project_pyconf_cfg.addMapping(
+            "project_path", src.pyconf.Mapping(project_pyconf_cfg), ""
+        )
+    project_pyconf_cfg.project_path = src.pyconf.Reference(
+        project_pyconf_cfg, src.pyconf.DOLLAR, "PWD"
+    )
     # we don't want to export these two fields
     project_pyconf_cfg.__delitem__("file_path")
     project_pyconf_cfg.__delitem__("PWD")
@@ -1497,7 +1676,7 @@ WARNING: inexisting config.PROJECTS.projects.%s, try to read now from:\n%s\n"""
 
     # Write the project pyconf file
     project_pyconf_tmp_path = os.path.join(tmp_working_dir, project_file_name)
-    ff = open(project_pyconf_tmp_path, 'w')
+    ff = open(project_pyconf_tmp_path, "w")
     ff.write("#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n")
     project_pyconf_cfg.__save__(ff, 1)
     ff.close()
@@ -1505,12 +1684,13 @@ WARNING: inexisting config.PROJECTS.projects.%s, try to read now from:\n%s\n"""
 
     return d_project
 
+
 def add_readme(config, options, where):
     readme_path = os.path.join(where, "README")
-    with codecs.open(readme_path, "w", 'utf-8') as f:
+    with codecs.open(readme_path, "w", "utf-8") as f:
 
-    # templates for building the header
-        readme_header="""
+        # templates for building the header
+        readme_header = """
 # This package was generated with sat $version
 # Date: $date
 # User: $user
@@ -1521,8 +1701,8 @@ SALOME (the directory where this file is located).
 
 """
         if src.architecture.is_windows():
-            readme_header = readme_header.replace('$$ROOT','%ROOT%')
-        readme_compilation_with_binaries="""
+            readme_header = readme_header.replace("$$ROOT", "%ROOT%")
+        readme_compilation_with_binaries = """
 
 compilation based on the binaries used as prerequisites
 =======================================================
@@ -1542,48 +1722,54 @@ The procedure to do it is:
     modules you need to (with -p option)
 
 """
-        readme_header_tpl=string.Template(readme_header)
-        readme_template_path_bin = os.path.join(config.VARS.internal_dir,
-                "README_BIN.template")
-        readme_template_path_bin_launcher = os.path.join(config.VARS.internal_dir,
-                "README_LAUNCHER.template")
-        readme_template_path_bin_virtapp = os.path.join(config.VARS.internal_dir,
-                "README_BIN_VIRTUAL_APP.template")
-        readme_template_path_src = os.path.join(config.VARS.internal_dir,
-                "README_SRC.template")
-        readme_template_path_pro = os.path.join(config.VARS.internal_dir,
-                "README_PROJECT.template")
-        readme_template_path_sat = os.path.join(config.VARS.internal_dir,
-                "README_SAT.template")
+        readme_header_tpl = string.Template(readme_header)
+        readme_template_path_bin = os.path.join(
+            config.VARS.internal_dir, "README_BIN.template"
+        )
+        readme_template_path_bin_launcher = os.path.join(
+            config.VARS.internal_dir, "README_LAUNCHER.template"
+        )
+        readme_template_path_bin_virtapp = os.path.join(
+            config.VARS.internal_dir, "README_BIN_VIRTUAL_APP.template"
+        )
+        readme_template_path_src = os.path.join(
+            config.VARS.internal_dir, "README_SRC.template"
+        )
+        readme_template_path_pro = os.path.join(
+            config.VARS.internal_dir, "README_PROJECT.template"
+        )
+        readme_template_path_sat = os.path.join(
+            config.VARS.internal_dir, "README_SAT.template"
+        )
 
         # prepare substitution dictionary
         d = dict()
-        d['user'] = config.VARS.user
-        d['date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-        d['version'] = src.get_salometool_version(config)
-        d['dist'] = config.VARS.dist
-        f.write(readme_header_tpl.substitute(d)) # write the general header (common)
+        d["user"] = config.VARS.user
+        d["date"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+        d["version"] = src.get_salometool_version(config)
+        d["dist"] = config.VARS.dist
+        f.write(readme_header_tpl.substitute(d))  # write the general header (common)
 
         if options.binaries or options.sources:
-            d['application'] = config.VARS.application
-            d['BINARIES']    = config.INTERNAL.config.binary_dir
-            d['SEPARATOR'] = config.VARS.sep
+            d["application"] = config.VARS.application
+            d["BINARIES"] = config.INTERNAL.config.binary_dir
+            d["SEPARATOR"] = config.VARS.sep
             if src.architecture.is_windows():
-                d['operatingSystem'] = 'Windows'
-                d['PYTHON3'] = 'python3'
-                d['ROOT']    = '%ROOT%'
+                d["operatingSystem"] = "Windows"
+                d["PYTHON3"] = "python3"
+                d["ROOT"] = "%ROOT%"
             else:
-                d['operatingSystem'] = 'Linux'
-                d['PYTHON3'] = ''
-                d['ROOT']    = '$ROOT'
-            f.write("# Application: " + d['application'] + "\n")
-            if 'KERNEL' in config.APPLICATION.products:
+                d["operatingSystem"] = "Linux"
+                d["PYTHON3"] = ""
+                d["ROOT"] = "$ROOT"
+            f.write("# Application: " + d["application"] + "\n")
+            if "KERNEL" in config.APPLICATION.products:
                 VersionSalome = src.get_salome_version(config)
                 # Case where SALOME has the launcher that uses the SalomeContext API
-                if VersionSalome >= MMP([7,3,0]):
-                    d['launcher'] = config.APPLICATION.profile.launcher_name
+                if VersionSalome >= MMP([7, 3, 0]):
+                    d["launcher"] = config.APPLICATION.profile.launcher_name
                 else:
-                    d['virtual_app'] = 'runAppli' # this info is not used now)
+                    d["virtual_app"] = "runAppli"  # this info is not used now)
 
         # write the specific sections
         if options.binaries:
@@ -1607,13 +1793,14 @@ The procedure to do it is:
 
     return readme_path
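
add_readme (above) fills its README header through string.Template, substituting placeholders such as $version, $date and $user. A self-contained sketch of that substitution with placeholder values; the real values come from config.VARS and src.get_salometool_version:

    import datetime
    import string

    readme_header = """\
    # This package was generated with sat $version
    # Date: $date
    # User: $user
    """

    values = {
        "version": "9.x",  # placeholder, not a real sat version lookup
        "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
        "user": "someone",  # placeholder
    }
    print(string.Template(readme_header).substitute(values))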
 
-def update_config(config, logger,  prop, value):
-    '''Remove from config.APPLICATION.products the products that have the property given as input.
+
+def update_config(config, logger, prop, value):
+    """Remove from config.APPLICATION.products the products that have the property given as input.
 
     :param config Config: The global config.
     :param prop str: The property to filter
     :param value str: The value of the property to filter
-    '''
+    """
     # if there is no APPLICATION (ex sat package -t) : nothing to do
     if "APPLICATION" in config:
         l_product_to_remove = []
@@ -1623,15 +1810,19 @@ def update_config(config, logger,  prop, value):
                 l_product_to_remove.append(product_name)
         for product_name in l_product_to_remove:
             config.APPLICATION.products.__delitem__(product_name)
-            logger.write("Remove product %s with property %s\n" % (product_name, prop), 5)
+            logger.write(
+                "Remove product %s with property %s\n" % (product_name, prop), 5
+            )
+
 
 def description():
-    '''method that is called when salomeTools is called with --help option.
+    """method that is called when salomeTools is called with --help option.
 
     :return: The text to display for the package command description.
     :rtype: str
-    '''
-    return _("""
+    """
+    return _(
+        """
 The package command creates a tar file archive of a product.
 There are four kinds of archive, which can be mixed:
 
@@ -1645,36 +1836,42 @@ There are four kinds of archive, which can be mixed:
      It contains code utility salomeTools.
 
 example:
- >> sat package SALOME-master --binaries --sources""")
+ >> sat package SALOME-master --binaries --sources"""
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with package parameter.
-    '''
+    """method that is called when salomeTools is called with package parameter."""
 
     # Parse the options
     (options, args) = parser.parse_args(args)
 
-
     # Check that a type of package is called, and only one
-    all_option_types = (options.binaries,
-                        options.sources,
-                        options.project not in ["", None],
-                        options.sat,
-                        options.bin_products)
+    all_option_types = (
+        options.binaries,
+        options.sources,
+        options.project not in ["", None],
+        options.sat,
+        options.bin_products,
+    )
 
     # Check if no option for package type
     if all_option_types.count(True) == 0:
-        msg = _("Error: Precise a type for the package\nUse one of the "
-                "following options: --binaries, --sources, --project or"
-                " --salometools, --bin_products")
+        msg = _(
+            "Error: Precise a type for the package\nUse one of the "
+            "following options: --binaries, --sources, --project or"
+            " --salometools, --bin_products"
+        )
         logger.write(src.printcolors.printcError(msg), 1)
         logger.write("\n", 1)
         return 1
-    do_create_package = options.binaries or options.sources or options.project or options.sat
+    do_create_package = (
+        options.binaries or options.sources or options.project or options.sat
+    )
 
     if options.bin_products:
         ret = bin_products_archives(runner.cfg, logger, options.with_vcs)
-        if ret!=0:
+        if ret != 0:
             return ret
     if not do_create_package:
         return 0
@@ -1689,8 +1886,11 @@ def run(args, runner, logger):
         src.check_config_has_application(runner.cfg)
 
         # Display information
-        logger.write(_("Packaging application %s\n") % src.printcolors.printcLabel(
-                                                    runner.cfg.VARS.application), 1)
+        logger.write(
+            _("Packaging application %s\n")
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            1,
+        )
 
         # Get the default directory where to put the packages
         package_default_path = os.path.join(runner.cfg.APPLICATION.workdir, "PACKAGE")
@@ -1708,20 +1908,30 @@ def run(args, runner, logger):
                 break
 
         if foundProject is None:
-            local_path = os.path.join(runner.cfg.VARS.salometoolsway, "data", "local.pyconf")
-            msg = _("""ERROR: the project %(1)s is not visible by salomeTools.
+            local_path = os.path.join(
+                runner.cfg.VARS.salometoolsway, "data", "local.pyconf"
+            )
+            msg = _(
+                """ERROR: the project %(1)s is not visible by salomeTools.
 known projects are:
 %(2)s
 
 Please add it in file:
-%(3)s""" % \
-                    {"1": options.project, "2": "\n  ".join(runner.cfg.PROJECTS.project_file_paths), "3": local_path})
+%(3)s"""
+                % {
+                    "1": options.project,
+                    "2": "\n  ".join(runner.cfg.PROJECTS.project_file_paths),
+                    "3": local_path,
+                }
+            )
             logger.write(src.printcolors.printcError(msg), 1)
             logger.write("\n", 1)
             return 1
         else:
             options.project_file_path = foundProject
-            src.printcolors.print_value(logger, "Project path", options.project_file_path, 2)
+            src.printcolors.print_value(
+                logger, "Project path", options.project_file_path, 2
+            )
 
     # Remove the products that are filtered by the --without_properties option
     if options.without_properties:
@@ -1742,19 +1952,19 @@ Please add it in file:
             dir_name = os.path.dirname(options.name)
 
         # strip the extension
-        if archive_name[-len(".tgz"):] == ".tgz":
-            archive_name = archive_name[:-len(".tgz")]
-        if archive_name[-len(".tar.gz"):] == ".tar.gz":
-            archive_name = archive_name[:-len(".tar.gz")]
+        if archive_name[-len(".tgz") :] == ".tgz":
+            archive_name = archive_name[: -len(".tgz")]
+        if archive_name[-len(".tar.gz") :] == ".tar.gz":
+            archive_name = archive_name[: -len(".tar.gz")]
 
     else:
-        archive_name=""
+        archive_name = ""
         dir_name = package_default_path
         if options.binaries or options.sources:
             archive_name = runner.cfg.APPLICATION.name
 
         if options.binaries:
-            archive_name += "-"+runner.cfg.VARS.dist
+            archive_name += "-" + runner.cfg.VARS.dist
 
         if options.sources:
             archive_name += "-SRC"
@@ -1762,18 +1972,20 @@ Please add it in file:
                 archive_name += "-VCS"
 
         if options.sat:
-            archive_name += ("salomeTools_" + src.get_salometool_version(runner.cfg))
+            archive_name += "salomeTools_" + src.get_salometool_version(runner.cfg)
 
         if options.project:
             if options.sat:
                 archive_name += "_"
-            archive_name += ("satproject_" + options.project)
-
-        if len(archive_name)==0: # no option worked
-            msg = _("Error: Cannot name the archive\n"
-                    " check if at least one of the following options was "
-                    "selected : --binaries, --sources, --project or"
-                    " --salometools")
+            archive_name += "satproject_" + options.project
+
+        if len(archive_name) == 0:  # no option worked
+            msg = _(
+                "Error: Cannot name the archive\n"
+                " check if at least one of the following options was "
+                "selected : --binaries, --sources, --project or"
+                " --salometools"
+            )
             logger.write(src.printcolors.printcError(msg), 1)
             logger.write("\n", 1)
             return 1
@@ -1787,7 +1999,7 @@ Please add it in file:
     tmp_working_dir = os.path.join(runner.cfg.VARS.tmp_root, runner.cfg.VARS.datehour)
     src.ensure_path_exists(tmp_working_dir)
     logger.write("\n", 5)
-    logger.write(_("The temporary working directory: %s\n" % tmp_working_dir),5)
+    logger.write(_("The temporary working directory: %s\n" % tmp_working_dir), 5)
 
     logger.write("\n", 3)
 
@@ -1795,46 +2007,48 @@ Please add it in file:
     logger.write(src.printcolors.printcLabel(msg), 2)
     logger.write("\n", 2)
 
-    d_files_to_add={}  # content of the archive
+    d_files_to_add = {}  # content of the archive
 
     # a dict to hold paths that will need to be substituted for user recompilations
-    d_paths_to_substitute={}
+    d_paths_to_substitute = {}
 
     if options.binaries:
-        d_bin_files_to_add = binary_package(runner.cfg,
-                                            logger,
-                                            options,
-                                            tmp_working_dir)
+        d_bin_files_to_add = binary_package(
+            runner.cfg, logger, options, tmp_working_dir
+        )
         # for all binaries dir, store the substitution that will be required
         # for extra compilations
         for key in d_bin_files_to_add:
             if key.endswith("(bin)"):
                 source_dir = d_bin_files_to_add[key][0]
                 path_in_archive = d_bin_files_to_add[key][1].replace(
-                   runner.cfg.INTERNAL.config.binary_dir + runner.cfg.VARS.dist,
-                   runner.cfg.INTERNAL.config.install_dir)
-                if os.path.basename(source_dir)==os.path.basename(path_in_archive):
+                    runner.cfg.INTERNAL.config.binary_dir + runner.cfg.VARS.dist,
+                    runner.cfg.INTERNAL.config.install_dir,
+                )
+                if os.path.basename(source_dir) == os.path.basename(path_in_archive):
                     # if basename is the same we will just substitute the dirname
-                    d_paths_to_substitute[os.path.dirname(source_dir)]=\
-                        os.path.dirname(path_in_archive)
+                    d_paths_to_substitute[
+                        os.path.dirname(source_dir)
+                    ] = os.path.dirname(path_in_archive)
                 else:
-                    d_paths_to_substitute[source_dir]=path_in_archive
+                    d_paths_to_substitute[source_dir] = path_in_archive
 
         d_files_to_add.update(d_bin_files_to_add)
     if options.sources:
-        d_files_to_add.update(source_package(runner,
-                                        runner.cfg,
-                                        logger,
-                                        options,
-                                        tmp_working_dir))
+        d_files_to_add.update(
+            source_package(runner, runner.cfg, logger, options, tmp_working_dir)
+        )
         if options.binaries:
             # for archives with bin and sources we provide a shell script able to
             # install binaries for compilation
-            file_install_bin=produce_install_bin_file(runner.cfg,logger,
-                                                      tmp_working_dir,
-                                                      d_paths_to_substitute,
-                                                      "install_bin.sh")
-            d_files_to_add.update({"install_bin" : (file_install_bin, "install_bin.sh")})
+            file_install_bin = produce_install_bin_file(
+                runner.cfg,
+                logger,
+                tmp_working_dir,
+                d_paths_to_substitute,
+                "install_bin.sh",
+            )
+            d_files_to_add.update({"install_bin": (file_install_bin, "install_bin.sh")})
             logger.write("substitutions that need to be done later : \n", 5)
             logger.write(str(d_paths_to_substitute), 5)
             logger.write("\n", 5)
@@ -1842,14 +2056,25 @@ Please add it in file:
         # --salomeTool option is not considered when --sources is selected, as this option
         # already brings salomeTool!
         if options.sat:
-            d_files_to_add.update(sat_package(runner.cfg, tmp_working_dir,
-                                  options, logger))
+            d_files_to_add.update(
+                sat_package(runner.cfg, tmp_working_dir, options, logger)
+            )
 
     if options.project:
         DBG.write("config for package %s" % options.project, runner.cfg)
-        d_files_to_add.update(project_package(runner.cfg, options.project, options.project_file_path, options.ftp, tmp_working_dir, options.sat, logger))
-
-    if not(d_files_to_add):
+        d_files_to_add.update(
+            project_package(
+                runner.cfg,
+                options.project,
+                options.project_file_path,
+                options.ftp,
+                tmp_working_dir,
+                options.sat,
+                logger,
+            )
+        )
+
+    if not (d_files_to_add):
         msg = _("Error: Empty dictionnary to build the archive!\n")
         logger.write(src.printcolors.printcError(msg), 1)
         logger.write("\n", 1)
@@ -1859,7 +2084,7 @@ Please add it in file:
     local_readme_tmp_path = add_readme(runner.cfg, options, tmp_working_dir)
     d_files_to_add["README"] = (local_readme_tmp_path, "README")
 
-    # Add the additional files of option add_files
+    # Add the additional files of option add_files
     if options.add_files:
         for file_path in options.add_files:
             if not os.path.exists(file_path):
@@ -1871,12 +2096,14 @@ Please add it in file:
     logger.write("\n", 2)
     logger.write(src.printcolors.printcLabel(_("Actually do the package")), 2)
     logger.write("\n", 2)
-    logger.write("\nfiles and directories to add:\n%s\n\n" % PP.pformat(d_files_to_add), 5)
+    logger.write(
+        "\nfiles and directories to add:\n%s\n\n" % PP.pformat(d_files_to_add), 5
+    )
 
     res = 0
     try:
         # Creating the object tarfile
-        tar = tarfile.open(path_targz, mode='w:gz')
+        tar = tarfile.open(path_targz, mode="w:gz")
 
         # get the filtering function if needed
         if old_python:
@@ -1885,11 +2112,15 @@ Please add it in file:
             filter_function = exclude_VCS_and_extensions
 
         # Add the files to the tarfile object
-        res = add_files(tar, archive_name, d_files_to_add, logger, f_exclude=filter_function)
+        res = add_files(
+            tar, archive_name, d_files_to_add, logger, f_exclude=filter_function
+        )
         tar.close()
     except KeyboardInterrupt:
         logger.write(src.printcolors.printcError("\nERROR: forced interruption\n"), 1)
-        logger.write(_("Removing the temporary working directory '%s'... ") % tmp_working_dir, 1)
+        logger.write(
+            _("Removing the temporary working directory '%s'... ") % tmp_working_dir, 1
+        )
         # remove the working directory
         shutil.rmtree(tmp_working_dir)
         logger.write(_("OK"), 1)
index ec438655fd61df9be55ada8452628d251ee8fc97..b1fe135bdad4c7f033e14c80f6b8edf7f078f4b0 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,36 +25,46 @@ import prepare
 
 # Define all possible option for patch command :  sat patch <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to get the sources. This option accepts a comma separated list.'))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _(
+        "Optional: products to get the sources. This option accepts a comma separated list."
+    ),
+)
+
 
 def apply_patch(config, product_info, max_product_name_len, logger):
-    '''The method called to apply patches on a product
+    """The method called to apply patches on a product
 
     :param config Config: The global configuration
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be patched
     :param logger Logger: The logger instance to use for the display and logging
     :return: (True if it succeed, else False, message to display)
     :rtype: (boolean, str)
-    '''
+    """
 
     # if the product is native, do not apply patch
     if src.product.product_is_native(product_info):
         # display and log
-        logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 4)
-        logger.write(' ' * (max_product_name_len - len(product_info.name)), 4, False)
+        logger.write("%s: " % src.printcolors.printcLabel(product_info.name), 4)
+        logger.write(" " * (max_product_name_len - len(product_info.name)), 4, False)
         logger.write("\n", 4, False)
-        msg = _("The %s product is native. Do not apply "
-                "any patch.") % product_info.name
+        msg = (
+            _("The %s product is native. Do not apply " "any patch.")
+            % product_info.name
+        )
         logger.write(msg, 4)
         logger.write("\n", 4)
-        return True, ""       
+        return True, ""
 
     if not "patches" in product_info or len(product_info.patches) == 0:
         # display and log
-        logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 4)
-        logger.write(' ' * (max_product_name_len - len(product_info.name)), 4, False)
+        logger.write("%s: " % src.printcolors.printcLabel(product_info.name), 4)
+        logger.write(" " * (max_product_name_len - len(product_info.name)), 4, False)
         logger.write("\n", 4, False)
         msg = _("No patch for the %s product") % product_info.name
         logger.write(msg, 4)
@@ -62,8 +72,8 @@ def apply_patch(config, product_info, max_product_name_len, logger):
         return True, ""
     else:
         # display and log
-        logger.write('%s: ' % src.printcolors.printcLabel(product_info.name), 3)
-        logger.write(' ' * (max_product_name_len - len(product_info.name)), 3, False)
+        logger.write("%s: " % src.printcolors.printcLabel(product_info.name), 3)
+        logger.write(" " * (max_product_name_len - len(product_info.name)), 3, False)
         logger.write("\n", 4, False)
 
     if not os.path.exists(product_info.source_dir):
@@ -77,97 +87,107 @@ def apply_patch(config, product_info, max_product_name_len, logger):
     # Loop on all the patches of the product
     for patch in product_info.patches:
         details = []
-        
+
         # Check the existence and apply the patch
         if os.path.isfile(patch):
             patch_cmd = "patch -p1 < %s" % patch
-            
+
             # Write the command in the terminal if verbose level is at 5
-            logger.write(("    >%s\n" % patch_cmd),5)
-            
+            logger.write(("    >%s\n" % patch_cmd), 5)
+
             # Write the command in the log file (can be seen using 'sat log')
             logger.logTxtFile.write("\n    >%s\n" % patch_cmd)
             logger.logTxtFile.flush()
-            
+
             # Call the command
-            res_cmd = (subprocess.call(patch_cmd, 
-                                   shell=True, 
-                                   cwd=product_info.source_dir,
-                                   stdout=logger.logTxtFile, 
-                                   stderr=subprocess.STDOUT) == 0)        
+            res_cmd = (
+                subprocess.call(
+                    patch_cmd,
+                    shell=True,
+                    cwd=product_info.source_dir,
+                    stdout=logger.logTxtFile,
+                    stderr=subprocess.STDOUT,
+                )
+                == 0
+            )
         else:
             res_cmd = False
-            details.append("  " + 
-                src.printcolors.printcError(_("Not a valid patch: %s") % patch))
+            details.append(
+                "  " + src.printcolors.printcError(_("Not a valid patch: %s") % patch)
+            )
 
         res.append(res_cmd)
-        
+
         if res_cmd:
-            message = (_("Apply patch %s") % 
-                       src.printcolors.printcHighlight(patch))
+            message = _("Apply patch %s") % src.printcolors.printcHighlight(patch)
         else:
             message = src.printcolors.printcWarning(
-                                        _("Failed to apply patch %s") % patch)
+                _("Failed to apply patch %s") % patch
+            )
 
         if config.USER.output_verbose_level >= 3:
             retcode.append("  %s" % message)
         else:
             retcode.append("%s: %s" % (product_info.name, message))
-        
+
         if len(details) > 0:
             retcode.extend(details)
 
     res = not (False in res)
-    
+
     return res, "\n".join(retcode) + "\n"
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the patch command description.
     :rtype: str
-    '''
-    return _("The patch command apply the patches on the sources of "
-             "the application products if there is any.\n\nexample:\nsat "
-             "patch SALOME-master --products qt,boost")
-  
+    """
+    return _(
+        "The patch command apply the patches on the sources of "
+        "the application products if there is any.\n\nexample:\nsat "
+        "patch SALOME-master --products qt,boost"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with patch parameter.
-    '''
+    """method that is called when salomeTools is called with patch parameter."""
     # Parse the options
     (options, args) = parser.parse_args(args)
-    
+
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Print some information
-    logger.write('Patching sources of the application %s\n' % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
+    logger.write(
+        "Patching sources of the application %s\n"
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
 
-    src.printcolors.print_value(logger, 'workdir', 
-                                runner.cfg.APPLICATION.workdir, 2)
+    src.printcolors.print_value(logger, "workdir", runner.cfg.APPLICATION.workdir, 2)
     logger.write("\n", 2, False)
 
     # Get the products list with products informations regarding the options
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    
+
     # Get the maximum name length in order to format the terminal display
     max_product_name_len = 1
     if len(products_infos) > 0:
         max_product_name_len = max(map(lambda l: len(l), products_infos[0])) + 4
-    
+
     # The loop on all the products on which to apply the patches
     good_result = 0
     for __, product_info in products_infos:
         # Apply the patch
-        return_code, patch_res = apply_patch(runner.cfg,
-                                             product_info,
-                                             max_product_name_len,
-                                             logger)
+        return_code, patch_res = apply_patch(
+            runner.cfg, product_info, max_product_name_len, logger
+        )
         logger.write(patch_res, 1, False)
         if return_code:
             good_result += 1
-    
+
     # Display the results (how much passed, how much failed, etc...)
 
     logger.write("\n", 2, False)
@@ -177,10 +197,10 @@ def run(args, runner, logger):
     else:
         status = src.KO_STATUS
         res_count = "%d / %d" % (good_result, len(products_infos))
-    
+
     # write results
     logger.write("Patching sources of the application:", 1)
     logger.write(" " + src.printcolors.printc(status), 1, False)
     logger.write(" (%s)\n" % res_count, 1, False)
-    
+
     return len(products_infos) - good_result
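
A minimal standalone sketch of the call pattern used by apply_patch above; the helper and argument names here are hypothetical, not part of salomeTools:

    import subprocess

    def apply_one_patch(patch_file, source_dir, log_path):
        # Same call pattern as apply_patch: run "patch -p1" from the
        # product source directory and redirect all output to the log.
        cmd = "patch -p1 < %s" % patch_file
        with open(log_path, "a") as log:
            rc = subprocess.call(
                cmd, shell=True, cwd=source_dir, stdout=log, stderr=subprocess.STDOUT
            )
        return rc == 0  # True when the patch applied cleanly
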
index 71a44a805b935d1dac409f7f2f6256675dd37a09..18f04281e258b4bdedc6071c7dd87756240cda5d 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -26,26 +26,46 @@ import src.debug as DBG
 
 # Define all possible option for prepare command :  sat prepare <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to prepare. This option accepts a comma separated list.'))
-parser.add_option('f', 'force', 'boolean', 'force',
-    _("Optional: force to prepare the products in development mode."))
-parser.add_option('', 'force_patch', 'boolean', 'force_patch', 
-    _("Optional: force to apply patch to the products in development mode."))
-parser.add_option('c', 'complete', 'boolean', 'complete',
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to prepare. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "f",
+    "force",
+    "boolean",
+    "force",
+    _("Optional: force to prepare the products in development mode."),
+)
+parser.add_option(
+    "",
+    "force_patch",
+    "boolean",
+    "force_patch",
+    _("Optional: force to apply patch to the products in development mode."),
+)
+parser.add_option(
+    "c",
+    "complete",
+    "boolean",
+    "complete",
     _("Optional: completion mode, only prepare products not present in SOURCES dir."),
-    False)
+    False,
+)
 
 
 def find_products_already_prepared(l_products):
-    '''function that returns the list of products that have an existing source 
+    """function that returns the list of products that have an existing source
        directory.
-    
+
     :param l_products List: The list of products to check
-    :return: The list of product configurations that have an existing source 
+    :return: The list of product configurations that have an existing source
              directory.
     :rtype: List
-    '''
+    """
     l_res = []
     for p_name_p_cfg in l_products:
         __, prod_cfg = p_name_p_cfg
@@ -53,43 +73,47 @@ def find_products_already_prepared(l_products):
             l_res.append(p_name_p_cfg)
     return l_res
 
+
 def find_products_with_patchs(l_products):
-    '''function that returns the list of products that have one or more patches.
-    
+    """function that returns the list of products that have one or more patches.
+
     :param l_products List: The list of products to check
     :return: The list of product configurations that have one or more patches.
     :rtype: List
-    '''
+    """
     l_res = []
     for p_name_p_cfg in l_products:
         __, prod_cfg = p_name_p_cfg
         l_patchs = src.get_cfg_param(prod_cfg, "patches", [])
-        if len(l_patchs)>0:
+        if len(l_patchs) > 0:
             l_res.append(p_name_p_cfg)
     return l_res
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the prepare command description.
     :rtype: str
-    '''
-    return _("The prepare command gets the sources of "
-             "the application products and apply the patches if there is any."
-             "\n\nexample:\nsat prepare SALOME-master --products KERNEL,GUI")
-  
+    """
+    return _(
+        "The prepare command gets the sources of "
+        "the application products and apply the patches if there is any."
+        "\n\nexample:\nsat prepare SALOME-master --products KERNEL,GUI"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with prepare parameter.
-    '''
-    
+    """method that is called when salomeTools is called with prepare parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # write warning if platform is not declared as supported
-    src.check_platform_is_supported( runner.cfg, logger )
+    src.check_platform_is_supported(runner.cfg, logger)
 
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
 
@@ -97,110 +121,115 @@ def run(args, runner, logger):
     args_appli = runner.cfg.VARS.application + " "  # useful whitespace
     if options.products:
         listProd = list(options.products)
-    else: # no product interpeted as all products
+    else:  # no products option given: interpreted as all products
         listProd = [name for name, tmp in products_infos]
 
     if options.complete:
         # remove products that are already prepared (completion mode)
-        pi_already_prepared=find_products_already_prepared(products_infos)
+        pi_already_prepared = find_products_already_prepared(products_infos)
         l_already_prepared = [i for i, tmp in pi_already_prepared]
         newList, removedList = removeInList(listProd, l_already_prepared)
         listProd = newList
-        if len(newList) == 0 and len(removedList) > 0 :
+        if len(newList) == 0 and len(removedList) > 0:
             msg = "\nAll the products are already installed, do nothing!\n"
             logger.write(src.printcolors.printcWarning(msg), 1)
             return 0
-        if len(removedList) > 0 :
-            msg = "\nList of already prepared products that are skipped : %s\n" % ",".join(removedList)
+        if len(removedList) > 0:
+            msg = (
+                "\nList of already prepared products that are skipped : %s\n"
+                % ",".join(removedList)
+            )
             logger.write(msg, 3)
-        
-
-    args_product_opt = '--products ' + ",".join(listProd)
-    do_source = (len(listProd) > 0)
 
+    args_product_opt = "--products " + ",".join(listProd)
+    do_source = len(listProd) > 0
 
     ldev_products = [p for p in products_infos if src.product.product_is_dev(p[1])]
-    newList = listProd # default
+    newList = listProd  # default
     if not options.force and len(ldev_products) > 0:
         l_products_not_getted = find_products_already_prepared(ldev_products)
         listNot = [i for i, tmp in l_products_not_getted]
         newList, removedList = removeInList(listProd, listNot)
         if len(removedList) > 0:
-            msg = _("""\
+            msg = _(
+                """\
 Do not get the source of the following products in development mode.
 Use the --force option to overwrite it.
-""")
+"""
+            )
             msg += "\n%s\n" % ",".join(removedList)
             logger.write(src.printcolors.printcWarning(msg), 1)
 
-    args_product_opt_clean = '--products ' + ",".join(newList)
-    do_clean = (len(newList) > 0)
-    
-    newList = listProd # default
+    args_product_opt_clean = "--products " + ",".join(newList)
+    do_clean = len(newList) > 0
+
+    newList = listProd  # default
     if not options.force_patch and len(ldev_products) > 0:
         l_products_with_patchs = find_products_with_patchs(ldev_products)
         listNot = [i for i, tmp in l_products_with_patchs]
         newList, removedList = removeInList(listProd, listNot)
         if len(removedList) > 0:
-            msg = _("""\
+            msg = _(
+                """\
 Do not patch the following products in development mode.
 Use the --force_patch option to overwrite it.
-""")
+"""
+            )
             msg += "\n%s\n" % ",".join(removedList)
             logger.write(src.printcolors.printcWarning(msg), 1)
-                                                     
-    args_product_opt_patch = '--products ' + ",".join(newList)
-    do_patch = (len(newList) > 0)
-      
+
+    args_product_opt_patch = "--products " + ",".join(newList)
+    do_patch = len(newList) > 0
+
     # Construct the final commands arguments
     args_clean = args_appli + args_product_opt_clean + " --sources"
-    args_source = args_appli + args_product_opt  
+    args_source = args_appli + args_product_opt
     args_patch = args_appli + args_product_opt_patch
-      
+
     # Initialize the results to a running status
     res_clean = 0
     res_source = 0
     res_patch = 0
-    
+
     # Call the commands using the API
     if do_clean:
         msg = _("Clean the source directories ...")
         logger.write(msg, 3)
         logger.flush()
-        res_clean = runner.clean(args_clean, batch=True, verbose = 0, logger_add_link = logger)
+        res_clean = runner.clean(
+            args_clean, batch=True, verbose=0, logger_add_link=logger
+        )
         if res_clean == 0:
-            logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+            logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 3)
         else:
-            logger.write('%s\n' % src.printcolors.printc(src.KO_STATUS), 3)
+            logger.write("%s\n" % src.printcolors.printc(src.KO_STATUS), 3)
     if do_source:
         msg = _("Get the sources of the products ...")
         logger.write(msg, 5)
-        res_source = runner.source(args_source, logger_add_link = logger)
+        res_source = runner.source(args_source, logger_add_link=logger)
         if res_source == 0:
-            logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 5)
+            logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 5)
         else:
-            logger.write('%s\n' % src.printcolors.printc(src.KO_STATUS), 5)
+            logger.write("%s\n" % src.printcolors.printc(src.KO_STATUS), 5)
     if do_patch:
         msg = _("Patch the product sources (if any) ...")
         logger.write(msg, 5)
-        res_patch = runner.patch(args_patch, logger_add_link = logger)
+        res_patch = runner.patch(args_patch, logger_add_link=logger)
         if res_patch == 0:
-            logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 5)
+            logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 5)
         else:
-            logger.write('%s\n' % src.printcolors.printc(src.KO_STATUS), 5)
-    
+            logger.write("%s\n" % src.printcolors.printc(src.KO_STATUS), 5)
+
     return res_clean + res_source + res_patch
 
 
 def removeInList(aList, removeList):
     """Removes elements of removeList list from aList
-    
+
     :param aList: (list) The list from which to remove elements
     :param removeList: (list) The list which contains elements to remove
-    :return: (list, list) (list with elements removed, list of elements removed) 
+    :return: (list, list) (list with elements removed, list of elements removed)
     """
     res1 = [i for i in aList if i not in removeList]
     res2 = [i for i in aList if i in removeList]
     return (res1, res2)
-
-
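
A quick usage sketch for removeInList above, with hypothetical product names; the helper is repeated so the snippet runs on its own:

    def removeInList(aList, removeList):
        # same behaviour as the removeInList helper above
        res1 = [i for i in aList if i not in removeList]
        res2 = [i for i in aList if i in removeList]
        return (res1, res2)

    kept, removed = removeInList(["KERNEL", "GUI", "MEDCOUPLING"], ["GUI"])
    assert kept == ["KERNEL", "MEDCOUPLING"]
    assert removed == ["GUI"]
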
index c6138e5087e18db34ab8fb1fbb3021dc15102e87..eb615c5dac49cb856b6b2d4045629b74023914ad 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,41 +25,51 @@ import src
 parser = src.options.Options()
 # no option more than -h as generic default
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the run command description.
     :rtype: str
-    '''
-    return _("""\
+    """
+    return _(
+        """\
 The run command runs the application launcher with the given arguments.
 
 example:
 >> sat run SALOME-master
-""")
+"""
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with run parameter.
-    '''
+    """method that is called when salomeTools is called with run parameter."""
 
     # check for product
     src.check_config_has_application(runner.cfg)
 
-    # Determine launcher path 
+    # Determine launcher path
     launcher_name = src.get_launcher_name(runner.cfg)
     launcher_dir = runner.cfg.APPLICATION.workdir
-    
+
     # Check the launcher existence
-    if launcher_name not in  os.listdir(launcher_dir):
-        message = _("The launcher %s was not found in directory %s!\nDid you run the"
-                    " command 'sat launcher' ?\n") % (launcher_name, launcher_dir)
+    if launcher_name not in os.listdir(launcher_dir):
+        message = _(
+            "The launcher %s was not found in directory %s!\nDid you run the"
+            " command 'sat launcher' ?\n"
+        ) % (launcher_name, launcher_dir)
         raise src.SatException(message)
-          
+
     launcher_path = os.path.join(launcher_dir, launcher_name)
 
     if not os.path.exists(launcher_path):
-        message = _("The launcher at path %s is missing.\nDid you run the"
-                    " command 'sat launcher' ?\n") % launcher_path
+        message = (
+            _(
+                "The launcher at path %s is missing.\nDid you run the"
+                " command 'sat launcher' ?\n"
+            )
+            % launcher_path
+        )
         raise src.SatException(message)
 
     # Determine the command to launch (add the additional arguments)
@@ -69,21 +79,23 @@ def run(args, runner, logger):
     src.printcolors.print_value(logger, _("Executed command"), command, 2)
     logger.write(_("Launching ...\n"))
     logger.flush()
-    
+
     # Run the launcher
-    subprocess.call(command,
-                    shell=True,
-                    stdout=logger.logTxtFile,
-                    stderr=subprocess.STDOUT)
-    
+    subprocess.call(
+        command, shell=True, stdout=logger.logTxtFile, stderr=subprocess.STDOUT
+    )
+
     # Display information : how to get the logs
-    messageFirstPart = _("\nEnd of execution. To see the traces, "
-                         "please tap the following command :\n")
+    messageFirstPart = _(
+        "\nEnd of execution. To see the traces, " "please tap the following command :\n"
+    )
     messageSecondPart = src.printcolors.printcLabel(
-                                            runner.cfg.VARS.salometoolsway +
-                                            os.sep +
-                                            "sat log " +
-                                            runner.cfg.VARS.application + "\n")
-    logger.write("  %s\n" %(messageFirstPart + messageSecondPart), 2)
-    
+        runner.cfg.VARS.salometoolsway
+        + os.sep
+        + "sat log "
+        + runner.cfg.VARS.application
+        + "\n"
+    )
+    logger.write("  %s\n" % (messageFirstPart + messageSecondPart), 2)
+
     return 0
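
The run command above reduces to: resolve the launcher under the application workdir, check that it exists, then execute it with the extra arguments. A minimal sketch with hypothetical names:

    import os
    import subprocess

    def run_launcher(workdir, launcher_name, extra_args, log_path):
        # Resolve the launcher as the run command does, then execute it,
        # sending stdout/stderr to the command log.
        launcher_path = os.path.join(workdir, launcher_name)
        if not os.path.exists(launcher_path):
            raise RuntimeError(
                "launcher %s is missing; run 'sat launcher' first" % launcher_path
            )
        command = launcher_path + " " + " ".join(extra_args)
        with open(log_path, "a") as log:
            return subprocess.call(
                command, shell=True, stdout=log, stderr=subprocess.STDOUT
            )
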
index 824a1331b20fc139ee661b600cf6fb15edbf6523..513bf38e65bc26def7bea4a733acdd9031bd2087 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,12 +22,25 @@ import src
 
 # Define all possible option for the script command :  sat script <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products to configure. This option accepts a comma separated list.'))
-parser.add_option('', 'nb_proc', 'int', 'nb_proc',
-    _("""Optional: The number of processors to use in the script if the make command is used in it.
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _("Optional: products to configure. This option accepts a comma separated list."),
+)
+parser.add_option(
+    "",
+    "nb_proc",
+    "int",
+    "nb_proc",
+    _(
+        """Optional: The number of processors to use in the script if the make command is used in it.
       Warning: the script has to be correctly written if you want this option to work.
-      The $MAKE_OPTIONS has to be used."""), 0)
+      The $MAKE_OPTIONS has to be used."""
+    ),
+    0,
+)
 
 
 def log_step(logger, header, step):
@@ -36,6 +49,7 @@ def log_step(logger, header, step):
     logger.write("\n==== %s \n" % src.printcolors.printcInfo(step), 4)
     logger.flush()
 
+
 def log_res_step(logger, res):
     if res == 0:
         logger.write("%s \n" % src.printcolors.printcSuccess("OK"), 4)
@@ -44,42 +58,41 @@ def log_res_step(logger, res):
         logger.write("%s \n" % src.printcolors.printcError("KO"), 4)
         logger.flush()
 
+
 def run_script_all_products(config, products_infos, nb_proc, logger):
-    '''Execute the script in each product build directory.
+    """Execute the script in each product build directory.
 
     :param config Config: The global configuration
-    :param products_info list: List of 
+    :param products_info list: List of
                                  (str, Config) => (product_name, product_info)
     :param nb_proc int: The number of processors to use
     :param logger Logger: The logger instance to use for the display and logging
     :return: the number of failing commands.
     :rtype: int
-    '''
+    """
     res = 0
     for p_name_info in products_infos:
-        res_prod = run_script_of_product(p_name_info,
-                                      nb_proc,
-                                      config,
-                                      logger)
+        res_prod = run_script_of_product(p_name_info, nb_proc, config, logger)
         if res_prod != 0:
-            res += 1 
+            res += 1
     return res
 
+
 def run_script_of_product(p_name_info, nb_proc, config, logger):
-    '''Execute the proper configuration command(s) 
+    """Execute the proper configuration command(s)
        in the product build directory.
-    
+
     :param p_name_info tuple: (str, Config) => (product_name, product_info)
     :param nb_proc int: The number of processors to use
     :param config Config: The global configuration
-    :param logger Logger: The logger instance to use for the display 
+    :param logger Logger: The logger instance to use for the display
                           and logging
     :return: 1 if it fails, else 0.
     :rtype: int
-    '''
-    
+    """
+
     p_name, p_info = p_name_info
-    
+
     # Logging
     logger.write("\n", 4, False)
     logger.write("################ ", 4)
@@ -90,29 +103,33 @@ def run_script_of_product(p_name_info, nb_proc, config, logger):
     logger.flush()
 
     # Do nothing if the product is not compilable or has no compilation script
-    if (("properties" in p_info and "compilation" in p_info.properties and 
-                                  p_info.properties.compilation == "no") or
-                                  (not src.product.product_has_script(p_info))):
+    if (
+        "properties" in p_info
+        and "compilation" in p_info.properties
+        and p_info.properties.compilation == "no"
+    ) or (not src.product.product_has_script(p_info)):
         log_step(logger, header, "ignored")
         logger.write("\n", 3, False)
         return 0
 
     if not os.path.isfile(p_info.compil_script):
-        msg_err="\n\nError : The compilation script file do not exists!"+\
-                "\n        It was not found by sat!"+\
-                "\n        Please check your salomeTool configuration\n"
+        msg_err = (
+            "\n\nError : The compilation script file do not exists!"
+            + "\n        It was not found by sat!"
+            + "\n        Please check your salomeTool configuration\n"
+        )
         logger.error(msg_err)
         return 1
 
     # Instantiate the class that manages all the construction commands
     # like cmake, make, make install, make test, environment management, etc...
     builder = src.compilation.Builder(config, logger, p_name, p_info)
-    
+
     # Prepare the environment
     log_step(logger, header, "PREPARE ENV")
     res_prepare = builder.prepare()
     log_res_step(logger, res_prepare)
-    
+
     # Execute the script
     len_end_line = 20
     script_path_display = src.printcolors.printcLabel(p_info.compil_script)
@@ -120,100 +137,119 @@ def run_script_of_product(p_name_info, nb_proc, config, logger):
     len_end_line += len(script_path_display)
     res = builder.do_script_build(p_info.compil_script, number_of_proc=nb_proc)
     log_res_step(logger, res)
-    
+
     # Log the result
     if res > 0:
         logger.write("\r%s%s" % (header, " " * len_end_line), 3)
         logger.write("\r" + header + src.printcolors.printcError("KO"))
-        logger.write("==== %(KO)s in script execution of %(name)s \n" %
-            { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+        logger.write(
+            "==== %(KO)s in script execution of %(name)s \n"
+            % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+            4,
+        )
         logger.write("\n", 3, False)
         logger.flush()
         return res
 
     if src.product.product_has_post_script(p_info):
         # the product has a post install script we run
-        #script_path_display = src.printcolors.printcLabel(p_info.p_info.post_script)
-        #log_step(logger, header, "POST SCRIPT " + script_path_display)
+        # script_path_display = src.printcolors.printcLabel(p_info.p_info.post_script)
+        # log_step(logger, header, "POST SCRIPT " + script_path_display)
         res = builder.do_script_build(p_info.post_script)
-        #log_res_step(logger, res)
+        # log_res_step(logger, res)
         if res > 0:
             logger.write("\r%s%s" % (header, " " * len_end_line), 3)
             logger.write("\r" + header + src.printcolors.printcError("KO"))
-            logger.write("==== %(KO)s in post script execution of %(name)s \n" %
-                { "name" : p_name , "KO" : src.printcolors.printcInfo("ERROR")}, 4)
+            logger.write(
+                "==== %(KO)s in post script execution of %(name)s \n"
+                % {"name": p_name, "KO": src.printcolors.printcInfo("ERROR")},
+                4,
+            )
             logger.write("\n", 3, False)
             logger.flush()
             return res
-    
+
     logger.write("\r%s%s" % (header, " " * len_end_line), 3)
     logger.write("\r" + header + src.printcolors.printcSuccess("OK"))
     logger.write("==== %s \n" % src.printcolors.printcInfo("OK"), 4)
-    logger.write("==== Script execution of %(name)s %(OK)s \n" %
-        { "name" : p_name , "OK" : src.printcolors.printcInfo("OK")}, 4)
+    logger.write(
+        "==== Script execution of %(name)s %(OK)s \n"
+        % {"name": p_name, "OK": src.printcolors.printcInfo("OK")},
+        4,
+    )
     logger.write("\n", 3, False)
     logger.flush()
 
     return res
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the script command description.
     :rtype: str
-    '''
-    return _("The script command executes the script(s) of the the given "
-             "products in the build directory.\nThis is done only for the "
-             "products that are constructed using a script (build_source "
-             ": \"script\").\nOtherwise, nothing is done."
-             "\n\nexample:\nsat script SALOME-master --products Python,numpy")
-  
+    """
+    return _(
+        "The script command executes the script(s) of the the given "
+        "products in the build directory.\nThis is done only for the "
+        "products that are constructed using a script (build_source "
+        ': "script").\nOtherwise, nothing is done.'
+        "\n\nexample:\nsat script SALOME-master --products Python,numpy"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with make parameter.
-    '''
-    
+    """method that is called when salomeTools is called with make parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Get the list of products to treat
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    products_infos = [pi for pi in products_infos if not(
-                                     src.product.product_is_native(pi[1]) or 
-                                     src.product.product_is_fixed(pi[1]))]
-    
-    
+    products_infos = [
+        pi
+        for pi in products_infos
+        if not (
+            src.product.product_is_native(pi[1]) or src.product.product_is_fixed(pi[1])
+        )
+    ]
+
     # Print some information
-    logger.write(_('Executing the script in the build '
-                                'directories of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    
-    info = [(_("BUILD directory"),
-             os.path.join(runner.cfg.APPLICATION.workdir, 'BUILD'))]
+    logger.write(
+        _("Executing the script in the build " "directories of the application %s\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+
+    info = [
+        (_("BUILD directory"), os.path.join(runner.cfg.APPLICATION.workdir, "BUILD"))
+    ]
     src.print_info(logger, info)
-    
+
     # Call the function that will loop over all the products and execute
     # the right command(s)
     if options.nb_proc is None:
         options.nb_proc = 0
-    res = run_script_all_products(runner.cfg,
-                                  products_infos,
-                                  options.nb_proc,
-                                  logger)
-    
+    res = run_script_all_products(runner.cfg, products_infos, options.nb_proc, logger)
+
     # Print the final state
     nb_products = len(products_infos)
     if res == 0:
         final_status = "OK"
     else:
         final_status = "KO"
-   
-    logger.write(_("\nScript: %(status)s "
-                   "(%(valid_result)d/%(nb_products)d)\n") % \
-        { 'status': src.printcolors.printc(final_status), 
-          'valid_result': nb_products - res,
-          'nb_products': nb_products }, 1)    
-    
-    return res 
+
+    logger.write(
+        _("\nScript: %(status)s " "(%(valid_result)d/%(nb_products)d)\n")
+        % {
+            "status": src.printcolors.printc(final_status),
+            "valid_result": nb_products - res,
+            "nb_products": nb_products,
+        },
+        1,
+    )
+
+    return res
index c2139755c3b373d66b2a1ab64b1a4be4cc03dee2..8edd596036257817e3010151c3eca85b9caf4d9d 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,51 +22,59 @@ import src
 
 # Define all possible option for the shell command :  sat shell <options>
 parser = src.options.Options()
-parser.add_option('c', 'command', 'string', 'command',
-    _('Mandatory: The shell command to execute.'), "")
+parser.add_option(
+    "c",
+    "command",
+    "string",
+    "command",
+    _("Mandatory: The shell command to execute."),
+    "",
+)
+
 
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the shell command description.
     :rtype: str
-    '''
-    return _("""\
+    """
+    return _(
+        """\
 The shell command executes the shell commands passed as argument.
 
 example:
->> sat shell --command "ls -l /tmp" """)
-  
+>> sat shell --command "ls -l /tmp" """
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with shell parameter.
-    '''
-    
+    """method that is called when salomeTools is called with shell parameter."""
+
     # Parse the options
     (options, args) = parser.parse_args(args)
 
     # Make sure the command option has been called
     if not options.command:
-        message = _("The option --command is required\n")      
+        message = _("The option --command is required\n")
         logger.write(src.printcolors.printcError(message))
         return 1
-    
+
     # Print the input command
     msg = _("Command to execute:\n%s\nExecution ... " % options.command)
     logger.write(msg, 3)
-    
+
     # Call the input command
-    res = subprocess.call(options.command,
-                          shell=True,
-                          stdout=logger.logTxtFile,
-                          stderr=subprocess.STDOUT)
+    res = subprocess.call(
+        options.command, shell=True, stdout=logger.logTxtFile, stderr=subprocess.STDOUT
+    )
+
     # Format the result to be 0 (success) or 1 (fail)
     if res != 0:
         res = 1
         logger.write(src.printcolors.printc("KO"), 3)
     else:
         logger.write(src.printcolors.printc("OK"), 3)
-    
-    logger.write("\n",3)
-    
-    return res
\ No newline at end of file
+
+    logger.write("\n", 3)
+
+    return res
index 832e484f0f032653e3522caa83bbfe2e58b87ef7..8a78ee6b0bc4baa9cd6537593c1d6b073ced153c 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -27,53 +27,55 @@ import src.debug as DBG
 
 # Define all possible option for patch command :  sat patch <options>
 parser = src.options.Options()
-parser.add_option('p', 'products', 'list2', 'products',
-    _('Optional: products from which to get the sources. This option accepts a comma separated list.'))
+parser.add_option(
+    "p",
+    "products",
+    "list2",
+    "products",
+    _(
+        "Optional: products from which to get the sources. This option accepts a comma separated list."
+    ),
+)
+
 
 def get_source_for_dev(config, product_info, source_dir, logger, pad):
-    '''The method called if the product is in development mode
-    
+    """The method called if the product is in development mode
+
     :param config Config: The global configuration
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param logger Logger: The logger instance to use for the display and logging
     :param pad int: The gap to apply for the terminal display
     :return: True if it succeed, else False
     :rtype: boolean
-    '''
-       
+    """
+
     # Call the function corresponding to get the sources with True checkout
-    retcode = get_product_sources(config, 
-                                 product_info, 
-                                 True, 
-                                 source_dir,
-                                 logger, 
-                                 pad, 
-                                 checkout=True)
+    retcode = get_product_sources(
+        config, product_info, True, source_dir, logger, pad, checkout=True
+    )
     logger.write("\n", 3, False)
     # +2 because product name is followed by ': '
-    logger.write(" " * (pad+2), 3, False) 
-    
-    logger.write('dev: %s ... ' % 
-                 src.printcolors.printcInfo(product_info.source_dir), 3, False)
+    logger.write(" " * (pad + 2), 3, False)
+
+    logger.write(
+        "dev: %s ... " % src.printcolors.printcInfo(product_info.source_dir), 3, False
+    )
     logger.flush()
-    
+
     return retcode
 
-def get_source_from_git(config,
-                        product_info,
-                        source_dir,
-                        logger,
-                        pad,
-                        is_dev=False,
-                        environ = None):
-    '''The method called if the product is to be get in git mode
-    
-    :param product_info Config: The configuration specific to 
+
+def get_source_from_git(
+    config, product_info, source_dir, logger, pad, is_dev=False, environ=None
+):
+    """The method called if the product is to be get in git mode
+
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param logger Logger: The logger instance to use for the display and logging
     :param pad int: The gap to apply for the terminal display
@@ -82,42 +84,40 @@ def get_source_from_git(config,
                                                 extracting.
     :return: True if it succeed, else False
     :rtype: boolean
-    '''
+    """
     # The str to display
-    coflag = 'git'
+    coflag = "git"
 
-    use_repo_dev=False
-    if ("APPLICATION" in config  and
-            "properties"  in config.APPLICATION  and
-            "repo_dev"    in config.APPLICATION.properties  and
-            config.APPLICATION.properties.repo_dev == "yes") :
-                use_repo_dev=True
+    use_repo_dev = False
+    if (
+        "APPLICATION" in config
+        and "properties" in config.APPLICATION
+        and "repo_dev" in config.APPLICATION.properties
+        and config.APPLICATION.properties.repo_dev == "yes"
+    ):
+        use_repo_dev = True
 
     # Get the repository address.
     # If the application has the repo_dev property
     # Or if the product is in dev mode
     # Then we use repo_dev if the key exists
-    if (is_dev or use_repo_dev) and 'repo_dev' in product_info.git_info:
+    if (is_dev or use_repo_dev) and "repo_dev" in product_info.git_info:
         coflag = src.printcolors.printcHighlight(coflag.upper())
-        repo_git = product_info.git_info.repo_dev    
+        repo_git = product_info.git_info.repo_dev
     else:
-        repo_git = product_info.git_info.repo    
-
+        repo_git = product_info.git_info.repo
 
     # Display information
-    logger.write('%s:%s' % (coflag, src.printcolors.printcInfo(repo_git)), 3, 
-                 False)
-    logger.write(' ' * (pad + 50 - len(repo_git)), 3, False)
-    logger.write(' tag:%s' % src.printcolors.printcInfo(
-                                                    product_info.git_info.tag), 
-                 3,
-                 False)
-    logger.write(' %s. ' % ('.' * (10 - len(product_info.git_info.tag))), 3, 
-                 False)
+    logger.write("%s:%s" % (coflag, src.printcolors.printcInfo(repo_git)), 3, False)
+    logger.write(" " * (pad + 50 - len(repo_git)), 3, False)
+    logger.write(
+        " tag:%s" % src.printcolors.printcInfo(product_info.git_info.tag), 3, False
+    )
+    logger.write(" %s. " % ("." * (10 - len(product_info.git_info.tag))), 3, False)
     logger.flush()
-    logger.write('\n', 5, False)
+    logger.write("\n", 5, False)
 
-    git_options= ''
+    git_options = ""
     if "git_options" in product_info.git_info:
         git_options = product_info.git_info.git_options
 
@@ -131,117 +131,148 @@ def get_source_from_git(config,
         sub_dir = product_info.git_info.sub_dir
 
     if sub_dir is None:
-      # Call the system function that do the extraction in git mode
-      retcode = src.system.git_extract(repo_git,
-                                   product_info.git_info.tag, git_options,
-                                   source_dir, logger, environ)
+        # Call the system function that does the extraction in git mode
+        retcode = src.system.git_extract(
+            repo_git,
+            product_info.git_info.tag,
+            git_options,
+            source_dir,
+            logger,
+            environ,
+        )
     else:
-      # Call the system function that do the extraction of a sub_dir in git mode
-      logger.write("sub_dir:%s " % sub_dir, 3)
-      retcode = src.system.git_extract_sub_dir(repo_git,
-                                   product_info.git_info.tag,git_options,
-                                   source_dir, sub_dir, logger, environ)
-
+        # Call the system function that does the extraction of a sub_dir in git mode
+        logger.write("sub_dir:%s " % sub_dir, 3)
+        retcode = src.system.git_extract_sub_dir(
+            repo_git,
+            product_info.git_info.tag,
+            git_options,
+            source_dir,
+            sub_dir,
+            logger,
+            environ,
+        )
 
     return retcode
 
+
 def get_source_from_archive(config, product_info, source_dir, logger):
-    '''The method called if the product is to be get in archive mode
-    
+    """The method called if the product is to be get in archive mode
+
     :param config Config: The global configuration
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param logger Logger: The logger instance to use for the display and logging
     :return: True if it succeeds, else False
     :rtype: boolean
-    '''
+    """
 
     # check if pip should be used: pip mode is activated if both the application and the product have the pip property
-    if (src.appli_test_property(config,"pip", "yes") and 
-       src.product.product_test_property(product_info,"pip", "yes")):
+    if src.appli_test_property(
+        config, "pip", "yes"
+    ) and src.product.product_test_property(product_info, "pip", "yes"):
         pip_msg = "PIP : do nothing, product will be downloaded later at compile time "
-        logger.write(pip_msg, 3) 
+        logger.write(pip_msg, 3)
         return True
 
     # check archive exists
     if not os.path.exists(product_info.archive_info.archive_name):
         # The archive is not found on local file system (ARCHIVEPATH)
         # We try ftp!
-        logger.write("\n   The archive is not found on local file system, we try ftp\n", 3)
-        ret=src.find_file_in_ftppath(product_info.archive_info.archive_name, 
-                                     config.PATHS.ARCHIVEFTP, config.LOCAL.archive_dir, logger)
+        logger.write(
+            "\n   The archive is not found on local file system, we try ftp\n", 3
+        )
+        ret = src.find_file_in_ftppath(
+            product_info.archive_info.archive_name,
+            config.PATHS.ARCHIVEFTP,
+            config.LOCAL.archive_dir,
+            logger,
+        )
         if ret:
             # archive was found on ftp and stored in ret
-            product_info.archive_info.archive_name=ret
+            product_info.archive_info.archive_name = ret
         else:
-            raise src.SatException(_("Archive not found in ARCHIVEPATH, nor on ARCHIVEFTP: '%s'") % 
-                                   product_info.archive_info.archive_name)
-
-    logger.write('arc:%s ... ' % 
-                 src.printcolors.printcInfo(product_info.archive_info.archive_name),
-                 3, 
-                 False)
+            raise src.SatException(
+                _("Archive not found in ARCHIVEPATH, nor on ARCHIVEFTP: '%s'")
+                % product_info.archive_info.archive_name
+            )
+
+    logger.write(
+        "arc:%s ... "
+        % src.printcolors.printcInfo(product_info.archive_info.archive_name),
+        3,
+        False,
+    )
     logger.flush()
     # Call the system function that does the extraction in archive mode
     retcode, NameExtractedDirectory = src.system.archive_extract(
-                                    product_info.archive_info.archive_name,
-                                    source_dir.dir(), logger)
-    
-    # Rename the source directory if 
+        product_info.archive_info.archive_name, source_dir.dir(), logger
+    )
+
+    # Rename the source directory if
     # it does not match with product_info.source_dir
-    if (NameExtractedDirectory.replace('/', '') != 
-            os.path.basename(product_info.source_dir)):
-        shutil.move(os.path.join(os.path.dirname(product_info.source_dir), 
-                                 NameExtractedDirectory), 
-                    product_info.source_dir)
-    
+    if NameExtractedDirectory.replace("/", "") != os.path.basename(
+        product_info.source_dir
+    ):
+        shutil.move(
+            os.path.join(
+                os.path.dirname(product_info.source_dir), NameExtractedDirectory
+            ),
+            product_info.source_dir,
+        )
+
     return retcode
 
+
 def get_source_from_dir(product_info, source_dir, logger):
-    
+
     if "dir_info" not in product_info:
-        msg = _("Error: you must put a dir_info section"
-                " in the file %s.pyconf" % product_info.name)
+        msg = _(
+            "Error: you must put a dir_info section"
+            " in the file %s.pyconf" % product_info.name
+        )
         logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
         return False
 
     if "dir" not in product_info.dir_info:
-        msg = _("Error: you must put a dir in the dir_info section"
-                " in the file %s.pyconf" % product_info.name)
+        msg = _(
+            "Error: you must put a dir in the dir_info section"
+            " in the file %s.pyconf" % product_info.name
+        )
         logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
         return False
 
     # check that source exists
     if not os.path.exists(product_info.dir_info.dir):
-        msg = _("Error: the dir %s defined in the file"
-                " %s.pyconf does not exists" % (product_info.dir_info.dir,
-                                                product_info.name))
+        msg = _(
+            "Error: the dir %s defined in the file"
+            " %s.pyconf does not exists"
+            % (product_info.dir_info.dir, product_info.name)
+        )
         logger.write("\n%s\n" % src.printcolors.printcError(msg), 1)
         return False
-    
-    logger.write('DIR: %s ... ' % src.printcolors.printcInfo(
-                                           product_info.dir_info.dir), 3)
+
+    logger.write(
+        "DIR: %s ... " % src.printcolors.printcInfo(product_info.dir_info.dir), 3
+    )
     logger.flush()
 
     retcode = src.Path(product_info.dir_info.dir).copy(source_dir)
-    
+
     return retcode
-    
-def get_source_from_cvs(user,
-                        product_info,
-                        source_dir,
-                        checkout,
-                        logger,
-                        pad,
-                        environ = None):
-    '''The method called if the product is to be get in cvs mode
-    
+
+
+def get_source_from_cvs(
+    user, product_info, source_dir, checkout, logger, pad, environ=None
+):
+    """The method called if the product is to be get in cvs mode
+
     :param user str: The user to use for the cvs command
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param checkout boolean: If True, get the source in checkout mode
     :param logger Logger: The logger instance to use for the display and logging
@@ -250,66 +281,68 @@ def get_source_from_cvs(user,
                                                 extracting.
     :return: True if it succeeds, else False
     :rtype: boolean
-    '''
+    """
     # Get the protocol to use in the command
     if "protocol" in product_info.cvs_info:
         protocol = product_info.cvs_info.protocol
     else:
         protocol = "pserver"
-    
+
     # Construct the line to display
     if "protocol" in product_info.cvs_info:
-        cvs_line = "%s:%s@%s:%s" % \
-            (protocol, user, product_info.cvs_info.server, 
-             product_info.cvs_info.product_base)
+        cvs_line = "%s:%s@%s:%s" % (
+            protocol,
+            user,
+            product_info.cvs_info.server,
+            product_info.cvs_info.product_base,
+        )
     else:
-        cvs_line = "%s / %s" % (product_info.cvs_info.server, 
-                                product_info.cvs_info.product_base)
-
-    coflag = 'cvs'
-    if checkout: coflag = src.printcolors.printcHighlight(coflag.upper())
-
-    logger.write('%s:%s' % (coflag, src.printcolors.printcInfo(cvs_line)), 
-                 3, 
-                 False)
-    logger.write(' ' * (pad + 50 - len(cvs_line)), 3, False)
-    logger.write(' src:%s' % 
-                 src.printcolors.printcInfo(product_info.cvs_info.source), 
-                 3, 
-                 False)
-    logger.write(' ' * (pad + 1 - len(product_info.cvs_info.source)), 3, False)
-    logger.write(' tag:%s' % 
-                    src.printcolors.printcInfo(product_info.cvs_info.tag), 
-                 3, 
-                 False)
+        cvs_line = "%s / %s" % (
+            product_info.cvs_info.server,
+            product_info.cvs_info.product_base,
+        )
+
+    coflag = "cvs"
+    if checkout:
+        coflag = src.printcolors.printcHighlight(coflag.upper())
+
+    logger.write("%s:%s" % (coflag, src.printcolors.printcInfo(cvs_line)), 3, False)
+    logger.write(" " * (pad + 50 - len(cvs_line)), 3, False)
+    logger.write(
+        " src:%s" % src.printcolors.printcInfo(product_info.cvs_info.source), 3, False
+    )
+    logger.write(" " * (pad + 1 - len(product_info.cvs_info.source)), 3, False)
+    logger.write(
+        " tag:%s" % src.printcolors.printcInfo(product_info.cvs_info.tag), 3, False
+    )
     # at least one '.' is visible
-    logger.write(' %s. ' % ('.' * (10 - len(product_info.cvs_info.tag))), 
-                 3, 
-                 False) 
+    logger.write(" %s. " % ("." * (10 - len(product_info.cvs_info.tag))), 3, False)
     logger.flush()
-    logger.write('\n', 5, False)
+    logger.write("\n", 5, False)
 
     # Call the system function that does the extraction in cvs mode
-    retcode = src.system.cvs_extract(protocol, user,
-                                 product_info.cvs_info.server,
-                                 product_info.cvs_info.product_base,
-                                 product_info.cvs_info.tag,
-                                 product_info.cvs_info.source,
-                                 source_dir, logger, checkout, environ)
+    retcode = src.system.cvs_extract(
+        protocol,
+        user,
+        product_info.cvs_info.server,
+        product_info.cvs_info.product_base,
+        product_info.cvs_info.tag,
+        product_info.cvs_info.source,
+        source_dir,
+        logger,
+        checkout,
+        environ,
+    )
     return retcode
 
-def get_source_from_svn(user,
-                        product_info,
-                        source_dir,
-                        checkout,
-                        logger,
-                        environ = None):
-    '''The method called if the product is to be get in svn mode
-    
+
+def get_source_from_svn(user, product_info, source_dir, checkout, logger, environ=None):
+    """The method called if the product is to be get in svn mode
+
     :param user str: The user to use for the svn command
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param checkout boolean: If True, get the source in checkout mode
     :param logger Logger: The logger instance to use for the display and logging
@@ -317,131 +350,143 @@ def get_source_from_svn(user,
                                                 extracting.
     :return: True if it succeeds, else False
     :rtype: boolean
-    '''
-    coflag = 'svn'
-    if checkout: coflag = src.printcolors.printcHighlight(coflag.upper())
-
-    logger.write('%s:%s ... ' % (coflag, 
-                                 src.printcolors.printcInfo(
-                                            product_info.svn_info.repo)), 
-                 3, 
-                 False)
+    """
+    coflag = "svn"
+    if checkout:
+        coflag = src.printcolors.printcHighlight(coflag.upper())
+
+    logger.write(
+        "%s:%s ... " % (coflag, src.printcolors.printcInfo(product_info.svn_info.repo)),
+        3,
+        False,
+    )
     logger.flush()
-    logger.write('\n', 5, False)
+    logger.write("\n", 5, False)
     # Call the system function that does the extraction in svn mode
-    retcode = src.system.svn_extract(user, 
-                                     product_info.svn_info.repo, 
-                                     product_info.svn_info.tag,
-                                     source_dir, 
-                                     logger, 
-                                     checkout,
-                                     environ)
+    retcode = src.system.svn_extract(
+        user,
+        product_info.svn_info.repo,
+        product_info.svn_info.tag,
+        source_dir,
+        logger,
+        checkout,
+        environ,
+    )
     return retcode
 
-def get_product_sources(config, 
-                       product_info, 
-                       is_dev, 
-                       source_dir,
-                       logger, 
-                       pad, 
-                       checkout=False):
-    '''Get the product sources.
-    
+
+def get_product_sources(
+    config, product_info, is_dev, source_dir, logger, pad, checkout=False
+):
+    """Get the product sources.
+
     :param config Config: The global configuration
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product to be prepared
     :param is_dev boolean: True if the product is in development mode
-    :param source_dir Path: The Path instance corresponding to the 
+    :param source_dir Path: The Path instance corresponding to the
                             directory where to put the sources
     :param logger Logger: The logger instance to use for the display and logging
     :param pad int: The gap to apply for the terminal display
     :param checkout boolean: If True, get the source in checkout mode
     :return: True if it succeeds, else False
     :rtype: boolean
-    '''
-    
+    """
+
     # Get the application environment
     logger.write(_("Set the application environment\n"), 5)
-    env_appli = src.environment.SalomeEnviron(config,
-                                      src.environment.Environ(dict(os.environ)))
+    env_appli = src.environment.SalomeEnviron(
+        config, src.environment.Environ(dict(os.environ))
+    )
     env_appli.set_application_env(logger)
-    
+
     # Call the right function to get sources regarding the product settings
     if not checkout and is_dev:
-        return get_source_for_dev(config, 
-                                   product_info, 
-                                   source_dir, 
-                                   logger, 
-                                   pad)
+        return get_source_for_dev(config, product_info, source_dir, logger, pad)
 
     if product_info.get_source == "git":
-        return get_source_from_git(config, product_info, source_dir, logger, pad, 
-                                    is_dev, env_appli)
+        return get_source_from_git(
+            config, product_info, source_dir, logger, pad, is_dev, env_appli
+        )
 
     if product_info.get_source == "archive":
         return get_source_from_archive(config, product_info, source_dir, logger)
 
     if product_info.get_source == "dir":
         return get_source_from_dir(product_info, source_dir, logger)
-    
+
     if product_info.get_source == "cvs":
         cvs_user = config.USER.cvs_user
-        return get_source_from_cvs(cvs_user, product_info, source_dir, 
-                                    checkout, logger, pad, env_appli)
+        return get_source_from_cvs(
+            cvs_user, product_info, source_dir, checkout, logger, pad, env_appli
+        )
 
     if product_info.get_source == "svn":
         svn_user = config.USER.svn_user
-        return get_source_from_svn(svn_user, product_info, source_dir, 
-                                    checkout, logger, env_appli)
+        return get_source_from_svn(
+            svn_user, product_info, source_dir, checkout, logger, env_appli
+        )
 
     if product_info.get_source == "native":
         # for native products we check that the corresponding system packages are installed
-        logger.write("Native : Checking system packages are installed\n" , 3)
-        check_cmd=src.system.get_pkg_check_cmd(config.VARS.dist_name) # (either rmp or apt)
-        run_pkg,build_pkg=src.product.check_system_dep(config.VARS.dist, check_cmd, product_info)
-        result=True
+        logger.write("Native : Checking system packages are installed\n", 3)
+        check_cmd = src.system.get_pkg_check_cmd(
+            config.VARS.dist_name
+        )  # (either rpm or apt)
+        run_pkg, build_pkg = src.product.check_system_dep(
+            config.VARS.dist, check_cmd, product_info
+        )
+        result = True
         for pkg in run_pkg:
-            logger.write(" - " + pkg + " : " + run_pkg[pkg] + '\n', 1)
+            logger.write(" - " + pkg + " : " + run_pkg[pkg] + "\n", 1)
             if "KO" in run_pkg[pkg]:
-                result=False
+                result = False
         for pkg in build_pkg:
-            logger.write(" - " + pkg + " : " + build_pkg[pkg] + '\n', 1)
+            logger.write(" - " + pkg + " : " + build_pkg[pkg] + "\n", 1)
             if "KO" in build_pkg[pkg]:
-                result=False
-        if result==False:
-            logger.error("some system dependencies are missing, please install them with "+\
-                         check_cmd[0])
-        return result        
+                result = False
+        if not result:
+            logger.error(
+                "some system dependencies are missing, please install them with "
+                + check_cmd[0]
+            )
+        return result
 
     if product_info.get_source == "fixed":
         # skip
-        logger.write('%s  ' % src.printcolors.printc(src.OK_STATUS),
-                     3,
-                     False)
+        logger.write("%s  " % src.printcolors.printc(src.OK_STATUS), 3, False)
         msg = "FIXED : %s\n" % product_info.install_dir
 
         if not os.path.isdir(product_info.install_dir):
-            logger.warning("The fixed path do not exixts!! Please check it : %s\n" % product_info.install_dir)
+            logger.warning(
+                "The fixed path do not exixts!! Please check it : %s\n"
+                % product_info.install_dir
+            )
         else:
             logger.write(msg, 3)
-        return True  
+        return True
 
     # if the get_source is not in [git, archive, cvs, svn, fixed, native]
-    logger.write(_("Unknown get source method \"%(get)s\" for product %(product)s") % \
-        { 'get': product_info.get_source, 'product': product_info.name }, 3, False)
+    logger.write(
+        _('Unknown get source method "%(get)s" for product %(product)s')
+        % {"get": product_info.get_source, "product": product_info.name},
+        3,
+        False,
+    )
     logger.write(" ... ", 3, False)
     logger.flush()
     return False
 
+
 def get_all_product_sources(config, products, logger):
-    '''Get all the product sources.
-    
+    """Get all the product sources.
+
     :param config Config: The global configuration
     :param products List: The list of tuples (product name, product informations)
     :param logger Logger: The logger instance to be used for the logging
     :return: the tuple (number of successes, dictionary product_name/success_fail)
     :rtype: (int,dict)
-    '''
+    """
 
     # Initialize the variables that will count the fails and success
     results = dict()
@@ -451,45 +496,58 @@ def get_all_product_sources(config, products, logger):
     max_product_name_len = 1
     if len(products) > 0:
         max_product_name_len = max(map(lambda l: len(l), products[0])) + 4
-    
+
     # The loop on all the products from which to get the sources
     # DBG.write("source.get_all_product_sources config id", id(config), True)
     for product_name, product_info in products:
         # get the product name, the product information and the directory where to put
         # the sources
-        if (not (src.product.product_is_fixed(product_info) or 
-                 src.product.product_is_native(product_info))):
+        if not (
+            src.product.product_is_fixed(product_info)
+            or src.product.product_is_native(product_info)
+        ):
             source_dir = src.Path(product_info.source_dir)
         else:
-            source_dir = src.Path('')
+            source_dir = src.Path("")
 
         # display and log
-        logger.write('%s: ' % src.printcolors.printcLabel(product_name), 3)
-        logger.write(' ' * (max_product_name_len - len(product_name)), 3, False)
+        logger.write("%s: " % src.printcolors.printcLabel(product_name), 3)
+        logger.write(" " * (max_product_name_len - len(product_name)), 3, False)
         logger.write("\n", 4, False)
-        
-        # Remove the existing source directory if 
+
+        # Remove the existing source directory if
         # the product is not in development mode
         is_dev = src.product.product_is_dev(product_info)
         if source_dir.exists():
-            logger.write('%s  ' % src.printcolors.printc(src.OK_STATUS), 3, False)
-            msg = _("INFO : Not doing anything because the source directory already exists:\n    %s\n") % source_dir
+            logger.write("%s  " % src.printcolors.printc(src.OK_STATUS), 3, False)
+            msg = (
+                _(
+                    "INFO : Not doing anything because the source directory already exists:\n    %s\n"
+                )
+                % source_dir
+            )
             logger.write(msg, 3)
             good_result = good_result + 1
             # Do not get the sources and go to next product
             continue
 
         # Call to the function that gets the sources for one product
-        retcode = get_product_sources(config, product_info, is_dev, 
-                                     source_dir, logger, max_product_name_len, 
-                                     checkout=False)
-        
-        '''
+        retcode = get_product_sources(
+            config,
+            product_info,
+            is_dev,
+            source_dir,
+            logger,
+            max_product_name_len,
+            checkout=False,
+        )
+
+        """
         if 'no_rpath' in product_info.keys():
             if product_info.no_rpath:
                 hack_no_rpath(config, product_info, logger)
-        '''
-        
+        """
+
         # Check that the sources were correctly fetched, using the files to be tested
         # in product information
         if retcode:
@@ -509,29 +567,31 @@ def get_all_product_sources(config, products, logger):
         else:
             # The case where it failed
             res = src.KO_STATUS
-        
+
         # print the result
-        if not(src.product.product_is_fixed(product_info) or 
-               src.product.product_is_native(product_info)):
-            logger.write('%s\n' % src.printcolors.printc(res), 3, False)
+        if not (
+            src.product.product_is_fixed(product_info)
+            or src.product.product_is_native(product_info)
+        ):
+            logger.write("%s\n" % src.printcolors.printc(res), 3, False)
 
     return good_result, results
 
+
 def check_sources(product_info, logger):
-    '''Check that the sources are correctly get, using the files to be tested
+    """Check that the sources are correctly get, using the files to be tested
        in product information
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                 the product to be prepared
     :return: True if the files exist (or if no files to test are provided).
     :rtype: boolean
-    '''
+    """
     # Get the files to test if there is any
-    if ("present_files" in product_info and 
-        "source" in product_info.present_files):
+    if "present_files" in product_info and "source" in product_info.present_files:
         l_files_to_be_tested = product_info.present_files.source
         for file_path in l_files_to_be_tested:
-            # The path to test is the source directory 
+            # The path to test is the source directory
             # of the product joined with the provided file path
             path_to_test = os.path.join(product_info.source_dir, file_path)
             logger.write(_("\nTesting existence of file: \n"), 5)
@@ -541,36 +601,41 @@ def check_sources(product_info, logger):
             logger.write(src.printcolors.printcSuccess(" OK\n"), 5)
     return True, ""
 
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the source command description.
     :rtype: str
-    '''
-    return _("The source command gets the sources of the application products "
-             "from cvs, git or an archive.\n\nexample:"
-             "\nsat source SALOME-master --products KERNEL,GUI")
-  
+    """
+    return _(
+        "The source command gets the sources of the application products "
+        "from cvs, git or an archive.\n\nexample:"
+        "\nsat source SALOME-master --products KERNEL,GUI"
+    )
+
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with source parameter.
-    '''
+    """method that is called when salomeTools is called with source parameter."""
     DBG.write("source.run()", args)
     # Parse the options
     (options, args) = parser.parse_args(args)
-    
+
     # check that the command has been called with an application
-    src.check_config_has_application( runner.cfg )
+    src.check_config_has_application(runner.cfg)
 
     # Print some information
-    logger.write(_('Getting sources of the application %s\n') % 
-                src.printcolors.printcLabel(runner.cfg.VARS.application), 1)
-    src.printcolors.print_value(logger, 'workdir', 
-                                runner.cfg.APPLICATION.workdir, 2)
+    logger.write(
+        _("Getting sources of the application %s\n")
+        % src.printcolors.printcLabel(runner.cfg.VARS.application),
+        1,
+    )
+    src.printcolors.print_value(logger, "workdir", runner.cfg.APPLICATION.workdir, 2)
     logger.write("\n", 2, False)
-       
+
     # Get the products list with product information according to the options
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
-    
+
     # Call to the function that gets all the sources
     good_result, results = get_all_product_sources(runner.cfg, products_infos, logger)
 
index c934d62d94e3168a9a0b42236abe1875521cb2c2..95e6ae147aa8b9680ec745ec0b3aeefe487887d1 100644 (file)
@@ -632,7 +632,9 @@ def run(args, runner, logger):
     products_infos = src.product.get_products_list(options, runner.cfg, logger)
 
     # Call to the function that gets all the sources
-    good_result, results = update_all_product_sources(runner.cfg, products_infos, logger)
+    good_result, results = update_all_product_sources(
+        runner.cfg, products_infos, logger
+    )
 
     # Display the results (how much passed, how much failed, etc...)
     status = src.OK_STATUS
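
The rewrites in these hunks follow what looks like black's default style: double-quoted strings, one argument per line with a trailing comma when a call no longer fits, and wrapping at 88 columns. A minimal sketch of reproducing one such reformat programmatically, assuming the black package is installed (format_str and Mode are exposed by its Python API):

# Illustrative only: reformat a single statement the way these hunks do.
import black

snippet = (
    "logger.write('dev: %s ... ' % "
    "src.printcolors.printcInfo(product_info.source_dir), 3, False)"
)
print(black.format_str(snippet, mode=black.Mode(line_length=88)))
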
index e4ddc59677672ec65b8130cd0f57eff046c6248d..ec1b090832c1691f8003425a47a9f67d810ae1cd 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -27,53 +27,82 @@ import src
 
 # Compatibility python 2/3 for input function
 # input stays input for python 3 and input = raw_input for python 2
-try: 
+try:
     input = raw_input
-except NameError: 
+except NameError:
     pass
 
 # Python 2/3 compatibility for execfile function
 try:
     execfile
 except:
+
     def execfile(somefile, global_vars, local_vars):
         with open(somefile) as f:
-            code = compile(f.read(), somefile, 'exec')
+            code = compile(f.read(), somefile, "exec")
             exec(code, global_vars, local_vars)
 
+
 parser = src.options.Options()
-parser.add_option('n', 'name', 'string', 'name',
-    _("""REQUIRED: the name of the module to create.
+parser.add_option(
+    "n",
+    "name",
+    "string",
+    "name",
+    _(
+        """REQUIRED: the name of the module to create.
 \tThe name must be a single word in upper case with only alphanumeric characters.
 \tWhen generating a c++ component the module's """
-"""name must be suffixed with 'CPP'."""))
-parser.add_option('t', 'template', 'string', 'template',
-    _('REQUIRED: the template to use.'))
-parser.add_option('', 'target', 'string', 'target',
-    _('REQUIRED: where to create the module.'))
-parser.add_option('', 'param', 'string', 'param',
-    _('''Optional: dictionary to generate the configuration for salomeTools.
+        """name must be suffixed with 'CPP'."""
+    ),
+)
+parser.add_option(
+    "t", "template", "string", "template", _("REQUIRED: the template to use.")
+)
+parser.add_option(
+    "", "target", "string", "target", _("REQUIRED: where to create the module.")
+)
+parser.add_option(
+    "",
+    "param",
+    "string",
+    "param",
+    _(
+        """Optional: dictionary to generate the configuration for salomeTools.
 \tFormat is: --param param1=value1,param2=value2... without spaces
-\tNote that when using this option you must supply all the '''
-'''values otherwise an error will be raised.'''))
-parser.add_option('', 'info', 'boolean', 'info',
-    _('Optional: Get information on the template.'), False)
+\tNote that when using this option you must supply all the """
+        """values otherwise an error will be raised."""
+    ),
+)
+parser.add_option(
+    "",
+    "info",
+    "boolean",
+    "info",
+    _("Optional: Get information on the template."),
+    False,
+)
+
 
 class TParam:
     def __init__(self, param_def, compo_name, dico=None):
         self.default = ""
         self.prompt = ""
         self.check_method = None
-        
+
         if isinstance(param_def, str):
             self.name = param_def
         elif isinstance(param_def, tuple):
             self.name = param_def[0]
             if len(param_def) > 1:
-                if dico is not None: self.default = param_def[1] % dico
-                else: self.default = param_def[1]
-            if len(param_def) > 2: self.prompt = param_def[2]
-            if len(param_def) > 3: self.check_method = param_def[3]
+                if dico is not None:
+                    self.default = param_def[1] % dico
+                else:
+                    self.default = param_def[1]
+            if len(param_def) > 2:
+                self.prompt = param_def[2]
+            if len(param_def) > 3:
+                self.check_method = param_def[3]
         else:
             raise src.SatException(_("ERROR in template parameter definition"))
 
@@ -89,11 +118,13 @@ class TParam:
             return len(val) > 0
         return len(val) > 0 and self.check_method(val)
 
+
 def get_dico_param(dico, key, default):
     if key in dico:
         return dico[key]
     return default
 
+
 class TemplateSettings:
     def __init__(self, compo_name, settings_file, target):
         self.compo_name = compo_name
@@ -107,14 +138,15 @@ class TemplateSettings:
         # check required parameters in template.info
         missing = []
         for pp in ["file_subst", "parameters"]:
-            if not (pp in ldic): missing.append("'%s'" % pp)
+            if not (pp in ldic):
+                missing.append("'%s'" % pp)
         if len(missing) > 0:
-            raise src.SatException(_(
-                "Bad format in settings file! %s not defined.") % ", ".join(
-                                                                       missing))
-        
+            raise src.SatException(
+                _("Bad format in settings file! %s not defined.") % ", ".join(missing)
+            )
+
         self.file_subst = ldic["file_subst"]
-        self.parameters = ldic['parameters']
+        self.parameters = ldic["parameters"]
         self.info = get_dico_param(ldic, "info", "").strip()
         self.pyconf = get_dico_param(ldic, "pyconf", "")
         self.post_command = get_dico_param(ldic, "post_command", "")
@@ -123,7 +155,7 @@ class TemplateSettings:
         self.delimiter_char = get_dico_param(ldic, "delimiter", ":sat:")
 
         # get the ignore filter
-        self.ignore_filters = [l.strip() for l in ldic["ignore_filters"].split(',')]
+        self.ignore_filters = [l.strip() for l in ldic["ignore_filters"].split(",")]
 
     def has_pyconf(self):
         return len(self.pyconf) > 0
@@ -144,15 +176,15 @@ class TemplateSettings:
     def check_user_values(self, values):
         if values is None:
             return
-        
+
         # create a list of all parameters (pyconf + list))
         pnames = self.get_pyconf_parameters()
         for p in self.parameters:
             tp = TParam(p, self.compo_name)
             pnames.append(tp.name)
-        
+
         # reduce the list
-        pnames = list(set(pnames)) # remove duplicates
+        pnames = list(set(pnames))  # remove duplicates
 
         known_values = ["name", "Name", "NAME", "target", self.file_subst]
         known_values.extend(values.keys())
@@ -160,10 +192,9 @@ class TemplateSettings:
         for p in pnames:
             if p not in known_values:
                 missing.append(p)
-        
+
         if len(missing) > 0:
-            raise src.SatException(_(
-                                 "Missing parameters: %s") % ", ".join(missing))
+            raise src.SatException(_("Missing parameters: %s") % ", ".join(missing))
 
     def get_parameters(self, conf_values=None):
         if self.dico is not None:
@@ -188,7 +219,7 @@ class TemplateSettings:
             tp = TParam(p, self.compo_name, dico)
             if tp.name in dico:
                 continue
-            
+
             val = ""
             while not tp.check_value(val):
                 val = input(tp.prompt)
@@ -207,6 +238,7 @@ class TemplateSettings:
         self.dico = dico
         return self.dico
 
+
 def search_template(config, template):
     # search template
     template_src_dir = ""
@@ -225,27 +257,24 @@ def search_template(config, template):
         raise src.SatException(_("Template not found: %s") % template)
 
     return template_src_dir
+
+
 ##
 # Prepares a module from a template.
-def prepare_from_template(config,
-                          name,
-                          template,
-                          target_dir,
-                          conf_values,
-                          logger):
+def prepare_from_template(config, name, template, target_dir, conf_values, logger):
     template_src_dir = search_template(config, template)
     res = 0
 
     # copy the template
     if os.path.isfile(template_src_dir):
-        logger.write("  " + _(
-                        "Extract template %s\n") % src.printcolors.printcInfo(
-                                                                   template), 4)
+        logger.write(
+            "  " + _("Extract template %s\n") % src.printcolors.printcInfo(template), 4
+        )
         src.system.archive_extract(template_src_dir, target_dir)
     else:
-        logger.write("  " + _(
-                        "Copy template %s\n") % src.printcolors.printcInfo(
-                                                                   template), 4)
+        logger.write(
+            "  " + _("Copy template %s\n") % src.printcolors.printcInfo(template), 4
+        )
         shutil.copytree(template_src_dir, target_dir)
     logger.write("\n", 5)
 
@@ -266,35 +295,38 @@ def prepare_from_template(config,
             ff = fic.replace(tsettings.file_subst, compo_name)
             if ff != fic:
                 if os.path.exists(os.path.join(root, ff)):
-                    raise src.SatException(_(
-                        "Destination file already exists: %s") % os.path.join(
-                                                                      root, ff))
+                    raise src.SatException(
+                        _("Destination file already exists: %s")
+                        % os.path.join(root, ff)
+                    )
                 logger.write("    %s -> %s\n" % (fic, ff), 5)
                 os.rename(os.path.join(root, fic), os.path.join(root, ff))
 
     # rename the directories
     logger.write("\n", 5)
-    logger.write("  " + src.printcolors.printcLabel(_("Rename directories\n")),
-                 4)
+    logger.write("  " + src.printcolors.printcLabel(_("Rename directories\n")), 4)
     for root, dirs, files in os.walk(target_dir, topdown=False):
         for rep in dirs:
             dd = rep.replace(tsettings.file_subst, compo_name)
             if dd != rep:
                 if os.path.exists(os.path.join(root, dd)):
-                    raise src.SatException(_(
-                                "Destination directory "
-                                "already exists: %s") % os.path.join(root, dd))
+                    raise src.SatException(
+                        _("Destination directory " "already exists: %s")
+                        % os.path.join(root, dd)
+                    )
                 logger.write("    %s -> %s\n" % (rep, dd), 5)
                 os.rename(os.path.join(root, rep), os.path.join(root, dd))
 
     # ask for missing parameters
     logger.write("\n", 5)
-    logger.write("  " + src.printcolors.printcLabel(
-                                        _("Make substitution in files\n")), 4)
-    logger.write("    " + _("Delimiter =") + " %s\n" % tsettings.delimiter_char,
-                 5)
-    logger.write("    " + _("Ignore Filters =") + " %s\n" % ', '.join(
-                                                   tsettings.ignore_filters), 5)
+    logger.write(
+        "  " + src.printcolors.printcLabel(_("Make substitution in files\n")), 4
+    )
+    logger.write("    " + _("Delimiter =") + " %s\n" % tsettings.delimiter_char, 5)
+    logger.write(
+        "    " + _("Ignore Filters =") + " %s\n" % ", ".join(tsettings.ignore_filters),
+        5,
+    )
     dico = tsettings.get_parameters(conf_values)
     logger.write("\n", 3)
 
@@ -312,44 +344,54 @@ def prepare_from_template(config,
                 logger.write("  - %s\n" % fpath[pathlen:], 5)
                 continue
             # read the file
-            with open(fpath, 'r') as f:
+            with open(fpath, "r") as f:
                 m = f.read()
                 # make the substitution
                 template = CompoTemplate(m)
                 d = template.safe_substitute(dico)
-                        
+
             changed = " "
             if d != m:
                 changed = "*"
-                with open(fpath, 'w') as f:
+                with open(fpath, "w") as f:
                     f.write(d)
             logger.write("  %s %s\n" % (changed, fpath[pathlen:]), 5)
 
     if not tsettings.has_pyconf():
-        logger.write(src.printcolors.printcWarning(_(
-                   "Definition for sat not found in settings file.")) + "\n", 2)
+        logger.write(
+            src.printcolors.printcWarning(
+                _("Definition for sat not found in settings file.")
+            )
+            + "\n",
+            2,
+        )
     else:
         definition = tsettings.pyconf % dico
-        pyconf_file = os.path.join(target_dir, name + '.pyconf')
-        f = open(pyconf_file, 'w')
+        pyconf_file = os.path.join(target_dir, name + ".pyconf")
+        f = open(pyconf_file, "w")
         f.write(definition)
         f.close()
-        logger.write(_(
-            "Create configuration file: ") + src.printcolors.printcInfo(
-                                                         pyconf_file) + "\n", 2)
+        logger.write(
+            _("Create configuration file: ")
+            + src.printcolors.printcInfo(pyconf_file)
+            + "\n",
+            2,
+        )
 
     if len(tsettings.post_command) > 0:
         cmd = tsettings.post_command % dico
         logger.write("\n", 5, True)
-        logger.write(_(
-              "Run post command: ") + src.printcolors.printcInfo(cmd) + "\n", 3)
-        
+        logger.write(
+            _("Run post command: ") + src.printcolors.printcInfo(cmd) + "\n", 3
+        )
+
         p = subprocess.Popen(cmd, shell=True, cwd=target_dir)
         p.wait()
         res = p.returncode
 
     return res
 
+
 def get_template_info(config, template_name, logger):
     sources = search_template(config, template_name)
     src.printcolors.print_value(logger, _("Template"), sources)
@@ -368,26 +410,23 @@ def get_template_info(config, template_name, logger):
     if not os.path.exists(settings_file):
         raise src.SatException(_("Settings file not found"))
     tsettings = TemplateSettings("NAME", settings_file, "target")
-    
+
     logger.write("\n", 3)
     if len(tsettings.info) == 0:
-        logger.write(src.printcolors.printcWarning(_(
-                                       "No information for this template.")), 3)
+        logger.write(
+            src.printcolors.printcWarning(_("No information for this template.")), 3
+        )
     else:
         logger.write(tsettings.info, 3)
 
     logger.write("\n", 3)
     logger.write("= Configuration", 3)
-    src.printcolors.print_value(logger,
-                                "file substitution key",
-                                tsettings.file_subst)
-    src.printcolors.print_value(logger,
-                                "subsitution key",
-                                tsettings.delimiter_char)
+    src.printcolors.print_value(logger, "file substitution key", tsettings.file_subst)
+    src.printcolors.print_value(logger, "subsitution key", tsettings.delimiter_char)
     if len(tsettings.ignore_filters) > 0:
-        src.printcolors.print_value(logger,
-                                    "Ignore Filter",
-                                    ', '.join(tsettings.ignore_filters))
+        src.printcolors.print_value(
+            logger, "Ignore Filter", ", ".join(tsettings.ignore_filters)
+        )
 
     logger.write("\n", 3)
     logger.write("= Parameters", 3)
@@ -404,10 +443,12 @@ def get_template_info(config, template_name, logger):
     logger.write("= Verification\n", 3)
     if tsettings.file_subst not in pnames:
         logger.write(
-                     "file substitution key not defined as a "
-                     "parameter: %s" % tsettings.file_subst, 3)
+            "file substitution key not defined as a "
+            "parameter: %s" % tsettings.file_subst,
+            3,
+        )
         retcode = 1
-    
+
     reexp = tsettings.delimiter_char.replace("$", "\$") + "{(?P<name>\S[^}]*)"
     pathlen = len(tmpdir) + 1
     for root, __, files in os.walk(tmpdir):
@@ -416,15 +457,17 @@ def get_template_info(config, template_name, logger):
             if not tsettings.check_file_for_substitution(fpath[pathlen:]):
                 continue
             # read the file
-            with open(fpath, 'r') as f:
+            with open(fpath, "r") as f:
                 m = f.read()
                 zz = re.findall(reexp, m)
-                zz = list(set(zz)) # reduce
+                zz = list(set(zz))  # reduce
                 zz = filter(lambda l: l not in pnames, zz)
                 if len(zz) > 0:
-                    logger.write("Missing definition in %s: %s" % (
-                        src.printcolors.printcLabel(
-                                                fpath[pathlen:]), ", ".join(zz)), 3)
+                    logger.write(
+                        "Missing definition in %s: %s"
+                        % (src.printcolors.printcLabel(fpath[pathlen:]), ", ".join(zz)),
+                        3,
+                    )
                     retcode = 1
 
     if retcode == 0:
@@ -439,16 +482,19 @@ def get_template_info(config, template_name, logger):
 
     return retcode
 
+
 ##
 # Describes the command
 def description():
-    return _("The template command creates the sources for a SALOME "
-             "module from a template.\n\nexample\nsat template "
-             "--name my_product_name --template PythonComponent --target /tmp")
+    return _(
+        "The template command creates the sources for a SALOME "
+        "module from a template.\n\nexample\nsat template "
+        "--name my_product_name --template PythonComponent --target /tmp"
+    )
+
 
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with template parameter.
-    '''
+    """method that is called when salomeTools is called with template parameter."""
     (options, args) = parser.parse_args(args)
 
     if options.template is None:
@@ -478,9 +524,11 @@ def run(args, runner, logger):
         logger.write("\n", 1)
         return 1
 
-    if not options.name.replace('_', '').isalnum():
-        msg = _("Error: component name must contains only alphanumeric "
-                "characters and no spaces\n")
+    if not options.name.replace("_", "").isalnum():
+        msg = _(
+            "Error: component name must contains only alphanumeric "
+            "characters and no spaces\n"
+        )
         logger.write(src.printcolors.printcError(msg), 1)
         logger.write("\n", 1)
         return 1
@@ -498,35 +546,41 @@ def run(args, runner, logger):
         logger.write("\n", 1)
         return 1
 
-
-    logger.write(_('Create sources from template\n'), 1)
-    src.printcolors.print_value(logger, 'destination', target_dir, 2)
-    src.printcolors.print_value(logger, 'name', options.name, 2)
-    src.printcolors.print_value(logger, 'template', options.template, 2)
+    logger.write(_("Create sources from template\n"), 1)
+    src.printcolors.print_value(logger, "destination", target_dir, 2)
+    src.printcolors.print_value(logger, "name", options.name, 2)
+    src.printcolors.print_value(logger, "template", options.template, 2)
     logger.write("\n", 3, False)
-    
+
     conf_values = None
     if options.param is not None:
         conf_values = {}
         for elt in options.param.split(","):
-            param_def = elt.strip().split('=')
+            param_def = elt.strip().split("=")
             if len(param_def) != 2:
                 msg = _("Error: bad parameter definition")
                 logger.write(src.printcolors.printcError(msg), 1)
                 logger.write("\n", 1)
                 return 1
             conf_values[param_def[0].strip()] = param_def[1].strip()
-    
-    retcode = prepare_from_template(runner.cfg, options.name, options.template,
-        target_dir, conf_values, logger)
+
+    retcode = prepare_from_template(
+        runner.cfg, options.name, options.template, target_dir, conf_values, logger
+    )
 
     if retcode == 0:
-        logger.write(_(
-                 "The sources were created in %s") % src.printcolors.printcInfo(
-                                                                 target_dir), 3)
-        logger.write(src.printcolors.printcWarning(_("\nDo not forget to put "
-                                   "them in your version control system.")), 3)
-        
+        logger.write(
+            _("The sources were created in %s")
+            % src.printcolors.printcInfo(target_dir),
+            3,
+        )
+        logger.write(
+            src.printcolors.printcWarning(
+                _("\nDo not forget to put " "them in your version control system.")
+            ),
+            3,
+        )
+
     logger.write("\n", 3)
-    
+
     return retcode
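
In prepare_from_template above, the collected parameters are substituted through CompoTemplate using the delimiter read from template.info (":sat:" by default, per TemplateSettings). CompoTemplate is presumably a thin string.Template subclass; under that assumption, the substitution step amounts to the sketch below, where the placeholder text and values are invented for illustration.

# Illustrative only: custom-delimiter substitution comparable to CompoTemplate.
from string import Template

class SatTemplate(Template):
    delimiter = ":sat:"  # default delimiter_char used by TemplateSettings

filled = SatTemplate(":sat:{name} generated in :sat:{target}").safe_substitute(
    {"name": "PYCMP", "target": "/tmp"}
)
print(filled)  # -> PYCMP generated in /tmp
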
index 62fd7fef5cc6d55e793a4955b2dc6bde2f4ebc72..60ce342183c7da22ee083bba32b21c74abb69143 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -43,34 +43,75 @@ from src.xmlManager import add_simple_node
 
 # Define all possible option for the test command :  sat test <options>
 parser = src.options.Options()
-parser.add_option('b', 'base', 'string', 'base',
-    _("""Optional: The name of the test base to use."
+parser.add_option(
+    "b",
+    "base",
+    "string",
+    "base",
+    _(
+        """Optional: The name of the test base to use."
           This name has to be registered in your application and in a project.
-          A path to a test base can also be used."""))
-parser.add_option('l', 'launcher', 'string', 'launcher',
-    _("""Optional: Specify the path to a SALOME launcher
-          used to launch the test scripts of the test base."""))
-parser.add_option('g', 'grid', 'list', 'grids',
-    _('Optional: Which grid(s) to test (subdirectory of the test base).'))
-parser.add_option('s', 'session', 'list2', 'sessions',
-    _('Optional: Which session(s) to test (subdirectory of the grid).'))
-parser.add_option('', 'display', 'string', 'display',
-    _("""Optional: Set the display where to launch SALOME.
-          If value is NO then option --show-desktop=0 will be used to launch SALOME."""))
-parser.add_option('', 'keep', 'boolean', 'keeptempdir',
-                  _('Optional: keep temporary big tests directories.'))
+          A path to a test base can also be used."""
+    ),
+)
+parser.add_option(
+    "l",
+    "launcher",
+    "string",
+    "launcher",
+    _(
+        """Optional: Specify the path to a SALOME launcher
+          used to launch the test scripts of the test base."""
+    ),
+)
+parser.add_option(
+    "g",
+    "grid",
+    "list",
+    "grids",
+    _("Optional: Which grid(s) to test (subdirectory of the test base)."),
+)
+parser.add_option(
+    "s",
+    "session",
+    "list2",
+    "sessions",
+    _("Optional: Which session(s) to test (subdirectory of the grid)."),
+)
+parser.add_option(
+    "",
+    "display",
+    "string",
+    "display",
+    _(
+        """Optional: Set the display where to launch SALOME.
+          If value is NO then option --show-desktop=0 will be used to launch SALOME."""
+    ),
+)
+parser.add_option(
+    "",
+    "keep",
+    "boolean",
+    "keeptempdir",
+    _("Optional: keep temporary big tests directories."),
+)
+
+
 def description():
-    '''method that is called when salomeTools is called with --help option.
-    
+    """method that is called when salomeTools is called with --help option.
+
     :return: The text to display for the test command description.
     :rtype: str
-    '''
-    return _("The test command runs a test base on a SALOME installation.\n\n"
-             "example:\nsat test SALOME-master --grid GEOM --session light")     
+    """
+    return _(
+        "The test command runs a test base on a SALOME installation.\n\n"
+        "example:\nsat test SALOME-master --grid GEOM --session light"
+    )
+
 
 def parse_option_old(args, config):
-    """ Parse the options and do some verifications about it
-    
+    """Parse the options and do some verifications about it
+
     :param args List: The list of arguments of the command
     :param config Config: The global configuration
     :return: the options of the current command launch and the full arguments
@@ -82,20 +123,20 @@ def parse_option_old(args, config):
         options.launcher = ""
     elif not os.path.isabs(options.launcher):
         if not src.config_has_application(config):
-            msg = _("An application is required to use a relative path with option --appli")
+            msg = _(
+                "An application is required to use a relative path with option --appli"
+            )
             raise src.SatException(msg)
-        options.launcher = os.path.join(config.APPLICATION.workdir,
-                                        options.launcher)
+        options.launcher = os.path.join(config.APPLICATION.workdir, options.launcher)
 
         if not os.path.exists(options.launcher):
-            raise src.SatException(_("Launcher not found: %s") % 
-                                   options.launcher)
+            raise src.SatException(_("Launcher not found: %s") % options.launcher)
 
     return (options, args)
 
 
 def parse_option(args, config):
-    """ Parse the options and do some verifications about it
+    """Parse the options and do some verifications about it
 
     :param args List: The list of arguments of the command
     :param config Config: The global configuration
@@ -110,12 +151,16 @@ def parse_option(args, config):
 
     if not os.path.isabs(options.launcher):
         if not src.config_has_application(config):
-            msg = _("An application is required to use a relative path with option --appli")
+            msg = _(
+                "An application is required to use a relative path with option --appli"
+            )
             raise src.SatException(msg)
         else:
-            options.launcher = os.path.join(config.APPLICATION.workdir, options.launcher)
+            options.launcher = os.path.join(
+                config.APPLICATION.workdir, options.launcher
+            )
             if not os.path.exists(options.launcher):
-                raise src.SatException(_("Launcher not found: %s") %  options.launcher)
+                raise src.SatException(_("Launcher not found: %s") % options.launcher)
 
     # absolute path
     launcher = os.path.realpath(os.path.expandvars(options.launcher))
@@ -123,24 +168,26 @@ def parse_option(args, config):
         options.launcher = launcher
         return (options, args)
 
-    raise src.SatException(_("Launcher not found: %s") %  options.launcher)
+    raise src.SatException(_("Launcher not found: %s") % options.launcher)
 
 
 def ask_a_path():
-    """ 
-    """
+    """ """
     path = input("enter a path where to save the result: ")
     if path == "":
-        result = input("the result will be not save. Are you sure to "
-                           "continue ? [y/n] ")
+        result = input(
+            "the result will be not save. Are you sure to " "continue ? [y/n] "
+        )
         if result == "y":
             return path
         else:
             return ask_a_path()
 
     elif os.path.exists(path):
-        result = input("Warning, the content of %s will be deleted. Are you"
-                           " sure you want to continue? [y/n] " % path)
+        result = input(
+            "Warning, the content of %s will be deleted. Are you"
+            " sure you want to continue? [y/n] " % path
+        )
         if result == "y":
             return path
         else:
@@ -148,18 +195,20 @@ def ask_a_path():
     else:
         return path
 
+
 def save_file(filename, base):
-    f = open(filename, 'r')
+    f = open(filename, "r")
     content = f.read()
     f.close()
 
     objectname = sha1(content).hexdigest()
 
-    f = gzip.open(os.path.join(base, '.objects', objectname), 'w')
+    f = gzip.open(os.path.join(base, ".objects", objectname), "w")
     f.write(content)
     f.close()
     return objectname
 
+
 def move_test_results(in_dir, what, out_dir, logger):
     if out_dir == in_dir:
         return
@@ -169,9 +218,9 @@ def move_test_results(in_dir, what, out_dir, logger):
     while not pathIsOk:
         try:
             # create test results directory if necessary
-            #logger.write("FINAL = %s\n" % finalPath, 5)
+            # logger.write("FINAL = %s\n" % finalPath, 5)
             if not os.access(finalPath, os.F_OK):
-                #shutil.rmtree(finalPath)
+                # shutil.rmtree(finalPath)
                 os.makedirs(finalPath)
             pathIsOk = True
         except:
@@ -179,101 +228,116 @@ def move_test_results(in_dir, what, out_dir, logger):
             finalPath = ask_a_path()
 
     if finalPath != "":
-        os.makedirs(os.path.join(finalPath, what, 'BASES'))
+        os.makedirs(os.path.join(finalPath, what, "BASES"))
 
         # check if .objects directory exists
-        if not os.access(os.path.join(finalPath, '.objects'), os.F_OK):
-            os.makedirs(os.path.join(finalPath, '.objects'))
+        if not os.access(os.path.join(finalPath, ".objects"), os.F_OK):
+            os.makedirs(os.path.join(finalPath, ".objects"))
 
-        logger.write(_('copy tests results to %s ... ') % finalPath, 3)
+        logger.write(_("copy tests results to %s ... ") % finalPath, 3)
         logger.flush()
-        #logger.write("\n", 5)
+        # logger.write("\n", 5)
 
         # copy env_info.py
-        shutil.copy2(os.path.join(in_dir, what, 'env_info.py'),
-                     os.path.join(finalPath, what, 'env_info.py'))
+        shutil.copy2(
+            os.path.join(in_dir, what, "env_info.py"),
+            os.path.join(finalPath, what, "env_info.py"),
+        )
 
         # for all sub directory (ie testbase) in the BASES directory
-        for testbase in os.listdir(os.path.join(in_dir, what, 'BASES')):
-            outtestbase = os.path.join(finalPath, what, 'BASES', testbase)
-            intestbase = os.path.join(in_dir, what, 'BASES', testbase)
+        for testbase in os.listdir(os.path.join(in_dir, what, "BASES")):
+            outtestbase = os.path.join(finalPath, what, "BASES", testbase)
+            intestbase = os.path.join(in_dir, what, "BASES", testbase)
 
             # ignore files in root dir
             if not os.path.isdir(intestbase):
                 continue
 
             os.makedirs(outtestbase)
-            #logger.write("  copy testbase %s\n" % testbase, 5)
+            # logger.write("  copy testbase %s\n" % testbase, 5)
 
-            for grid_ in [m for m in os.listdir(intestbase) \
-                            if os.path.isdir(os.path.join(intestbase, m))]:
+            for grid_ in [
+                m
+                for m in os.listdir(intestbase)
+                if os.path.isdir(os.path.join(intestbase, m))
+            ]:
                 # ignore source configuration directories
-                if grid_[:4] == '.git' or grid_ == 'CVS':
+                if grid_[:4] == ".git" or grid_ == "CVS":
                     continue
 
                 outgrid = os.path.join(outtestbase, grid_)
                 ingrid = os.path.join(intestbase, grid_)
                 os.makedirs(outgrid)
-                #logger.write("    copy grid %s\n" % grid_, 5)
+                # logger.write("    copy grid %s\n" % grid_, 5)
 
-                if grid_ == 'RESSOURCES':
+                if grid_ == "RESSOURCES":
                     for file_name in os.listdir(ingrid):
-                        if not os.path.isfile(os.path.join(ingrid,
-                                                           file_name)):
+                        if not os.path.isfile(os.path.join(ingrid, file_name)):
                             continue
                         f = open(os.path.join(outgrid, file_name), "w")
-                        f.write(save_file(os.path.join(ingrid, file_name),
-                                          finalPath))
+                        f.write(save_file(os.path.join(ingrid, file_name), finalPath))
                         f.close()
                 else:
-                    for session_name in [t for t in os.listdir(ingrid) if 
-                                      os.path.isdir(os.path.join(ingrid, t))]:
+                    for session_name in [
+                        t
+                        for t in os.listdir(ingrid)
+                        if os.path.isdir(os.path.join(ingrid, t))
+                    ]:
                         outsession = os.path.join(outgrid, session_name)
                         insession = os.path.join(ingrid, session_name)
                         os.makedirs(outsession)
-                        
+
                         for file_name in os.listdir(insession):
-                            if not os.path.isfile(os.path.join(insession,
-                                                               file_name)):
+                            if not os.path.isfile(os.path.join(insession, file_name)):
                                 continue
-                            if file_name.endswith('result.py'):
-                                shutil.copy2(os.path.join(insession, file_name),
-                                             os.path.join(outsession, file_name))
+                            if file_name.endswith("result.py"):
+                                shutil.copy2(
+                                    os.path.join(insession, file_name),
+                                    os.path.join(outsession, file_name),
+                                )
                             else:
                                 f = open(os.path.join(outsession, file_name), "w")
-                                f.write(save_file(os.path.join(insession,
-                                                               file_name),
-                                                  finalPath))
+                                f.write(
+                                    save_file(
+                                        os.path.join(insession, file_name), finalPath
+                                    )
+                                )
                                 f.close()
 
     logger.write(src.printcolors.printc("OK"), 3, False)
     logger.write("\n", 3, False)
 
+
 def check_remote_machine(machine_name, logger):
     logger.write(_("\ncheck the display on %s\n" % machine_name), 4)
     ssh_cmd = 'ssh -o "StrictHostKeyChecking no" %s "ls"' % machine_name
     logger.write(_("Executing the command : %s " % ssh_cmd), 4)
-    p = subprocess.Popen(ssh_cmd, 
-                         shell=True,
-                         stdin =subprocess.PIPE,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
+    p = subprocess.Popen(
+        ssh_cmd,
+        shell=True,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
     p.wait()
     if p.returncode != 0:
         logger.write(src.printcolors.printc(src.KO_STATUS) + "\n", 1)
         logger.write("    " + src.printcolors.printcError(p.stderr.read()), 2)
-        logger.write(src.printcolors.printcWarning((
-                                    "No ssh access to the display machine.")),1)
+        logger.write(
+            src.printcolors.printcWarning(("No ssh access to the display machine.")), 1
+        )
     else:
         logger.write(src.printcolors.printcSuccess(src.OK_STATUS) + "\n\n", 4)
 
+
 def findOrCreateNode(parentNode, nameNodeToFind):
     found = parentNode.find(nameNodeToFind)
     if found is None:
-      created = add_simple_node(parentNode, nameNodeToFind)
-      return created
+        created = add_simple_node(parentNode, nameNodeToFind)
+        return created
     else:
-      return found
+        return found
+
 
 def purgeEmptyNodes(root):
     """
@@ -288,32 +352,29 @@ def purgeEmptyNodes(root):
     text = root.text
     tail = root.tail
     if text is not None:
-      if text.replace(" ", "").replace("\n", "") == "":
-        # print("purgeEmptyNodes text %s" % root.tag)
-        root.text = None
+        if text.replace(" ", "").replace("\n", "") == "":
+            # print("purgeEmptyNodes text %s" % root.tag)
+            root.text = None
     if tail is not None:
-      if tail.replace(" ", "").replace("\n", "") == "":
-        # print("purgeEmptyNodes tail %s" % root.tag)
-        root.tail = None
+        if tail.replace(" ", "").replace("\n", "") == "":
+            # print("purgeEmptyNodes tail %s" % root.tag)
+            root.tail = None
     for node in root:
-      purgeEmptyNodes(node)
+        purgeEmptyNodes(node)
     return
 
+
 ##
 # Creates the XML report for a product.
-def create_test_report(config,
-                       xml_history_path,
-                       dest_path,
-                       retcode,
-                       xmlname=""):
+def create_test_report(config, xml_history_path, dest_path, retcode, xmlname=""):
     # get the date and hour of the launching of the command, in order to keep
     # history
     date_hour = config.VARS.datehour
-    
+
     # Get some information to put in the xml file
     application_name = config.VARS.application
     withappli = src.config_has_application(config)
-    
+
     first_time = False
     if not os.path.exists(xml_history_path):
         print("Log file creation %s" % xml_history_path)
@@ -327,16 +388,16 @@ def create_test_report(config,
         purgeEmptyNodes(root)
         prod_node = root.find("product")
 
-
     prod_node.attrib["history_file"] = os.path.basename(xml_history_path)
     prod_node.attrib["global_res"] = str(retcode)
 
     if withappli:
         if not first_time:
-            for node in (prod_node.findall("version_to_download") + 
-                         prod_node.findall("out_dir")):
+            for node in prod_node.findall("version_to_download") + prod_node.findall(
+                "out_dir"
+            ):
                 prod_node.remove(node)
-                
+
         add_simple_node(prod_node, "version_to_download", config.APPLICATION.name)
         add_simple_node(prod_node, "out_dir", config.APPLICATION.workdir)
 
@@ -344,30 +405,39 @@ def create_test_report(config,
     if not first_time:
         for node in prod_node.findall("exec"):
             prod_node.remove(node)
-        
+
     exec_node = add_simple_node(prod_node, "exec")
     exec_node.append(etree.Element("env", name="Host", value=config.VARS.node))
     exec_node.append(etree.Element("env", name="Architecture", value=config.VARS.dist))
-    exec_node.append(etree.Element("env", name="Number of processors", value=str(config.VARS.nb_proc)))
-    exec_node.append(etree.Element("env", name="Begin date", value=src.parse_date(date_hour)))
+    exec_node.append(
+        etree.Element(
+            "env", name="Number of processors", value=str(config.VARS.nb_proc)
+        )
+    )
+    exec_node.append(
+        etree.Element("env", name="Begin date", value=src.parse_date(date_hour))
+    )
     exec_node.append(etree.Element("env", name="Command", value=config.VARS.command))
-    exec_node.append(etree.Element("env", name="sat version", value=config.INTERNAL.sat_version))
+    exec_node.append(
+        etree.Element("env", name="sat version", value=config.INTERNAL.sat_version)
+    )
 
-    if 'TESTS' in config:
+    if "TESTS" in config:
         tests = findOrCreateNode(prod_node, "tests")
         known_errors = findOrCreateNode(prod_node, "known_errors")
         new_errors = findOrCreateNode(prod_node, "new_errors")
         amend = findOrCreateNode(prod_node, "amend")
-        
+
         tt = {}
         for test in config.TESTS:
             if not test.testbase in tt:
                 tt[test.testbase] = [test]
             else:
                 tt[test.testbase].append(test)
-        
+
         for testbase in tt.keys():
-            if verbose: print("---- create_test_report %s %s" % (testbase, first_time))
+            if verbose:
+                print("---- create_test_report %s %s" % (testbase, first_time))
             gn = findOrCreateNode(tests, "testbase")
 
             # initialize all grids and session to "not executed"
@@ -375,20 +445,19 @@ def create_test_report(config,
                 mn.attrib["executed_last_time"] = "no"
                 for tyn in mn.findall("session"):
                     tyn.attrib["executed_last_time"] = "no"
-                    for test_node in tyn.findall('test'):
+                    for test_node in tyn.findall("test"):
                         for node in test_node.getchildren():
                             if node.tag != "history":
                                 test_node.remove(node)
 
                         attribs_to_pop = []
                         for attribute in test_node.attrib:
-                            if (attribute != "script" and
-                                                    attribute != "res"):
+                            if attribute != "script" and attribute != "res":
                                 attribs_to_pop.append(attribute)
                         for attribute in attribs_to_pop:
                             test_node.attrib.pop(attribute)
-            
-            gn.attrib['name'] = testbase
+
+            gn.attrib["name"] = testbase
             nb, nb_pass, nb_failed, nb_timeout, nb_not_run = 0, 0, 0, 0, 0
             grids = {}
             sessions = {}
@@ -396,64 +465,65 @@ def create_test_report(config,
                 if not (test.grid in grids):
                     if first_time:
                         mn = add_simple_node(gn, "grid")
-                        mn.attrib['name'] = test.grid
+                        mn.attrib["name"] = test.grid
                     else:
                         l_mn = gn.findall("grid")
                         mn = None
                         for grid_node in l_mn:
-                            if grid_node.attrib['name'] == test.grid:
+                            if grid_node.attrib["name"] == test.grid:
                                 mn = grid_node
                                 break
                         if mn == None:
                             mn = add_simple_node(gn, "grid")
-                            mn.attrib['name'] = test.grid
-                    
+                            mn.attrib["name"] = test.grid
+
                     grids[test.grid] = mn
-                
+
                 mn.attrib["executed_last_time"] = "yes"
-                
+
                 if not "%s/%s" % (test.grid, test.session) in sessions:
                     if first_time:
                         tyn = add_simple_node(mn, "session")
-                        tyn.attrib['name'] = test.session
+                        tyn.attrib["name"] = test.session
                     else:
                         l_tyn = mn.findall("session")
                         tyn = None
                         for session_node in l_tyn:
-                            if session_node.attrib['name'] == test.session:
+                            if session_node.attrib["name"] == test.session:
                                 tyn = session_node
                                 break
                         if tyn == None:
                             tyn = add_simple_node(mn, "session")
-                            tyn.attrib['name'] = test.session
-                        
+                            tyn.attrib["name"] = test.session
+
                     sessions["%s/%s" % (test.grid, test.session)] = tyn
 
                 tyn.attrib["executed_last_time"] = "yes"
 
                 for script in test.script:
                     if first_time:
-                        tn = add_simple_node(sessions[
-                                           "%s/%s" % (test.grid, test.session)],
-                                             "test")
-                        tn.attrib['session'] = test.session
-                        tn.attrib['script'] = script.name
+                        tn = add_simple_node(
+                            sessions["%s/%s" % (test.grid, test.session)], "test"
+                        )
+                        tn.attrib["session"] = test.session
+                        tn.attrib["script"] = script.name
                         hn = add_simple_node(tn, "history")
                     else:
                         l_tn = sessions["%s/%s" % (test.grid, test.session)].findall(
-                                                                         "test")
+                            "test"
+                        )
                         tn = None
                         for test_node in l_tn:
-                            if test_node.attrib['script'] == script['name']:
+                            if test_node.attrib["script"] == script["name"]:
                                 tn = test_node
                                 break
-                        
+
                         if tn == None:
-                            tn = add_simple_node(sessions[
-                                           "%s/%s" % (test.grid, test.session)],
-                                             "test")
-                            tn.attrib['session'] = test.session
-                            tn.attrib['script'] = script.name
+                            tn = add_simple_node(
+                                sessions["%s/%s" % (test.grid, test.session)], "test"
+                            )
+                            tn.attrib["session"] = test.session
+                            tn.attrib["script"] = script.name
                             hn = add_simple_node(tn, "history")
                         else:
                             # Get or create the history node for the current test
@@ -462,110 +532,120 @@ def create_test_report(config,
                             else:
                                 hn = tn.find("history")
                             # Put the last test data into the history
-                            if 'res' in tn.attrib:
-                                attributes = {"date_hour" : date_hour,
-                                              "res" : tn.attrib['res'] }
-                                add_simple_node(hn,
-                                                "previous_test",
-                                                attrib=attributes)
+                            if "res" in tn.attrib:
+                                attributes = {
+                                    "date_hour": date_hour,
+                                    "res": tn.attrib["res"],
+                                }
+                                add_simple_node(hn, "previous_test", attrib=attributes)
                             for node in tn:
                                 if node.tag != "history":
                                     tn.remove(node)
-                    
-                    if 'callback' in script:
+
+                    if "callback" in script:
                         try:
                             cnode = add_simple_node(tn, "callback")
-                            if True:  # bug: malformed xml colorization  src.architecture.is_windows():
+                            if (
+                                True
+                            ):  # bug: malformed xml colorization  src.architecture.is_windows():
                                 import string
+
                                 cnode.text = filter(
-                                                lambda x: x in string.printable,
-                                                script.callback)
+                                    lambda x: x in string.printable, script.callback
+                                )
                             else:
-                                cnode.text = script.callback.decode(
-                                                                'string_escape')
+                                cnode.text = script.callback.decode("string_escape")
                         except UnicodeDecodeError as exc:
-                            zz = (script.callback[:exc.start] +
-                                  '?' +
-                                  script.callback[exc.end-2:])
+                            zz = (
+                                script.callback[: exc.start]
+                                + "?"
+                                + script.callback[exc.end - 2 :]
+                            )
                             cnode = add_simple_node(tn, "callback")
                             cnode.text = zz.decode("UTF-8")
-                    
+
                     # Add the script content
                     cnode = add_simple_node(tn, "content")
                     cnode.text = script.content
-                    
+
                     # Add the script execution log
                     cnode = add_simple_node(tn, "out")
                     cnode.text = script.out
-                    
-                    if 'amend' in script:
+
+                    if "amend" in script:
                         cnode = add_simple_node(tn, "amend")
                         cnode.text = script.amend.decode("UTF-8")
 
                     if script.time < 0:
-                        tn.attrib['exec_time'] = "?"
+                        tn.attrib["exec_time"] = "?"
                     else:
-                        tn.attrib['exec_time'] = "%.3f" % script.time
-                    tn.attrib['res'] = script.res
+                        tn.attrib["exec_time"] = "%.3f" % script.time
+                    tn.attrib["res"] = script.res
 
                     if "amend" in script:
                         amend_test = add_simple_node(amend, "atest")
-                        amend_test.attrib['name'] = os.path.join(test.grid,
-                                                                 test.session,
-                                                                 script.name)
-                        amend_test.attrib['reason'] = script.amend.decode(
-                                                                        "UTF-8")
+                        amend_test.attrib["name"] = os.path.join(
+                            test.grid, test.session, script.name
+                        )
+                        amend_test.attrib["reason"] = script.amend.decode("UTF-8")
 
                     # calculate status
                     nb += 1
-                    if script.res == src.OK_STATUS: nb_pass += 1
-                    elif script.res == src.TIMEOUT_STATUS: nb_timeout += 1
-                    elif script.res == src.KO_STATUS: nb_failed += 1
-                    else: nb_not_run += 1
+                    if script.res == src.OK_STATUS:
+                        nb_pass += 1
+                    elif script.res == src.TIMEOUT_STATUS:
+                        nb_timeout += 1
+                    elif script.res == src.KO_STATUS:
+                        nb_failed += 1
+                    else:
+                        nb_not_run += 1
 
                     if "known_error" in script:
                         kf_script = add_simple_node(known_errors, "error")
-                        kf_script.attrib['name'] = os.path.join(test.grid,
-                                                                test.session,
-                                                                script.name)
-                        kf_script.attrib['date'] = script.known_error.date
-                        kf_script.attrib[
-                                    'expected'] = script.known_error.expected
-                        kf_script.attrib[
-                         'comment'] = script.known_error.comment.decode("UTF-8")
-                        kf_script.attrib['fixed'] = str(
-                                                       script.known_error.fixed)
-                        overdue = datetime.datetime.today().strftime("%Y-%m-"
-                                            "%d") > script.known_error.expected
+                        kf_script.attrib["name"] = os.path.join(
+                            test.grid, test.session, script.name
+                        )
+                        kf_script.attrib["date"] = script.known_error.date
+                        kf_script.attrib["expected"] = script.known_error.expected
+                        kf_script.attrib["comment"] = script.known_error.comment.decode(
+                            "UTF-8"
+                        )
+                        kf_script.attrib["fixed"] = str(script.known_error.fixed)
+                        overdue = (
+                            datetime.datetime.today().strftime("%Y-%m-" "%d")
+                            > script.known_error.expected
+                        )
                         if overdue:
-                            kf_script.attrib['overdue'] = str(overdue)
-                        
+                            kf_script.attrib["overdue"] = str(overdue)
+
                     elif script.res == src.KO_STATUS:
                         new_err = add_simple_node(new_errors, "new_error")
-                        script_path = os.path.join(test.grid,
-                                                   test.session, script.name)
-                        new_err.attrib['name'] = script_path
-                        new_err.attrib['cmd'] = ("sat testerror %s -s %s -c 'my"
-                                                 " comment' -p %s" % \
-                            (application_name, script_path, config.VARS.dist))
-
-
-            gn.attrib['total'] = str(nb)
-            gn.attrib['pass'] = str(nb_pass)
-            gn.attrib['failed'] = str(nb_failed)
-            gn.attrib['timeout'] = str(nb_timeout)
-            gn.attrib['not_run'] = str(nb_not_run)
-            
-            # Remove the res attribute of all tests that were not launched 
-            # this time
+                        script_path = os.path.join(test.grid, test.session, script.name)
+                        new_err.attrib["name"] = script_path
+                        new_err.attrib[
+                            "cmd"
+                        ] = "sat testerror %s -s %s -c 'my" " comment' -p %s" % (
+                            application_name,
+                            script_path,
+                            config.VARS.dist,
+                        )
+
+            gn.attrib["total"] = str(nb)
+            gn.attrib["pass"] = str(nb_pass)
+            gn.attrib["failed"] = str(nb_failed)
+            gn.attrib["timeout"] = str(nb_timeout)
+            gn.attrib["not_run"] = str(nb_not_run)
+
+            # Remove the res attribute of all tests that were not launched
+            # this time
             for mn in gn.findall("grid"):
                 if mn.attrib["executed_last_time"] == "no":
                     for tyn in mn.findall("session"):
                         if tyn.attrib["executed_last_time"] == "no":
-                            for test_node in tyn.findall('test'):
+                            for test_node in tyn.findall("test"):
                                 if "res" in test_node.attrib:
-                                    test_node.attrib.pop("res")          
-    
+                                    test_node.attrib.pop("res")
+
     if len(xmlname) == 0:
         xmlname = application_name
     if not xmlname.endswith(".xml"):
@@ -575,10 +655,11 @@ def create_test_report(config,
     src.xmlManager.write_report(xml_history_path, root, "test_history.xsl")
     return src.OK_STATUS
 
+
 def generate_history_xml_path(config, test_base):
     """Generate the name of the xml file that contain the history of the tests
        on the machine with the current APPLICATION and the current test base.
-    
+
     :param config Config: The global configuration
     :param test_base Str: The test base name (or path)
     :return: the full path of the history xml file
@@ -587,7 +668,7 @@ def generate_history_xml_path(config, test_base):
     history_xml_name = ""
     if "APPLICATION" in config:
         history_xml_name += config.APPLICATION.name
-        history_xml_name += "-" 
+        history_xml_name += "-"
     history_xml_name += config.VARS.dist
     history_xml_name += "-"
     test_base_name = test_base
@@ -598,75 +679,84 @@ def generate_history_xml_path(config, test_base):
     log_dir = src.get_log_path(config)
     return os.path.join(log_dir, "TEST", history_xml_name)
 
+
 def run(args, runner, logger):
-    '''method that is called when salomeTools is called with the test parameter.
-    '''
+    """method that is called when salomeTools is called with the test parameter."""
     (options, args) = parse_option(args, runner.cfg)
 
     # the test base is specified either by the application, or by the --base option
     with_application = False
-    if runner.cfg.VARS.application != 'None':
-        logger.write(_('Running tests on application %s\n') % 
-                            src.printcolors.printcLabel(
-                                                runner.cfg.VARS.application), 1)
+    if runner.cfg.VARS.application != "None":
+        logger.write(
+            _("Running tests on application %s\n")
+            % src.printcolors.printcLabel(runner.cfg.VARS.application),
+            1,
+        )
         with_application = True
     elif not options.base:
-        raise src.SatException(_('A test base is required. Use the --base '
-                                 'option'))
+        raise src.SatException(_("A test base is required. Use the --base " "option"))
 
     # the launcher is specified either by the application, or by the --launcher option
     if with_application:
         # check if environment is loaded
-        if 'KERNEL_ROOT_DIR' in os.environ:
-            logger.write(src.printcolors.printcWarning(_("WARNING: "
-                            "SALOME environment already sourced")) + "\n", 1)
-            
-        
+        if "KERNEL_ROOT_DIR" in os.environ:
+            logger.write(
+                src.printcolors.printcWarning(
+                    _("WARNING: " "SALOME environment already sourced")
+                )
+                + "\n",
+                1,
+            )
+
     elif options.launcher:
-        logger.write(src.printcolors.printcWarning(_("Running SALOME "
-                                                "application.")) + "\n\n", 1)
+        logger.write(
+            src.printcolors.printcWarning(_("Running SALOME " "application.")) + "\n\n",
+            1,
+        )
     else:
-        msg = _("Impossible to find any launcher.\nPlease specify an "
-                "application or a launcher")
+        msg = _(
+            "Impossible to find any launcher.\nPlease specify an "
+            "application or a launcher"
+        )
         logger.write(src.printcolors.printcError(msg))
         logger.write("\n")
         return 1
 
     # set the display
-    show_desktop = (options.display and options.display.upper() == "NO")
+    show_desktop = options.display and options.display.upper() == "NO"
     if options.display and options.display != "NO":
-        remote_name = options.display.split(':')[0]
+        remote_name = options.display.split(":")[0]
         if remote_name != "" and (not src.architecture.is_windows()):
             check_remote_machine(remote_name, logger)
         # if explicitly set use user choice
-        os.environ['DISPLAY'] = options.display
-    elif 'DISPLAY' not in os.environ:
+        os.environ["DISPLAY"] = options.display
+    elif "DISPLAY" not in os.environ:
         # if no display set
-        if ('test' in runner.cfg.LOCAL and
-                'display' in runner.cfg.LOCAL.test and 
-                len(runner.cfg.LOCAL.test.display) > 0):
+        if (
+            "test" in runner.cfg.LOCAL
+            and "display" in runner.cfg.LOCAL.test
+            and len(runner.cfg.LOCAL.test.display) > 0
+        ):
             # use default value for test tool
-            os.environ['DISPLAY'] = runner.cfg.LOCAL.test.display
+            os.environ["DISPLAY"] = runner.cfg.LOCAL.test.display
         else:
-            os.environ['DISPLAY'] = "localhost:0.0"
+            os.environ["DISPLAY"] = "localhost:0.0"
 
     # initialization
     #################
     if with_application:
-        tmp_dir = os.path.join(runner.cfg.VARS.tmp_root,
-                               runner.cfg.APPLICATION.name,
-                               "test")
+        tmp_dir = os.path.join(
+            runner.cfg.VARS.tmp_root, runner.cfg.APPLICATION.name, "test"
+        )
     else:
-        tmp_dir = os.path.join(runner.cfg.VARS.tmp_root,
-                               "test")
+        tmp_dir = os.path.join(runner.cfg.VARS.tmp_root, "test")
 
     # remove previous tmp dir
     if os.access(tmp_dir, os.F_OK):
         try:
             shutil.rmtree(tmp_dir)
         except:
-            logger.error(_("error removing TT_TMP_RESULT %s\n") 
-                                % tmp_dir)
+            logger.error(_("error removing TT_TMP_RESULT %s\n") % tmp_dir)
 
     lines = []
     lines.append("date = '%s'" % runner.cfg.VARS.date)
@@ -674,38 +764,37 @@ def run(args, runner, logger):
     lines.append("node = '%s'" % runner.cfg.VARS.node)
     lines.append("arch = '%s'" % runner.cfg.VARS.dist)
 
-    if 'APPLICATION' in runner.cfg:
+    if "APPLICATION" in runner.cfg:
         lines.append("application_info = {}")
-        lines.append("application_info['name'] = '%s'" % 
-                     runner.cfg.APPLICATION.name)
-        lines.append("application_info['tag'] = '%s'" % 
-                     runner.cfg.APPLICATION.tag)
-        lines.append("application_info['products'] = %s" % 
-                     str(runner.cfg.APPLICATION.products))
+        lines.append("application_info['name'] = '%s'" % runner.cfg.APPLICATION.name)
+        lines.append("application_info['tag'] = '%s'" % runner.cfg.APPLICATION.tag)
+        lines.append(
+            "application_info['products'] = %s" % str(runner.cfg.APPLICATION.products)
+        )
 
     content = "\n".join(lines)
 
     # create hash from context information
     # CVW TODO or not dirname = datetime.datetime.now().strftime("%y%m%d_%H%M%S_") + sha1(content.encode()).hexdigest()[0:8]
-    dirname = sha1(content.encode()).hexdigest()[0:8] # first 8 chars should suffice
+    dirname = sha1(content.encode()).hexdigest()[0:8]  # first 8 chars should suffice
     base_dir = os.path.join(tmp_dir, dirname)
     os.makedirs(base_dir)
-    os.environ['TT_TMP_RESULT'] = base_dir
+    os.environ["TT_TMP_RESULT"] = base_dir
 
     # create env_info file
-    with open(os.path.join(base_dir, 'env_info.py'), "w") as f:
+    with open(os.path.join(base_dir, "env_info.py"), "w") as f:
         f.write(content)
 
     # create working dir and bases dir
-    working_dir = os.path.join(base_dir, 'WORK')
+    working_dir = os.path.join(base_dir, "WORK")
     os.makedirs(working_dir)
-    os.makedirs(os.path.join(base_dir, 'BASES'))
+    os.makedirs(os.path.join(base_dir, "BASES"))
     os.chdir(working_dir)
 
-    if 'PYTHONPATH' not in os.environ:
-        os.environ['PYTHONPATH'] = ''
+    if "PYTHONPATH" not in os.environ:
+        os.environ["PYTHONPATH"] = ""
     else:
-        for var in os.environ['PYTHONPATH'].split(':'):
+        for var in os.environ["PYTHONPATH"].split(":"):
             if var not in sys.path:
                 sys.path.append(var)
 
@@ -717,25 +806,28 @@ def run(args, runner, logger):
     elif with_application and "test_base" in runner.cfg.APPLICATION:
         test_base = runner.cfg.APPLICATION.test_base.name
 
-    src.printcolors.print_value(logger, _('Display'), os.environ['DISPLAY'], 2)
-    src.printcolors.print_value(logger, _('Timeout'),
-                                src.test_module.DEFAULT_TIMEOUT, 2)
+    src.printcolors.print_value(logger, _("Display"), os.environ["DISPLAY"], 2)
+    src.printcolors.print_value(
+        logger, _("Timeout"), src.test_module.DEFAULT_TIMEOUT, 2
+    )
     src.printcolors.print_value(logger, _("Working dir"), base_dir, 3)
 
     # create the test object
-    test_runner = src.test_module.Test(runner.cfg,
-                                  logger,
-                                  base_dir,
-                                  testbase=test_base,
-                                  grids=options.grids,
-                                  sessions=options.sessions,
-                                  launcher=options.launcher,
-                                  show_desktop=show_desktop)
-    
+    test_runner = src.test_module.Test(
+        runner.cfg,
+        logger,
+        base_dir,
+        testbase=test_base,
+        grids=options.grids,
+        sessions=options.sessions,
+        launcher=options.launcher,
+        show_desktop=show_desktop,
+    )
+
     if not test_runner.test_base_found:
-        # Fail 
+        # Fail
         return 1
-        
+
     # run the test
     logger.allowPrintLevel = False
     retcode = test_runner.run_all_tests()
@@ -743,37 +835,44 @@ def run(args, runner, logger):
 
     logger.write(_("Tests finished"), 1)
     logger.write("\n", 2, False)
-    
+
     logger.write(_("\nGenerate the specific test log\n"), 5)
     log_dir = src.get_log_path(runner.cfg)
     out_dir = os.path.join(log_dir, "TEST")
     src.ensure_path_exists(out_dir)
     name_xml_board = logger.logFileName.split(".")[0] + "_board.xml"
     historic_xml_path = generate_history_xml_path(runner.cfg, test_base)
-    
-    create_test_report(runner.cfg,
-                       historic_xml_path,
-                       out_dir,
-                       retcode,
-                       xmlname = name_xml_board)
+
+    create_test_report(
+        runner.cfg, historic_xml_path, out_dir, retcode, xmlname=name_xml_board
+    )
     xml_board_path = os.path.join(out_dir, name_xml_board)
 
     logger.l_logFiles.append(xml_board_path)
-    logger.add_link(os.path.join("TEST", name_xml_board),
-                    "board",
-                    retcode,
-                    "Click on the link to get the detailed test results")
+    logger.add_link(
+        os.path.join("TEST", name_xml_board),
+        "board",
+        retcode,
+        "Click on the link to get the detailed test results",
+    )
     logger.write("\nTests board file %s\n" % xml_board_path, 1)
 
     # Add the historic files into the log files list of the command
     logger.l_logFiles.append(historic_xml_path)
 
     if not options.keeptempdir:
-      logger.write("Removing the temporary directory: rm -rf %s\n" % test_runner.tmp_working_dir, 5)
-      if os.path.exists(test_runner.tmp_working_dir):
-        shutil.rmtree(test_runner.tmp_working_dir)
+        logger.write(
+            "Removing the temporary directory: rm -rf %s\n"
+            % test_runner.tmp_working_dir,
+            5,
+        )
+        if os.path.exists(test_runner.tmp_working_dir):
+            shutil.rmtree(test_runner.tmp_working_dir)
     else:
-      logger.write("NOT Removing the temporary directory: rm -rf %s\n" % test_runner.tmp_working_dir, 5)
+        logger.write(
+            "NOT Removing the temporary directory: rm -rf %s\n"
+            % test_runner.tmp_working_dir,
+            5,
+        )
 
     return retcode
-
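Side note on the hunk above: run() serializes a few context values (date, hour, node, arch) into a small Python snippet, hashes it, and keeps the first eight hex digits of the SHA-1 as the name of the temporary results directory; the same snippet is then written next to the results as env_info.py. A minimal standalone sketch of that naming scheme, assuming illustrative context values and the system tmp root rather than the paths used by sat:

    import hashlib
    import os
    import tempfile

    # Illustrative context, mirroring the date/hour/node/arch lines built in run()
    lines = [
        "date = '20240122'",
        "hour = '151654'",
        "node = 'build-host'",
        "arch = 'CO7'",
    ]
    content = "\n".join(lines)

    # Only the first 8 hex chars of the SHA-1 are kept for the directory name
    dirname = hashlib.sha1(content.encode()).hexdigest()[:8]
    base_dir = os.path.join(tempfile.gettempdir(), "test", dirname)
    os.makedirs(base_dir, exist_ok=True)

    # The same content is also saved alongside the results as env_info.py
    with open(os.path.join(base_dir, "env_info.py"), "w") as f:
        f.write(content)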
diff --git a/commands/update.py b/commands/update.py
index f44f547f24ee0d5d19098d6fd8f3be02f185d95a..3e45858755bbeaed6f97225d322fbe32b2053dc2 100644 (file)
 #  License along with this library; if not, write to the Free Software
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
-import re
 import os
-import pprint as PP
-
 import src
-import src.debug as DBG
 
 
 # Define all possible option for update command :  sat update <options>
@@ -33,10 +29,20 @@ parser.add_option(
     "products",
     _("Optional: products to update. This option accepts a comma separated list."),
 )
-parser.add_option('f', 'force', 'boolean', 'force',
-    _("Optional: force to update the products in development mode."))
-parser.add_option('', 'force_patch', 'boolean', 'force_patch',
-    _("Optional: force to apply patch to the products in development mode."))
+parser.add_option(
+    "f",
+    "force",
+    "boolean",
+    "force",
+    _("Optional: force to update the products in development mode."),
+)
+parser.add_option(
+    "",
+    "force_patch",
+    "boolean",
+    "force_patch",
+    _("Optional: force to apply patch to the products in development mode."),
+)
 # parser.add_option('c', 'complete', 'boolean', 'complete',
 #     _("Optional: completion mode, only update products not present in SOURCES dir."),
 #     False)
@@ -53,11 +59,12 @@ def find_products_already_prepared(l_products):
     """
     l_res = []
     for p_name_p_cfg in l_products:
-        __, prod_cfg = p_name_p_cfg
+        _, prod_cfg = p_name_p_cfg
         if "source_dir" in prod_cfg and os.path.exists(prod_cfg.source_dir):
             l_res.append(p_name_p_cfg)
     return l_res
 
+
 def find_git_products(l_products):
     """
     function that returns the list of products that have an existing source
@@ -75,7 +82,7 @@ def find_git_products(l_products):
     """
     l_res = []
     for p_name_p_cfg in l_products:
-        __, prod_cfg = p_name_p_cfg
+        _, prod_cfg = p_name_p_cfg
         if "source_dir" in prod_cfg and os.path.exists(prod_cfg.source_dir):
             if prod_cfg.get_source == "git":
                 l_res.append(p_name_p_cfg)
@@ -91,7 +98,7 @@ def find_products_with_patchs(l_products):
     """
     l_res = []
     for p_name_p_cfg in l_products:
-        __, prod_cfg = p_name_p_cfg
+        _, prod_cfg = p_name_p_cfg
         l_patchs = src.get_cfg_param(prod_cfg, "patches", [])
         if len(l_patchs) > 0:
             l_res.append(p_name_p_cfg)
@@ -110,6 +117,7 @@ def description():
         "\n\nexample:\nsat update SALOME-master --products KERNEL,GUI"
     )
 
+
 class UpdateOp:
     """
     This is an operation class. It is prepared through the init and launched
@@ -148,7 +156,9 @@ class UpdateOp:
 
         self.runner = runner
         self.logger = logger
-        self.products_infos = src.product.get_products_list(options, self.runner.cfg, self.logger)
+        self.products_infos = src.product.get_products_list(
+            options, self.runner.cfg, self.logger
+        )
 
         # Construct the arguments to pass to the clean, source and patch commands
         self.args_appli = runner.cfg.VARS.application + " "  # useful whitespace
@@ -157,7 +167,7 @@ class UpdateOp:
     def products(self):
         if self._list_of_products:
             return list(self._list_of_products)
-        return [name for name, tmp in self.products_infos]
+        return [name for name, _ in self.products_infos]
 
     def getProductsToPrepare(self):
         """
@@ -165,7 +175,7 @@ class UpdateOp:
         that only new products (and not tracked ones) are prepared.
         """
         pi_already_prepared = find_git_products(self.products_infos)
-        l_already_prepared = [i for i, tmp in pi_already_prepared]
+        l_already_prepared = [i for i, _ in pi_already_prepared]
         newList, removedList = removeInList(self.products, l_already_prepared)
         if len(newList) == 0 and len(removedList) > 0:
             msg = "\nAll the products are already installed, do nothing!\n"
@@ -181,15 +191,17 @@ class UpdateOp:
 
     def getProductsToUpdate(self):
         pi_already_prepared = find_git_products(self.products_infos)
-        productsToUpdate = [i for i, tmp in pi_already_prepared]
+        productsToUpdate = [i for i, _ in pi_already_prepared]
         return productsToUpdate
 
     def getProductsToClean(self, listProdToPrepare):
-        ldev_products = [p for p in self.products_infos if src.product.product_is_dev(p[1])]
+        ldev_products = [
+            p for p in self.products_infos if src.product.product_is_dev(p[1])
+        ]
         productsToClean = listProdToPrepare  # default
         if len(ldev_products) > 0:
             l_products_not_getted = find_products_already_prepared(ldev_products)
-            listNot = [i for i, tmp in l_products_not_getted]
+            listNot = [i for i, _ in l_products_not_getted]
             productsToClean, removedList = removeInList(listProdToPrepare, listNot)
             if len(removedList) > 0:
                 msg = _(
@@ -204,10 +216,12 @@ class UpdateOp:
 
     def getProductsToPatch(self, listProdToPrepare):
         productsToPatch = listProdToPrepare  # default
-        ldev_products = [p for p in self.products_infos if src.product.product_is_dev(p[1])]
+        ldev_products = [
+            p for p in self.products_infos if src.product.product_is_dev(p[1])
+        ]
         if not self._force_patch and len(ldev_products) > 0:
             l_products_with_patchs = find_products_with_patchs(ldev_products)
-            listNot = [i for i, tmp in l_products_with_patchs]
+            listNot = [i for i, _ in l_products_with_patchs]
             productsToPatch, removedList = removeInList(listProdToPrepare, listNot)
             if len(removedList) > 0:
                 msg = _(
@@ -233,7 +247,6 @@ class UpdateOp:
         productsToPatch = self.getProductsToPatch(productsToPrepare)
         args_product_opt_patch = "--products " + ",".join(productsToPatch)
 
-
         # Initialize the results to a running status
         res_clean = 0
         res_source = 0
@@ -246,16 +259,20 @@ class UpdateOp:
                 self.logger.write(msg, 3)
                 self.logger.flush()
                 args_clean = self.args_appli + args_product_opt_clean + " --sources"
-                res_clean = self.runner.clean(args_clean, batch=True, verbose = 0, logger_add_link = self.logger)
+                res_clean = self.runner.clean(
+                    args_clean, batch=True, verbose=0, logger_add_link=self.logger
+                )
                 if res_clean == 0:
-                    self.logger.write('%s\n' % src.printcolors.printc(src.OK_STATUS), 3)
+                    self.logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 3)
                 else:
-                    self.logger.write('%s\n' % src.printcolors.printc(src.KO_STATUS), 3)
+                    self.logger.write("%s\n" % src.printcolors.printc(src.KO_STATUS), 3)
             if len(productsToPrepare) > 0:
                 msg = _("Get the sources of the products ...")
                 self.logger.write(msg, 5)
                 args_source = self.args_appli + args_product_to_prepare_opt
-                res_source = self.runner.source(args_source, logger_add_link=self.logger)
+                res_source = self.runner.source(
+                    args_source, logger_add_link=self.logger
+                )
                 if res_source == 0:
                     self.logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 5)
                 else:
@@ -273,7 +290,9 @@ class UpdateOp:
             msg = _("Update the sources of the products ...")
             self.logger.write(msg, 5)
             args_source = self.args_appli + args_product_to_update_opt
-            res_source = self.runner.source_update(args_source, logger_add_link=self.logger)
+            res_source = self.runner.source_update(
+                args_source, logger_add_link=self.logger
+            )
             if res_source == 0:
                 self.logger.write("%s\n" % src.printcolors.printc(src.OK_STATUS), 5)
             else:
diff --git a/data/templates/Application/config/compile.py b/data/templates/Application/config/compile.py
index 245df7b5c1f4fbdc8dae647b67681bad99d51b83..21b048654a68f816296fc99c1033869b8fb86772 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 # This script is used to build the application module.
 # First, it copies the content of the sources directory to the install directory.
@@ -9,28 +9,40 @@ import subprocess
 
 import src
 
+
 def compil(config, builder, logger):
     builder.prepare()
     if not builder.source_dir.smartcopy(builder.install_dir):
-        raise src.SatException(_("Error when copying %s sources to install dir") % builder.product_info.name)
-    
+        raise src.SatException(
+            _("Error when copying %s sources to install dir")
+            % builder.product_info.name
+        )
+
     # test lrelease #.pyconf needs in ..._APPLI pre_depend : ['qt']
-    command = "which lrelease" 
-    res = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,env=builder.build_environ.environ.environ).communicate()
-    if res[1] != "": #an error occurred
+    command = "which lrelease"
+    res = subprocess.Popen(
+        command,
+        shell=True,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=builder.build_environ.environ.environ,
+    ).communicate()
+    if res[1] != "":  # an error occurred
         logger.write("ERROR: %s" % res[1])
-        builder.log(res[1]+"\n")
+        builder.log(res[1] + "\n")
         return 1
-    
+
     # run lrelease
     command = "lrelease *.ts"
-    res = subprocess.call(command,
-                          shell=True,
-                          cwd=str(builder.install_dir + "resources"),
-                          env=builder.build_environ.environ.environ,
-                          stdout=logger.logTxtFile,
-                          stderr=subprocess.STDOUT)
+    res = subprocess.call(
+        command,
+        shell=True,
+        cwd=str(builder.install_dir + "resources"),
+        env=builder.build_environ.environ.environ,
+        stdout=logger.logTxtFile,
+        stderr=subprocess.STDOUT,
+    )
     if res != 0:
         res = 1
-    
+
     return res
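The compile.py template above follows a simple probe-then-run pattern: it first checks that lrelease is reachable, then invokes it on the .ts translation files with the output redirected to the build log. A rough standalone sketch of the same pattern using only the standard library; the tool name, working directory and log path are placeholders, and shutil.which is used here instead of shelling out to `which`:

    import shutil
    import subprocess

    def run_tool(tool="lrelease", args="*.ts", cwd=".", log_path="build.log"):
        # Probe for the tool first, comparable to the "which lrelease" check above
        if shutil.which(tool) is None:
            print("ERROR: %s not found in PATH" % tool)
            return 1
        # Run the tool in the given directory, capturing stdout/stderr in a log file
        with open(log_path, "w") as log:
            return subprocess.call(
                "%s %s" % (tool, args),
                shell=True,
                cwd=cwd,
                stdout=log,
                stderr=subprocess.STDOUT,
            )

    # Example: run_tool(cwd="resources") would mirror the call made in compil()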
diff --git a/data/templates/PythonComponent/src/Controller/Controller.py b/data/templates/PythonComponent/src/Controller/Controller.py
index b42de0c99dd5b9ce827c64cc15fd992b2e123be3..81db59176bedea8663879cc3cdd5804c9f8903de 100755 (executable)
@@ -1,10 +1,11 @@
 from Polyline import Polyline
 from Circle import Circle
 
-class Controller() :
+
+class Controller:
     """Manages the Model instances"""
 
-    def __init__( self, MainFrame ) :
+    def __init__(self, MainFrame):
         """Constructor"""
 
         self._models = []
@@ -13,92 +14,93 @@ class Controller() :
         self._nbCircles = 0
         pass
 
-    def getModels( self ) :
+    def getModels(self):
         return self._models
 
-    def getMainFrame( self ) :
+    def getMainFrame(self):
         return self._mainFrame
 
-    def getNbPolylines( self ) :
+    def getNbPolylines(self):
         return self._nbPolylines
 
-    def setNbPolylines( self, n ) :
+    def setNbPolylines(self, n):
         self._nbPolylines = n
         pass
 
-    def getNbCircles( self ) :
+    def getNbCircles(self):
         return self._nbCircles
 
-    def setNbCircles( self, n ) :
+    def setNbCircles(self, n):
         self._nbCircles = n
         pass
 
-    def createPolyline( self, name, randomNumberOfPoints ) :
+    def createPolyline(self, name, randomNumberOfPoints):
         """Creates a Polyline object nammed name with randomNumberOfPoints points"""
 
         import random
 
         # Making randomNumberOfPoints randomly positioned points
         points = []
-        x = random.uniform( 0, randomNumberOfPoints )
-        for i in range( randomNumberOfPoints ) :
-           x = random.uniform( x, x+randomNumberOfPoints )
-           y = random.uniform( 0, x )
-           point = x, y
-           points.append( point )
-           pass
-
-        myPolyline = Polyline( name, points, self )
-        self._models.append( myPolyline )
-        myPolyline.updateViews( mode = 'creation' )
-
-        self._nbPolylines +=1
+        x = random.uniform(0, randomNumberOfPoints)
+        for i in range(randomNumberOfPoints):
+            x = random.uniform(x, x + randomNumberOfPoints)
+            y = random.uniform(0, x)
+            point = x, y
+            points.append(point)
+            pass
+
+        myPolyline = Polyline(name, points, self)
+        self._models.append(myPolyline)
+        myPolyline.updateViews(mode="creation")
+
+        self._nbPolylines += 1
         return myPolyline
 
-    def createCircle( self, name, center, radius ) :
+    def createCircle(self, name, center, radius):
         """Creates a Circle object nammed name with center and radius"""
 
-        myCircle = Circle( name, center, radius, self )
-        self._models.append( myCircle )
-        myCircle.updateViews( mode = 'creation' )
+        myCircle = Circle(name, center, radius, self)
+        self._models.append(myCircle)
+        myCircle.updateViews(mode="creation")
 
-        self._nbCircles +=1
+        self._nbCircles += 1
         return myCircle
 
-    def showModel( self, model ) :
-        model.updateViews( mode = 'showing' )
+    def showModel(self, model):
+        model.updateViews(mode="showing")
         pass
 
-    def editName( self, model, name ) :
-        model.setName( name )
-        model.updateViews( mode = 'modification' )
+    def editName(self, model, name):
+        model.setName(name)
+        model.updateViews(mode="modification")
         return model
 
-    def editPoint( self, polyline, newPoint, pointRange ) :
-        polyline.editPoint( pointRange, newPoint )
-        polyline.updateViews( mode = 'modification' )
+    def editPoint(self, polyline, newPoint, pointRange):
+        polyline.editPoint(pointRange, newPoint)
+        polyline.updateViews(mode="modification")
         return polyline
 
-    def editCenter( self, circle, center ) :
-        circle.setCenter( center )
-        circle.updateViews( mode = 'modification' )
+    def editCenter(self, circle, center):
+        circle.setCenter(center)
+        circle.updateViews(mode="modification")
         return circle
 
-    def editRadius( self, circle, radius ) :
-        circle.setRadius( radius )
-        circle.updateViews( mode = 'modification' )
+    def editRadius(self, circle, radius):
+        circle.setRadius(radius)
+        circle.updateViews(mode="modification")
         return circle
 
-    def removeModel( self, model ) :
-        model.updateViews( mode = 'supression' )
-        index = self._models.index( model )
+    def removeModel(self, model):
+        model.updateViews(mode="supression")
+        index = self._models.index(model)
         del model
         pass
 
-    def saveListOfModels( self ) :
-        for model in self._models :
-           model.save()
-           pass
+    def saveListOfModels(self):
+        for model in self._models:
+            model.save()
+            pass
         pass
 
+
 pass
diff --git a/data/templates/PythonComponent/src/Dialog/CreateCircleDialog.py b/data/templates/PythonComponent/src/Dialog/CreateCircleDialog.py
index d4e408ab8c0d732d23f8a7995b0a78b476997e68..1cc1818172eb70f674be041866a2f54837eaa9b9 100755 (executable)
@@ -1,70 +1,76 @@
 from Dialog import *
 from qtsalome import *
 
-class CreateCircleDialog( Dialog ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox ) :
-       """Constructor"""
+class CreateCircleDialog(Dialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
 
-       # Initializing parent widget
-       Dialog.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        Dialog.__init__(self, helpFile, controller, widgetDialogBox)
 
-       # Setting default name
-       nbCircles = controller.getNbCircles()
-       self.entryName.setText( "circle_" + str(nbCircles+1) )
-       pass
+        # Setting default name
+        nbCircles = controller.getNbCircles()
+        self.entryName.setText("circle_" + str(nbCircles + 1))
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lxCenter = QLabel( "xCenter", self )
-       self.v11.addWidget( lxCenter )
-       lyCenter = QLabel( "yCenter", self )
-       self.v11.addWidget( lyCenter )
-       lRadius = QLabel( "Radius", self )
-       self.v11.addWidget( lRadius )
+        lxCenter = QLabel("xCenter", self)
+        self.v11.addWidget(lxCenter)
+        lyCenter = QLabel("yCenter", self)
+        self.v11.addWidget(lyCenter)
+        lRadius = QLabel("Radius", self)
+        self.v11.addWidget(lRadius)
 
-       self.entryxCenter = QLineEdit( self )
-       self.entryxCenter.setValidator( floatValidator )
-       self.entryxCenter.setText( "0" )
-       self.v12.addWidget( self.entryxCenter )
-       self.entryyCenter = QLineEdit( self )
-       self.entryyCenter.setValidator( floatValidator )
-       self.entryyCenter.setText( "0" )
-       self.v12.addWidget( self.entryyCenter )
-       self.entryRadius = QLineEdit( self )
-       self.entryRadius.setValidator( floatValidator )
-       self.entryRadius.setText( "10" )
-       self.v12.addWidget( self.entryRadius)
-       pass
+        self.entryxCenter = QLineEdit(self)
+        self.entryxCenter.setValidator(floatValidator)
+        self.entryxCenter.setText("0")
+        self.v12.addWidget(self.entryxCenter)
+        self.entryyCenter = QLineEdit(self)
+        self.entryyCenter.setValidator(floatValidator)
+        self.entryyCenter.setText("0")
+        self.v12.addWidget(self.entryyCenter)
+        self.entryRadius = QLineEdit(self)
+        self.entryRadius.setValidator(floatValidator)
+        self.entryRadius.setText("10")
+        self.v12.addWidget(self.entryRadius)
+        pass
 
-   def execApply( self ) :
-       name = self.name
-       center = float(self.xCenter), float(self.yCenter)
-       radius = float( self.radius )
-       self.getController().createCircle( name, center, radius )
-       self.reInitializeDialog()
-       return
+    def execApply(self):
+        name = self.name
+        center = float(self.xCenter), float(self.yCenter)
+        radius = float(self.radius)
+        self.getController().createCircle(name, center, radius)
+        self.reInitializeDialog()
+        return
 
-   def retrieveUserEntries( self ) :
-       self.name = str( self.entryName.text() )
-       self.xCenter = str( self.entryxCenter.text() )
-       self.yCenter = str( self.entryyCenter.text() )
-       self.radius = str( self.entryRadius.text() )
-       pass
+    def retrieveUserEntries(self):
+        self.name = str(self.entryName.text())
+        self.xCenter = str(self.entryxCenter.text())
+        self.yCenter = str(self.entryyCenter.text())
+        self.radius = str(self.entryRadius.text())
+        pass
 
-   def checkUserEntries( self ) :
-       if self.name == "" or self.xCenter == "" or self.yCenter == "" or self.radius == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+    def checkUserEntries(self):
+        if (
+            self.name == ""
+            or self.xCenter == ""
+            or self.yCenter == ""
+            or self.radius == ""
+        ):
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
+    def reInitializeDialog(self):
+        nbCircles = self.getController().getNbCircles()
+        self.entryName.setText("circle_" + str(nbCircles + 1))
+        self.entryxCenter.setText("0")
+        self.entryyCenter.setText("0")
+        self.entryRadius.setText("10")
+        pass
 
-   def reInitializeDialog( self ) :
-       nbCircles = self.getController().getNbCircles()
-       self.entryName.setText( "circle_" + str(nbCircles+1) )
-       self.entryxCenter.setText( "0" )
-       self.entryyCenter.setText( "0" )
-       self.entryRadius.setText( "10" )
-       pass
 
 pass
index f0a92a6d02089a8a944cb83c196a0df1be14d61d..e09946a62dac0c3b4f3e1dc9ed4e4c249dfdc93b 100755 (executable)
@@ -1,58 +1,58 @@
 from Dialog import Dialog
 from qtsalome import *
 
-class CreatePolylineDialog( Dialog ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox  ) :
-       """Constructor"""
-
-       #Initializing parent widget
-       Dialog.__init__( self, helpFile, controller, widgetDialogBox )
-
-       #Setting default name
-       nbPolylines = controller.getNbPolylines()
-       self.entryName.setText( "polyline_" + str(nbPolylines+1) )
-       pass
-
-   def addSpecialWidgets( self ) :
-
-       intValidator = QIntValidator( self )
-
-       lNbPoints = QLabel( "Number of points", self )
-       self.v11.addWidget( lNbPoints )
-
-       self.entryNbPoints = QLineEdit( self )
-       self.entryNbPoints.setValidator( intValidator )
-       self.entryNbPoints.setText( "10" )
-       self.v12.addWidget( self.entryNbPoints )
-       pass
-
-   def execApply( self ) :
-       name = self.name
-       nbPoints = int( self.nbPoints )
-       self.getController().createPolyline( name, nbPoints )
-       self.reInitializeDialog()
-       return
-
-
-   def retrieveUserEntries( self ) :
-       self.name = str( self.entryName.text() )
-       self.nbPoints = str( self.entryNbPoints.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.name == "" or self.nbPoints == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       if int( self.nbPoints ) > 10 :
-          self.errMessage = 'The number of points must not exceed 10'
-          return False
-       return True
-
-   def reInitializeDialog( self ) :
-       nbPolylines = self.getController().getNbPolylines()
-       self.entryName.setText( "polyline_" + str(nbPolylines+1) )
-       self.entryNbPoints.setText( "10" )
-       pass
+
+class CreatePolylineDialog(Dialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
+
+        # Initializing parent widget
+        Dialog.__init__(self, helpFile, controller, widgetDialogBox)
+
+        # Setting default name
+        nbPolylines = controller.getNbPolylines()
+        self.entryName.setText("polyline_" + str(nbPolylines + 1))
+        pass
+
+    def addSpecialWidgets(self):
+
+        intValidator = QIntValidator(self)
+
+        lNbPoints = QLabel("Number of points", self)
+        self.v11.addWidget(lNbPoints)
+
+        self.entryNbPoints = QLineEdit(self)
+        self.entryNbPoints.setValidator(intValidator)
+        self.entryNbPoints.setText("10")
+        self.v12.addWidget(self.entryNbPoints)
+        pass
+
+    def execApply(self):
+        name = self.name
+        nbPoints = int(self.nbPoints)
+        self.getController().createPolyline(name, nbPoints)
+        self.reInitializeDialog()
+        return
+
+    def retrieveUserEntries(self):
+        self.name = str(self.entryName.text())
+        self.nbPoints = str(self.entryNbPoints.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.name == "" or self.nbPoints == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        if int(self.nbPoints) > 10:
+            self.errMessage = "The number of points must not exceed 10"
+            return False
+        return True
+
+    def reInitializeDialog(self):
+        nbPolylines = self.getController().getNbPolylines()
+        self.entryName.setText("polyline_" + str(nbPolylines + 1))
+        self.entryNbPoints.setText("10")
+        pass
+
 
 pass
index 47f1974672d3bde82851c2e52e756c35c747ddac..fc7eaa8a7d97c1950de870e82be213acbb9622ff 100755 (executable)
 from qtsalome import *
 
-class Dialog( QDialog ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       QDialog.__init__( self )
-
-       # Setting attributes
-       self.setObjectName( "Dialog" )
-       self.setWindowTitle( "Dialog data" )
-       self._helpFile = helpFile
-       self._controller = controller
-       self._widgetDialogBox = widgetDialogBox
-
-       # Setting layouts
-       self.mainLayout = QVBoxLayout( self )
-       self.h1 = QHBoxLayout( self )
-       self.h2 = QHBoxLayout( self )
-       self.mainLayout.addLayout( self.h1 )
-       self.mainLayout.addLayout( self.h2 )
-       self.v11 = QVBoxLayout( self)
-       self.v12 = QVBoxLayout( self )
-       self.h1.addLayout( self.v11 )
-       self.h1.addLayout( self.v12 )
-
-       # Filling layouts with standard widgets( common to all childre )
-       self.fillStandardWidgets()
-       # Adding special widgets to layouts( special to each child )
-       self.addSpecialWidgets()
-
-       # Connecting widgets to slots
-       self.connectSlots()
-       pass
-
-   def getController( self ) :
-       return self._controller
-
-   def fillStandardWidgets( self ) :
-
-       lName = QLabel( "Name", self )
-       self.v11.addWidget( lName )
-
-       self.entryName = QLineEdit( self )
-       self.v12.addWidget( self.entryName )
-
-       #Setting buttons
-       self.bApply = QPushButton( "Apply", self )
-       self.h2.addWidget( self.bApply )
-       self.bClose = QPushButton( "Close", self )
-       self.h2.addWidget( self.bClose )
-       self.bHelp = QPushButton( "Help", self )
-       self.h2.addWidget( self.bHelp )
-       pass
-
-   def addSpecialWidgets( self ) :
-       print('Virtual method')
-       pass
-
-   def connectSlots( self ) :
-       self.bApply.clicked.connect(self.apply)
-       self.bHelp.clicked.connect(self.help)
-       self.bClose.clicked.connect(self.close)
-       pass
-
-   def apply( self ) :
-
-       self.retrieveUserEntries()
-       if not self.checkUserEntries() :
-          QMessageBox.warning( self, 'information faillure', self.errMessage )
-          return
-       self.execApply()
-       return
-
-   def retrieveUserEntries( self ) :
-       self.name = str( self.entryName.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.name == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
-
-   def execApply( self ) :
-       print('Virtual method')
-       pass
-
-   def reInitializeDialog( self ) :
-       print('Virtual method')
-       pass
-
-   def help( self ) :
-       import os
-       os.system( 'firefox ' + self._helpFile + '&' )
-       pass
-
-   def close( self ) :
-       self._widgetDialogBox.close()
-       pass
+
+class Dialog(QDialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
+
+        # Initializing parent widget
+        QDialog.__init__(self)
+
+        # Setting attributes
+        self.setObjectName("Dialog")
+        self.setWindowTitle("Dialog data")
+        self._helpFile = helpFile
+        self._controller = controller
+        self._widgetDialogBox = widgetDialogBox
+
+        # Setting layouts
+        self.mainLayout = QVBoxLayout(self)
+        self.h1 = QHBoxLayout(self)
+        self.h2 = QHBoxLayout(self)
+        self.mainLayout.addLayout(self.h1)
+        self.mainLayout.addLayout(self.h2)
+        self.v11 = QVBoxLayout(self)
+        self.v12 = QVBoxLayout(self)
+        self.h1.addLayout(self.v11)
+        self.h1.addLayout(self.v12)
+
+        # Filling layouts with standard widgets (common to all children)
+        self.fillStandardWidgets()
+        # Adding special widgets to layouts (special to each child)
+        self.addSpecialWidgets()
+
+        # Connecting widgets to slots
+        self.connectSlots()
+        pass
+
+    def getController(self):
+        return self._controller
+
+    def fillStandardWidgets(self):
+
+        lName = QLabel("Name", self)
+        self.v11.addWidget(lName)
+
+        self.entryName = QLineEdit(self)
+        self.v12.addWidget(self.entryName)
+
+        # Setting buttons
+        self.bApply = QPushButton("Apply", self)
+        self.h2.addWidget(self.bApply)
+        self.bClose = QPushButton("Close", self)
+        self.h2.addWidget(self.bClose)
+        self.bHelp = QPushButton("Help", self)
+        self.h2.addWidget(self.bHelp)
+        pass
+
+    def addSpecialWidgets(self):
+        print("Virtual method")
+        pass
+
+    def connectSlots(self):
+        self.bApply.clicked.connect(self.apply)
+        self.bHelp.clicked.connect(self.help)
+        self.bClose.clicked.connect(self.close)
+        pass
+
+    def apply(self):
+
+        self.retrieveUserEntries()
+        if not self.checkUserEntries():
+            QMessageBox.warning(self, "information failure", self.errMessage)
+            return
+        self.execApply()
+        return
+
+    def retrieveUserEntries(self):
+        self.name = str(self.entryName.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.name == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
+    def execApply(self):
+        print("Virtual method")
+        pass
+
+    def reInitializeDialog(self):
+        print("Virtual method")
+        pass
+
+    def help(self):
+        import os
+
+        os.system("firefox " + self._helpFile + "&")
+        pass
+
+    def close(self):
+        self._widgetDialogBox.close()
+        pass
+
 
 pass
index d62a299841119712a538fabfc2391e893ebe6032..c76563b5c5573c0000a50b022ad49c8bdf0cd693 100755 (executable)
@@ -1,92 +1,94 @@
 from qtsalome import *
 
-class DialogEdit( QDialog ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       QDialog.__init__( self )
-
-       # Setting attributes
-       self.setObjectName( "Dialog" )
-       self.setWindowTitle( "Dialog data" )
-       self._helpFile = helpFile
-       self._controller = controller
-       self._widgetDialogBox = widgetDialogBox
-
-       # Setting layouts
-       self.mainLayout = QVBoxLayout( self )
-       self.h1 = QHBoxLayout( self )
-       self.h2 = QHBoxLayout( self )
-       self.mainLayout.addLayout( self.h1 )
-       self.mainLayout.addLayout( self.h2 )
-       self.v11 = QVBoxLayout( self)
-       self.v12 = QVBoxLayout( self )
-       self.h1.addLayout( self.v11 )
-       self.h1.addLayout( self.v12 )
-
-       # Filling layouts with standard widgets( common to all childre )
-       self.fillStandardWidgets()
-       # Adding special widgets to layouts( special to each child )
-       self.addSpecialWidgets()
-
-       # Connecting widgets to slots
-       self.connectSlots()
-       pass
-
-   def getController( self ) :
-       return self._controller
-
-   def fillStandardWidgets( self ) :
-
-       #Setting buttons
-       self.bOk = QPushButton( "OK", self )
-       self.h2.addWidget( self.bOk )
-       self.bCancel = QPushButton( "Cancel", self )
-       self.h2.addWidget( self.bCancel )
-       self.bHelp = QPushButton( "Help", self )
-       self.h2.addWidget( self.bHelp )
-       pass
-
-   def addSpecialWidgets( self ) :
-       print('Virtual method')
-       pass
-
-   def connectSlots( self ) :
-       self.bOk.clicked.connect(self.apply)
-       self.bHelp.clicked.connect(self.help)
-       self.bCancel.clicked.connect(self.close)
-       pass
-
-   def apply( self ) :
-       self.retrieveUserEntries()
-       if not self.checkUserEntries() :
-          QMessageBox.warning( self, 'information faillure', self.errMessage )
-          return
-       self.execApply()
-       self.close()
-       return
-
-   def retrieveUserEntries( self ) :
-       print('Virtual method')
-       pass
-
-   def checkUserEntries( self ) :
-       print('Virtual method')
-       return True
-
-   def execApply( self ) :
-       print('Virtual method')
-       pass
-
-   def help( self ) :
-       import os
-       os.system( 'firefox ' + self._helpFile + '&' )
-       pass
-
-   def close( self ) :
-       self._widgetDialogBox.close()
-       pass
+
+class DialogEdit(QDialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
+
+        # Initializing parent widget
+        QDialog.__init__(self)
+
+        # Setting attributes
+        self.setObjectName("Dialog")
+        self.setWindowTitle("Dialog data")
+        self._helpFile = helpFile
+        self._controller = controller
+        self._widgetDialogBox = widgetDialogBox
+
+        # Setting layouts
+        self.mainLayout = QVBoxLayout(self)
+        self.h1 = QHBoxLayout(self)
+        self.h2 = QHBoxLayout(self)
+        self.mainLayout.addLayout(self.h1)
+        self.mainLayout.addLayout(self.h2)
+        self.v11 = QVBoxLayout(self)
+        self.v12 = QVBoxLayout(self)
+        self.h1.addLayout(self.v11)
+        self.h1.addLayout(self.v12)
+
+        # Filling layouts with standard widgets (common to all children)
+        self.fillStandardWidgets()
+        # Adding special widgets to layouts (special to each child)
+        self.addSpecialWidgets()
+
+        # Connecting widgets to slots
+        self.connectSlots()
+        pass
+
+    def getController(self):
+        return self._controller
+
+    def fillStandardWidgets(self):
+
+        # Setting buttons
+        self.bOk = QPushButton("OK", self)
+        self.h2.addWidget(self.bOk)
+        self.bCancel = QPushButton("Cancel", self)
+        self.h2.addWidget(self.bCancel)
+        self.bHelp = QPushButton("Help", self)
+        self.h2.addWidget(self.bHelp)
+        pass
+
+    def addSpecialWidgets(self):
+        print("Virtual method")
+        pass
+
+    def connectSlots(self):
+        self.bOk.clicked.connect(self.apply)
+        self.bHelp.clicked.connect(self.help)
+        self.bCancel.clicked.connect(self.close)
+        pass
+
+    def apply(self):
+        self.retrieveUserEntries()
+        if not self.checkUserEntries():
+            QMessageBox.warning(self, "information failure", self.errMessage)
+            return
+        self.execApply()
+        self.close()
+        return
+
+    def retrieveUserEntries(self):
+        print("Virtual method")
+        pass
+
+    def checkUserEntries(self):
+        print("Virtual method")
+        return True
+
+    def execApply(self):
+        print("Virtual method")
+        pass
+
+    def help(self):
+        import os
+
+        os.system("firefox " + self._helpFile + "&")
+        pass
+
+    def close(self):
+        self._widgetDialogBox.close()
+        pass
+
 
 pass
index 1e3a10001077291bc4db1009b1c5f71563c9fc8e..d6a9d03a750337366ad9b6f64268b485c24a575f 100755 (executable)
@@ -1,63 +1,64 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditCenterDialog( DialogEdit ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldCenter ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
-
-       self._model = model
-
-       # Reading oldX and oldY
-       oldX = ""
-       oldY = ""
-       i = 0
-       while oldCenter[i] != ':' :
-          oldX += oldCenter[i]
-          i += 1
-          pass
-       for j in range( i+1, len(oldCenter) ) :
-          oldY += oldCenter[j]
-          pass
-       self.entryX.setText( oldX )
-       self.entryY.setText( oldY )
-       pass
-
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
-
-       lX = QLabel( "X", self )
-       self.v11.addWidget( lX )
-       lY = QLabel( "Y", self )
-       self.v11.addWidget( lY )
-
-       self.entryX = QLineEdit( self )
-       self.entryX.setValidator( floatValidator )
-       self.v12.addWidget( self.entryX )
-       self.entryY = QLineEdit( self )
-       self.entryY.setValidator( floatValidator )
-       self.v12.addWidget( self.entryY )
-       pass
-
-   def execApply( self ) :
-       newX = float( self.newX )
-       newY = float( self.newY )
-       newCenter = newX, newY
-       self.getController().editCenter( self._model, newCenter )
-       return
-
-   def retrieveUserEntries( self ) :
-       self.newX= str( self.entryX.text() )
-       self.newY= str( self.entryY.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.newX == "" or self.newY == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+
+class EditCenterDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldCenter):
+        """Constructor"""
+
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
+
+        self._model = model
+
+        # Reading oldX and oldY
+        oldX = ""
+        oldY = ""
+        i = 0
+        while oldCenter[i] != ":":
+            oldX += oldCenter[i]
+            i += 1
+            pass
+        for j in range(i + 1, len(oldCenter)):
+            oldY += oldCenter[j]
+            pass
+        self.entryX.setText(oldX)
+        self.entryY.setText(oldY)
+        pass
+
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
+
+        lX = QLabel("X", self)
+        self.v11.addWidget(lX)
+        lY = QLabel("Y", self)
+        self.v11.addWidget(lY)
+
+        self.entryX = QLineEdit(self)
+        self.entryX.setValidator(floatValidator)
+        self.v12.addWidget(self.entryX)
+        self.entryY = QLineEdit(self)
+        self.entryY.setValidator(floatValidator)
+        self.v12.addWidget(self.entryY)
+        pass
+
+    def execApply(self):
+        newX = float(self.newX)
+        newY = float(self.newY)
+        newCenter = newX, newY
+        self.getController().editCenter(self._model, newCenter)
+        return
+
+    def retrieveUserEntries(self):
+        self.newX = str(self.entryX.text())
+        self.newY = str(self.entryY.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.newX == "" or self.newY == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
 
 pass
index 00702ad4788889ad4a19abcaafdae5a0ab16c5c0..1c83890cf400578c05d92368e4cea183eaff01ac 100755 (executable)
@@ -1,66 +1,68 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditPointDialog( DialogEdit ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldPoint, pointRange  ) :
-       """Constructor"""
+class EditPointDialog(DialogEdit):
+    def __init__(
+        self, helpFile, controller, widgetDialogBox, model, oldPoint, pointRange
+    ):
+        """Constructor"""
 
-       #Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
 
-       self._model = model
+        self._model = model
 
-       #Reading oldX and oldY
-       oldX = ""
-       oldY = ""
-       i = 0
-       while oldPoint[i] != ':' :
-          oldX += oldPoint[i]
-          i += 1
-          pass
-       for j in range( i+1, len(oldPoint) ) :
-          oldY += oldPoint[j]
-          pass
-       self.pointRange = pointRange
-       self.entryX.setText( oldX )
-       self.entryY.setText( oldY )
-       pass
+        # Reading oldX and oldY
+        oldX = ""
+        oldY = ""
+        i = 0
+        while oldPoint[i] != ":":
+            oldX += oldPoint[i]
+            i += 1
+            pass
+        for j in range(i + 1, len(oldPoint)):
+            oldY += oldPoint[j]
+            pass
+        self.pointRange = pointRange
+        self.entryX.setText(oldX)
+        self.entryY.setText(oldY)
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lX = QLabel( "X", self )
-       self.v11.addWidget( lX )
-       lY = QLabel( "Y", self )
-       self.v11.addWidget( lY )
+        lX = QLabel("X", self)
+        self.v11.addWidget(lX)
+        lY = QLabel("Y", self)
+        self.v11.addWidget(lY)
 
-       self.entryX = QLineEdit( self )
-       self.entryX.setValidator( floatValidator )
-       self.v12.addWidget( self.entryX )
-       self.entryY = QLineEdit( self )
-       self.entryY.setValidator( floatValidator )
-       self.v12.addWidget( self.entryY )
-       pass
+        self.entryX = QLineEdit(self)
+        self.entryX.setValidator(floatValidator)
+        self.v12.addWidget(self.entryX)
+        self.entryY = QLineEdit(self)
+        self.entryY.setValidator(floatValidator)
+        self.v12.addWidget(self.entryY)
+        pass
 
-   def execApply( self ) :
-       pointRange = self.pointRange
-       newX = float( self.newX )
-       newY = float( self.newY )
-       newPoint = newX, newY
-       self.getController().editPoint( self._model, newPoint, pointRange )
-       return
+    def execApply(self):
+        pointRange = self.pointRange
+        newX = float(self.newX)
+        newY = float(self.newY)
+        newPoint = newX, newY
+        self.getController().editPoint(self._model, newPoint, pointRange)
+        return
 
+    def retrieveUserEntries(self):
+        self.newX = str(self.entryX.text())
+        self.newY = str(self.entryY.text())
+        pass
 
-   def retrieveUserEntries( self ) :
-       self.newX= str( self.entryX.text() )
-       self.newY= str( self.entryY.text() )
-       pass
+    def checkUserEntries(self):
+        if self.newX == "" or self.newY == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
 
-   def checkUserEntries( self ) :
-       if self.newX == "" or self.newY == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
 
 pass
index 69c648ebd85836013db74a67d6f150509430b40b..a9c43c8c5971c548f66c5543bf0760c7a7d740b3 100755 (executable)
@@ -1,42 +1,42 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditRadiusDialog( DialogEdit ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldRadius  ) :
-       """Constructor"""
+class EditRadiusDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldRadius):
+        """Constructor"""
 
-       #Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
 
-       self._model = model
-       self.entryRadius.setText( oldRadius )
-       pass
+        self._model = model
+        self.entryRadius.setText(oldRadius)
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lRadius = QLabel( "Radius", self )
-       self.v11.addWidget( lRadius )
-       self.entryRadius = QLineEdit( self )
-       self.entryRadius.setValidator( floatValidator )
-       self.v12.addWidget( self.entryRadius )
-       pass
+        lRadius = QLabel("Radius", self)
+        self.v11.addWidget(lRadius)
+        self.entryRadius = QLineEdit(self)
+        self.entryRadius.setValidator(floatValidator)
+        self.v12.addWidget(self.entryRadius)
+        pass
 
-   def execApply( self ) :
-       newRadius = self.newRadius
-       self.getController().editRadius( self._model, newRadius )
-       return
+    def execApply(self):
+        newRadius = self.newRadius
+        self.getController().editRadius(self._model, newRadius)
+        return
 
+    def retrieveUserEntries(self):
+        self.newRadius = str(self.entryRadius.text())
+        pass
 
-   def retrieveUserEntries( self ) :
-       self.newRadius = str( self.entryRadius.text() )
-       pass
+    def checkUserEntries(self):
+        if self.newRadius == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
 
-   def checkUserEntries( self ) :
-       if self.newRadius == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
 
 pass
index dddcc2da263d7ca62c579c72baead043e1cbde8a..6ff291bc22e9e90c01dec7c467faff71ab1946c5 100755 (executable)
@@ -1,38 +1,39 @@
 from DialogEdit import *
 from qtsalome import *
 
-class RenameDialog( DialogEdit ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldName  ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
-
-       self._model = model
-       self.entryName.setText( oldName )
-       pass
-
-   def addSpecialWidgets( self ) :
-       lName = QLabel( "Name", self )
-       self.v11.addWidget( lName )
-       self.entryName = QLineEdit( self )
-       self.v12.addWidget( self.entryName )
-       pass
-
-   def execApply( self ) :
-       newName = self.newName
-       self.getController().editName( self._model, newName )
-       return
-
-   def retrieveUserEntries( self ) :
-       self.newName = str( self.entryName.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.newName == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+
+class RenameDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldName):
+        """Constructor"""
+
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
+
+        self._model = model
+        self.entryName.setText(oldName)
+        pass
+
+    def addSpecialWidgets(self):
+        lName = QLabel("Name", self)
+        self.v11.addWidget(lName)
+        self.entryName = QLineEdit(self)
+        self.v12.addWidget(self.entryName)
+        pass
+
+    def execApply(self):
+        newName = self.newName
+        self.getController().editName(self._model, newName)
+        return
+
+    def retrieveUserEntries(self):
+        self.newName = str(self.entryName.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.newName == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
 
 pass
index bbd59e093c09b5d009df5b8703e0d0c0a1b95668..0f252b8b4a615240ed8cfdbd128caf8f2ba3e214 100755 (executable)
@@ -2,57 +2,64 @@ from Model import *
 from qtsalome import *
 
 __all__ = [
-           "Circle",
-          ]
-
-class Circle( Model ):
-
-   def __init__( self, name, center, radius, controller ):
-       """Constructor"""
-
-       Model.__init__( self,controller )
-       self._name = name
-       self._center = center
-       self._radius = radius
-       self.addTreeWidgetItem( self.getName(), controller )
-       self.addGraphicScene( controller )
-       pass
-
-   def getCenter( self ):
-       return self._center[0], self._center[1]
-
-   def setCenter( self, center ):
-       self._center = center
-       pass
-
-   def getRadius( self ):
-       return self._radius
-
-   def setRadius( self, radius ):
-       self._radius = radius
-
-   def addTreeWidgetItem( self, name, controller ):
-       from CircleTreeWidgetItem import CircleTreeWidgetItem
-       from TreeWidgetItem import TreeWidgetItem
-
-       myTreeWidgetItem = CircleTreeWidgetItem( name, controller, ["Show", "Rename", "Delete"] )
-       newTreeWidgetItem = TreeWidgetItem( str(self.getCenter()[0]) + ':' + str(self.getCenter()[1]), controller, ["Edit"] )
-       myTreeWidgetItem.addChild( newTreeWidgetItem )
-       newTreeWidgetItem = TreeWidgetItem( str(self.getRadius()), controller, ["Edit"] )
-       myTreeWidgetItem.addChild( newTreeWidgetItem )
-       myTreeWidgetItem.setModel( self )
-       self.getViews().append( myTreeWidgetItem )
-       return myTreeWidgetItem
-
-   def addGraphicScene( self, controller ) :
-       from CircleGraphicsScene import CircleGraphicsScene
-
-       myGraphicsScene = CircleGraphicsScene( controller )
-       myGraphicsScene.setModel( self )
-       self.getViews().append( myGraphicsScene )
-       return myGraphicsScene
-
-   def save( self ):
-       pass
+    "Circle",
+]
+
+
+class Circle(Model):
+    def __init__(self, name, center, radius, controller):
+        """Constructor"""
+
+        Model.__init__(self, controller)
+        self._name = name
+        self._center = center
+        self._radius = radius
+        self.addTreeWidgetItem(self.getName(), controller)
+        self.addGraphicScene(controller)
+        pass
+
+    def getCenter(self):
+        return self._center[0], self._center[1]
+
+    def setCenter(self, center):
+        self._center = center
+        pass
+
+    def getRadius(self):
+        return self._radius
+
+    def setRadius(self, radius):
+        self._radius = radius
+
+    def addTreeWidgetItem(self, name, controller):
+        from CircleTreeWidgetItem import CircleTreeWidgetItem
+        from TreeWidgetItem import TreeWidgetItem
+
+        myTreeWidgetItem = CircleTreeWidgetItem(
+            name, controller, ["Show", "Rename", "Delete"]
+        )
+        newTreeWidgetItem = TreeWidgetItem(
+            str(self.getCenter()[0]) + ":" + str(self.getCenter()[1]),
+            controller,
+            ["Edit"],
+        )
+        myTreeWidgetItem.addChild(newTreeWidgetItem)
+        newTreeWidgetItem = TreeWidgetItem(str(self.getRadius()), controller, ["Edit"])
+        myTreeWidgetItem.addChild(newTreeWidgetItem)
+        myTreeWidgetItem.setModel(self)
+        self.getViews().append(myTreeWidgetItem)
+        return myTreeWidgetItem
+
+    def addGraphicScene(self, controller):
+        from CircleGraphicsScene import CircleGraphicsScene
+
+        myGraphicsScene = CircleGraphicsScene(controller)
+        myGraphicsScene.setModel(self)
+        self.getViews().append(myGraphicsScene)
+        return myGraphicsScene
+
+    def save(self):
+        pass
+
 
 pass
index 0340be7b37d820f2f7dfc1b0ff473c991f0870cc..3861780db2fe70890d192f26bc278594338754fb 100755 (executable)
@@ -1,34 +1,36 @@
 from View import *
 
+
 class Model:
+    def __init__(self, controller):
+        """Constructor"""
 
-   def __init__( self, controller ):
-       """Constructor"""
+        self._name = None
+        self._views = []
+        pass
 
-       self._name = None
-       self._views = []
-       pass
+    def getName(self):
+        return self._name
 
-   def getName( self ):
-       return self._name
+    def setName(self, name):
+        self._name = name
+        pass
 
-   def setName( self, name ):
-       self._name = name
-       pass
+    def getViews(self):
+        return self._views
 
-   def getViews( self ) :
-       return self._views
+    def addView(self):
+        myView = View()
+        self._views.append(myView)
+        return myView
 
-   def addView( self ) :
-       myView = View()
-       self._views.append( myView )
-       return myView
+    def updateViews(self, mode):
+        for view in self._views:
+            view.update(mode)
 
-   def updateViews( self, mode ) :
-       for view in self._views : view.update( mode )
+    def save(self):
+        print("Virtual method")
+        pass
 
-   def save( self ) :
-       print('Virtual method')
-       pass
 
 pass
index 9a6bd49b4d439ee26d0ea177c76b1841765aa528..810ffcb7fecda111f269857a260529561f5557ca 100755 (executable)
@@ -1,54 +1,59 @@
 from Model import *
 from qtsalome import *
 
-class Polyline( Model ):
-
-   def __init__( self, name, points, controller ):
-       """Constructor"""
-
-       Model.__init__( self, controller )
-       self._name = name
-       self._points = points
-       self.addTreeWidgetItem( self.getName(), controller )
-       self.addGraphicScene( controller )
-       pass
-
-   def getPoints( self ):
-       return self._points
-
-   def setPoints( self, points ):
-       self._points = points
-       pass
-
-   def editPoint( self, pointRange, newPoint ) :
-       self._points[ pointRange ] = newPoint
-       pass
-
-   def addTreeWidgetItem( self, name, controller ):
-       from PolyTreeWidgetItem import PolyTreeWidgetItem
-       from TreeWidgetItem import TreeWidgetItem
-
-       myTreeWidgetItem = PolyTreeWidgetItem( name, controller, ["Show", "Rename", "Delete"] )
-       # Adding list of points
-       for point in self.getPoints() :
-          x = point[0]
-          y = point[1]
-          newTreeWidgetItem = TreeWidgetItem( str(x) + ":" + str(y), controller, ["Edit"] )
-          myTreeWidgetItem.addChild( newTreeWidgetItem )
-          pass
-       myTreeWidgetItem.setModel( self )
-       self.getViews().append( myTreeWidgetItem )
-       return myTreeWidgetItem
-
-   def addGraphicScene( self, controller ) :
-       from PolyGraphicsScene import PolyGraphicsScene
-
-       myGraphicsScene = PolyGraphicsScene( controller )
-       myGraphicsScene.setModel( self )
-       self.getViews().append( myGraphicsScene )
-       return myGraphicsScene
-
-   def save( self ):
-       pass
+
+class Polyline(Model):
+    def __init__(self, name, points, controller):
+        """Constructor"""
+
+        Model.__init__(self, controller)
+        self._name = name
+        self._points = points
+        self.addTreeWidgetItem(self.getName(), controller)
+        self.addGraphicScene(controller)
+        pass
+
+    def getPoints(self):
+        return self._points
+
+    def setPoints(self, points):
+        self._points = points
+        pass
+
+    def editPoint(self, pointRange, newPoint):
+        self._points[pointRange] = newPoint
+        pass
+
+    def addTreeWidgetItem(self, name, controller):
+        from PolyTreeWidgetItem import PolyTreeWidgetItem
+        from TreeWidgetItem import TreeWidgetItem
+
+        myTreeWidgetItem = PolyTreeWidgetItem(
+            name, controller, ["Show", "Rename", "Delete"]
+        )
+        # Adding list of points
+        for point in self.getPoints():
+            x = point[0]
+            y = point[1]
+            newTreeWidgetItem = TreeWidgetItem(
+                str(x) + ":" + str(y), controller, ["Edit"]
+            )
+            myTreeWidgetItem.addChild(newTreeWidgetItem)
+            pass
+        myTreeWidgetItem.setModel(self)
+        self.getViews().append(myTreeWidgetItem)
+        return myTreeWidgetItem
+
+    def addGraphicScene(self, controller):
+        from PolyGraphicsScene import PolyGraphicsScene
+
+        myGraphicsScene = PolyGraphicsScene(controller)
+        myGraphicsScene.setModel(self)
+        self.getViews().append(myGraphicsScene)
+        return myGraphicsScene
+
+    def save(self):
+        pass
+
 
 pass
index 69256a5d0d51f0b64dec0f83ac525800e447f7ad..be011f94a1a2eb70604d843b125b615be6cd49b2 100755 (executable)
-from PyQt5.QtCore    import *
-from PyQt5.QtGui     import *
+from PyQt5.QtCore import *
+from PyQt5.QtGui import *
 from PyQt5.QtWidgets import *
 
 from TreeWidget import TreeWidget
 from GraphicsView import GraphicsView
 from GraphicsScene import GraphicsScene
 
-class Desktop( QMainWindow ) :
-
-   def __init__( self ) :
-       """Constructor"""
-
-       QMainWindow.__init__( self )
-       self._controller = None
-
-       # Creating a dockWidget which will contain globalTree
-       self._globalTree= TreeWidget( self )
-       self._globalTree.setHeaderLabel ( "Object browser" )
-       dockGlobalTree = QDockWidget( "Tree view", self )
-       dockGlobalTree.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       dockGlobalTree.setWidget( self._globalTree )
-       self.addDockWidget( Qt.LeftDockWidgetArea, dockGlobalTree )
-
-       # Creating a central widget which contains the globalGraphicsView
-       self._dockGlobalView = QDockWidget( "Graphics view", self )
-       scene = GraphicsScene( self._controller )
-       self._globalGraphicsView = GraphicsView( scene )
-       self._dockGlobalView.setWidget( self._globalGraphicsView )
-       self._globalGraphicsView.show()
-       self.setCentralWidget( self._dockGlobalView )
-
-       # Creating menus and toolbars
-       self.createMenus()
-       self.createToolBars()
-       pass
-
-   def getController( self ) :
-       return self._controller
-
-   def setController( self, controller ) :
-       self._controller = controller
-       pass
-
-   def getGlobalTree( self ) :
-       return self._globalTree
-
-   def createMenus( self ) :
-       # Creating menus
-       curveMenu = self.menuBar().addMenu( "Curve" )
-       toolsMenu = self.menuBar().addMenu( "Tools" )
-       # Adding actions
-       createPolylineAction = QAction( "Polyline", self )
-       createCircleAction = QAction( "Circle", self )
-       curveMenu.addAction( createPolylineAction )
-       curveMenu.addAction( createCircleAction )
-
-       deleteAllAction = QAction( "Delete all", self )
-       toolsMenu.addAction( deleteAllAction )
-       # Connecting slots
-       createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
-       createCircleAction.triggered.connect(self.showCreateCircleDialog)
-       deleteAllAction.triggered.connect(self.deleteAll)
-       pass
-
-   def createToolBars( self ) :
-       # Creating toolBars
-       createPolylineTB = self.addToolBar( "New polyline")
-       createCircleTB = self.addToolBar( "New circle")
-       createPolylineAction = QAction( "Polyline", self )
-       createCircleAction = QAction( "Circle", self )
-       # Adding actions
-       createPolylineTB.addAction( createPolylineAction )
-       createCircleTB.addAction( createCircleAction )
-       # Connecting slots
-       createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
-       createCircleAction.triggered.connect(self.showCreateCircleDialog)
-       pass
-
-   def showCreatePolylineDialog( self ) :
-       from CreatePolylineDialog import CreatePolylineDialog
-
-       widgetDialogBox = QDockWidget( "myDockWidget", self )
-       myDialog = CreatePolylineDialog( "www.google.fr", self._controller, widgetDialogBox )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Polyline definition" )
-       self.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
-
-   def showCreateCircleDialog( self ) :
-       from CreateCircleDialog import CreateCircleDialog
-
-       widgetDialogBox = QDockWidget( "myDockWidget", self )
-       myDialog = CreateCircleDialog( "www.cea.fr", self._controller, widgetDialogBox )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Polyline definition" )
-       self.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
-
-   def deleteAll( self ) :
-       models = self.getController().getModels()
-       if len( models ) == 0 : return
-       answer = QMessageBox.question( self, 'Confirmation', 'Do you really want to delete all the existing objects ?' , QMessageBox.Yes | QMessageBox.No )
-       if answer == QMessageBox.Yes :
-          for model in models :
-             self.getController().removeModel( model )
-             pass
-          pass
-       pass
-
-   def updateGlobalGraphicsView( self, scene ) :
-       self._globalGraphicsView.setScene( scene )
-       if scene is None :
-          self._dockGlobalView.setWindowTitle( "Graphics view" )
-          return
-       self._dockGlobalView.setWindowTitle( "Graphics view : showing " + scene.getModel().getName() )
-       #Resizing the globalGraphicView
-       sceneRect = scene.getRect()
-       topLeft = sceneRect.topLeft()
-       viewRect = QRectF( topLeft.x(), topLeft.y(), 2*sceneRect.width(), 2*sceneRect.height() )
-       self._globalGraphicsView.fitInView ( viewRect, Qt.IgnoreAspectRatio )
-       pass
+
+class Desktop(QMainWindow):
+    def __init__(self):
+        """Constructor"""
+
+        QMainWindow.__init__(self)
+        self._controller = None
+
+        # Creating a dockWidget which will contain globalTree
+        self._globalTree = TreeWidget(self)
+        self._globalTree.setHeaderLabel("Object browser")
+        dockGlobalTree = QDockWidget("Tree view", self)
+        dockGlobalTree.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        dockGlobalTree.setWidget(self._globalTree)
+        self.addDockWidget(Qt.LeftDockWidgetArea, dockGlobalTree)
+
+        # Creating a central widget which contains the globalGraphicsView
+        self._dockGlobalView = QDockWidget("Graphics view", self)
+        scene = GraphicsScene(self._controller)
+        self._globalGraphicsView = GraphicsView(scene)
+        self._dockGlobalView.setWidget(self._globalGraphicsView)
+        self._globalGraphicsView.show()
+        self.setCentralWidget(self._dockGlobalView)
+
+        # Creating menus and toolbars
+        self.createMenus()
+        self.createToolBars()
+        pass
+
+    def getController(self):
+        return self._controller
+
+    def setController(self, controller):
+        self._controller = controller
+        pass
+
+    def getGlobalTree(self):
+        return self._globalTree
+
+    def createMenus(self):
+        # Creating menus
+        curveMenu = self.menuBar().addMenu("Curve")
+        toolsMenu = self.menuBar().addMenu("Tools")
+        # Adding actions
+        createPolylineAction = QAction("Polyline", self)
+        createCircleAction = QAction("Circle", self)
+        curveMenu.addAction(createPolylineAction)
+        curveMenu.addAction(createCircleAction)
+
+        deleteAllAction = QAction("Delete all", self)
+        toolsMenu.addAction(deleteAllAction)
+        # Connecting slots
+        createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
+        createCircleAction.triggered.connect(self.showCreateCircleDialog)
+        deleteAllAction.triggered.connect(self.deleteAll)
+        pass
+
+    def createToolBars(self):
+        # Creating toolBars
+        createPolylineTB = self.addToolBar("New polyline")
+        createCircleTB = self.addToolBar("New circle")
+        createPolylineAction = QAction("Polyline", self)
+        createCircleAction = QAction("Circle", self)
+        # Adding actions
+        createPolylineTB.addAction(createPolylineAction)
+        createCircleTB.addAction(createCircleAction)
+        # Connecting slots
+        createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
+        createCircleAction.triggered.connect(self.showCreateCircleDialog)
+        pass
+
+    def showCreatePolylineDialog(self):
+        from CreatePolylineDialog import CreatePolylineDialog
+
+        widgetDialogBox = QDockWidget("myDockWidget", self)
+        myDialog = CreatePolylineDialog(
+            "www.google.fr", self._controller, widgetDialogBox
+        )
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Polyline definition")
+        self.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
+
+    def showCreateCircleDialog(self):
+        from CreateCircleDialog import CreateCircleDialog
+
+        widgetDialogBox = QDockWidget("myDockWidget", self)
+        myDialog = CreateCircleDialog("www.cea.fr", self._controller, widgetDialogBox)
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Circle definition")
+        self.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
+
+    def deleteAll(self):
+        models = self.getController().getModels()
+        if len(models) == 0:
+            return
+        answer = QMessageBox.question(
+            self,
+            "Confirmation",
+            "Do you really want to delete all the existing objects ?",
+            QMessageBox.Yes | QMessageBox.No,
+        )
+        if answer == QMessageBox.Yes:
+            for model in models:
+                self.getController().removeModel(model)
+                pass
+            pass
+        pass
+
+    def updateGlobalGraphicsView(self, scene):
+        self._globalGraphicsView.setScene(scene)
+        if scene is None:
+            self._dockGlobalView.setWindowTitle("Graphics view")
+            return
+        self._dockGlobalView.setWindowTitle(
+            "Graphics view : showing " + scene.getModel().getName()
+        )
+        # Resizing the globalGraphicsView
+        sceneRect = scene.getRect()
+        topLeft = sceneRect.topLeft()
+        viewRect = QRectF(
+            topLeft.x(), topLeft.y(), 2 * sceneRect.width(), 2 * sceneRect.height()
+        )
+        self._globalGraphicsView.fitInView(viewRect, Qt.IgnoreAspectRatio)
+        pass
+
 
 pass
index 45be4e97cd95404c2df6578d207dd33d425a3717..0712e48b3cd96ddacee22bfced0d97221d4ef653 100755 (executable)
@@ -1,21 +1,22 @@
 import sys
 
-from PyQt5.QtCore    import *
-from PyQt5.QtGui     import *
+from PyQt5.QtCore import *
+from PyQt5.QtGui import *
 from PyQt5.QtWidgets import *
 
 from Controller import Controller
 from Desktop import Desktop
 
-def main( args ) :
-    Appli = QApplication( args )
+
+def main(args):
+    Appli = QApplication(args)
     MainFrame = Desktop()
-    myController = Controller( MainFrame )
-    MainFrame.setController( myController )
+    myController = Controller(MainFrame)
+    MainFrame.setController(myController)
     MainFrame.show()
     Appli.exec_()
 
-if __name__ == "__main__" :
-   main( sys.argv )
-   pass
 
+if __name__ == "__main__":
+    main(sys.argv)
+    pass
index 65a0c2187a855f106815abaabe5b5f147f409446..edc3a3c99f4e4269b77ce06a2379fc9c09d282e5 100755 (executable)
@@ -2,29 +2,30 @@ from GraphicsScene import GraphicsScene
 from qtsalome import *
 from GraphicsRectItem import GraphicsRectItem
 
-class CircleGraphicsScene(  GraphicsScene ) :
-
-   def __init__( self, controller ) :
-       GraphicsScene.__init__( self, controller )
-       pass
-
-   def draw( self ) :
-
-       import math
-
-       center = self._model.getCenter()
-       radius = float( self._model.getRadius() )
-       xCenter = float( center[0] )
-       yCenter = float( center[1] )
-
-       #Drawing the center as a small rectangle
-       centerItem = GraphicsRectItem( xCenter-0.1, yCenter-0.1, 0.2, 0.2, None )
-       self.addItem( centerItem )
-       #Drawing the circle
-       rect = QRectF( xCenter-radius, yCenter-radius, 2*radius, 2*radius )
-       circleItem = QGraphicsEllipseItem()
-       circleItem.setRect( rect )
-       self.addItem( circleItem )
-       pass
+
+class CircleGraphicsScene(GraphicsScene):
+    def __init__(self, controller):
+        GraphicsScene.__init__(self, controller)
+        pass
+
+    def draw(self):
+
+        import math
+
+        center = self._model.getCenter()
+        radius = float(self._model.getRadius())
+        xCenter = float(center[0])
+        yCenter = float(center[1])
+
+        # Drawing the center as a small rectangle
+        centerItem = GraphicsRectItem(xCenter - 0.1, yCenter - 0.1, 0.2, 0.2, None)
+        self.addItem(centerItem)
+        # Drawing the circle
+        rect = QRectF(xCenter - radius, yCenter - radius, 2 * radius, 2 * radius)
+        circleItem = QGraphicsEllipseItem()
+        circleItem.setRect(rect)
+        self.addItem(circleItem)
+        pass
+
 
 pass
index da53ff370fe84e214ec7ab77398be0465b1988e6..59d4aee52e98fb37df9aa1b3764dc0caa5e58467 100755 (executable)
@@ -2,26 +2,27 @@ from View import *
 from TreeWidgetItem import TreeWidgetItem
 from qtsalome import *
 
-class CircleTreeWidgetItem( TreeWidgetItem ) :
 
-   def __init__( self, name, controller, actionsList ) :
-       """Constructor"""
+class CircleTreeWidgetItem(TreeWidgetItem):
+    def __init__(self, name, controller, actionsList):
+        """Constructor"""
 
-       TreeWidgetItem.__init__( self, name, controller, actionsList )
-       pass
+        TreeWidgetItem.__init__(self, name, controller, actionsList)
+        pass
 
-   def editInGlobalTree( self, treeWidgetItem ) :
-       name = self.getModel().getName()
-       treeWidgetItem.setText( 0 , name )
-       center = self._model.getCenter()
-       xCenter = center[0]
-       yCenter = center[1]
-       relatedItem = treeWidgetItem.child( 0 )
-       relatedItem.setText( 0 , str(xCenter) + ":" + str(yCenter) )
+    def editInGlobalTree(self, treeWidgetItem):
+        name = self.getModel().getName()
+        treeWidgetItem.setText(0, name)
+        center = self._model.getCenter()
+        xCenter = center[0]
+        yCenter = center[1]
+        relatedItem = treeWidgetItem.child(0)
+        relatedItem.setText(0, str(xCenter) + ":" + str(yCenter))
+
+        radius = self._model.getRadius()
+        relatedItem = treeWidgetItem.child(1)
+        relatedItem.setText(0, str(radius))
+        pass
 
-       radius = self._model.getRadius()
-       relatedItem = treeWidgetItem.child( 1 )
-       relatedItem.setText( 0 , str(radius) )
-       pass
 
 pass
index 9d895161c517b7a239c5f1238daf968e55dc09f0..449d2c037d7d12cfe01a0d85e666a934528cc56c 100755 (executable)
@@ -1,15 +1,16 @@
 from qtsalome import *
 
-class GraphicsRectItem( QGraphicsRectItem ) :
 
-   def __init__( self, x, y, w, h, index ) :
-       QGraphicsRectItem.__init__( self, x, y, w, h )
-       self._index = index
-       self.setFlag( self.ItemIsMovable, True )
-       self.setFlag( self.ItemIsSelectable, True )
-       pass
+class GraphicsRectItem(QGraphicsRectItem):
+    def __init__(self, x, y, w, h, index):
+        QGraphicsRectItem.__init__(self, x, y, w, h)
+        self._index = index
+        self.setFlag(self.ItemIsMovable, True)
+        self.setFlag(self.ItemIsSelectable, True)
+        pass
+
+    def getIndex(self):
+        return self._index
 
-   def getIndex( self ) :
-       return self._index
 
 pass
index 817bb5f5d207abffb875cca8fad9353fcd0a1a6b..42723d40c0ad73cb92383b467e5d0b7ad68474e6 100755 (executable)
@@ -1,67 +1,68 @@
 from View import *
 from qtsalome import *
 
-class GraphicsScene( View, QGraphicsScene ) :
 
-   def __init__( self, controller ) :
-       """Constructor"""
+class GraphicsScene(View, QGraphicsScene):
+    def __init__(self, controller):
+        """Constructor"""
 
-       View.__init__( self, controller )
-       QGraphicsScene.__init__( self )
-       pass
+        View.__init__(self, controller)
+        QGraphicsScene.__init__(self)
+        pass
 
-   def getRect( self ) :
-       rect = QRectF( 0, 0, self.width(), self.height() )
-       return rect
+    def getRect(self):
+        rect = QRectF(0, 0, self.width(), self.height())
+        return rect
 
-   def editPoint( self, oldPoint, newPoint ) :
-       polyline = self.getModel()
-       self.getController().editPoint( polyline, oldPoint, newPoint )
-       pass
+    def editPoint(self, oldPoint, newPoint):
+        polyline = self.getModel()
+        self.getController().editPoint(polyline, oldPoint, newPoint)
+        pass
 
-   def editCenter( self, center ) :
-       circle = self.getModel()
-       self.getController().editCenter( circle, center )
-       pass
+    def editCenter(self, center):
+        circle = self.getModel()
+        self.getController().editCenter(circle, center)
+        pass
 
-   def editRadius( self, radius ) :
-       circle = self.getModel()
-       self.getController().editRadius( circle, radius )
-       pass
+    def editRadius(self, radius):
+        circle = self.getModel()
+        self.getController().editRadius(circle, radius)
+        pass
 
-   def update( self, mode ) :
-       if mode == 'creation' :
-          self.showInGlobalGraphicsView()
-          pass
-       elif mode == "showing" :
-          self.showInGlobalGraphicsView()
-       elif mode == 'modification' :
-          self.undraw()
-          self.showInGlobalGraphicsView()
-          pass
-       elif mode == 'supression' :
-          self.removeFromGlobalGraphicsView()
-          pass
-       else :
-          return
+    def update(self, mode):
+        if mode == "creation":
+            self.showInGlobalGraphicsView()
+            pass
+        elif mode == "showing":
+            self.showInGlobalGraphicsView()
+        elif mode == "modification":
+            self.undraw()
+            self.showInGlobalGraphicsView()
+            pass
+        elif mode == "supression":
+            self.removeFromGlobalGraphicsView()
+            pass
+        else:
+            return
 
-   def showInGlobalGraphicsView( self ) :
-       self.draw()
-       self.getController().getMainFrame().updateGlobalGraphicsView( self  )
-       pass
+    def showInGlobalGraphicsView(self):
+        self.draw()
+        self.getController().getMainFrame().updateGlobalGraphicsView(self)
+        pass
 
-   def removeFromGlobalGraphicsView( self ) :
-       self.getController().getMainFrame().updateGlobalGraphicsView( None  )
-       pass
+    def removeFromGlobalGraphicsView(self):
+        self.getController().getMainFrame().updateGlobalGraphicsView(None)
+        pass
 
-   def draw( self ) :
-       print('Virtual method')
-       pass
+    def draw(self):
+        print("Virtual method")
+        pass
+
+    def undraw(self):
+        for item in self.items():
+            self.removeItem(item)
+            pass
+        pass
 
-   def undraw( self ) :
-       for item in self.items() :
-          self.removeItem( item )
-          pass
-       pass
 
 pass
index 6635e134747482dc111a372bfce1ae7b0dfee768..24a394ef9cd4047d551e17cfa363e613530bbc95 100755 (executable)
@@ -2,99 +2,104 @@ from Polyline import Polyline
 from Circle import Circle
 from qtsalome import *
 
-class GraphicsView( QGraphicsView ) :
 
-   moved    = pyqtSignal(QPointF)
-   released = pyqtSignal(QPointF)
+class GraphicsView(QGraphicsView):
 
-   def __init__( self, scene ) :
-       QGraphicsView.__init__( self, scene )
-       self.setMouseTracking( True )
-       self._selectedItem = None
-       self.moved[QPointF].connect(self.execMouseMoveEvent)
-       self.released[QPointF].connect(self.execMouseReleaseEvent)
-       pass
+    moved = pyqtSignal(QPointF)
+    released = pyqtSignal(QPointF)
 
-   def mousePressEvent( self, mouseEvent ) :
-       QGraphicsView.mousePressEvent( self, mouseEvent )
-       if self.scene() is None : return
-       self._selectedItem = self.scene().mouseGrabberItem()
-       pass
+    def __init__(self, scene):
+        QGraphicsView.__init__(self, scene)
+        self.setMouseTracking(True)
+        self._selectedItem = None
+        self.moved[QPointF].connect(self.execMouseMoveEvent)
+        self.released[QPointF].connect(self.execMouseReleaseEvent)
+        pass
 
-   def mouseMoveEvent( self, mouseEvent ) :
-       QGraphicsView.mouseMoveEvent( self, mouseEvent )
-       pt = mouseEvent.pos()
-       currentPos = self.mapToScene( pt )
-       self.moved.emit(currentPos)
-       pass
+    def mousePressEvent(self, mouseEvent):
+        QGraphicsView.mousePressEvent(self, mouseEvent)
+        if self.scene() is None:
+            return
+        self._selectedItem = self.scene().mouseGrabberItem()
+        pass
 
-   def mouseReleaseEvent( self, mouseEvent ) :
-       QGraphicsView.mouseReleaseEvent( self, mouseEvent )
-       if mouseEvent.button() == Qt.LeftButton :
-          pt = mouseEvent.pos()
-          newPos = self.mapToScene( pt )
-          self.released.emit(newPos)
-          self._selectedItem = None
-          pass
-       pass
+    def mouseMoveEvent(self, mouseEvent):
+        QGraphicsView.mouseMoveEvent(self, mouseEvent)
+        pt = mouseEvent.pos()
+        currentPos = self.mapToScene(pt)
+        self.moved.emit(currentPos)
+        pass
 
-   def execMouseMoveEvent( self, currentPos ) :
-       if self._selectedItem is None : return
-       selectedIndex = self._selectedItem.getIndex()
-       newX = currentPos.x()
-       newY = currentPos.y()
-       newPoint = newX, newY
-       model = self.scene().getModel()
-       pen = QPen( QColor("red") )
-       if isinstance( model, Polyline ) :
-          #Previsualisation
-          if selectedIndex == 0 :
-             nextPoint = model.getPoints()[ selectedIndex+1 ]
-             xNext = nextPoint[0]
-             yNext = nextPoint[1]
-             self.scene().addLine( newX, newY, xNext, yNext, pen )
-             pass
-          elif selectedIndex == len( model.getPoints()) - 1 :
-             previousPoint = model.getPoints()[ selectedIndex-1 ]
-             xPrevious = previousPoint[0]
-             yPrevious = previousPoint[1]
-             self.scene().addLine( xPrevious, yPrevious, newX, newY, pen )
-             pass
-          else :
-             previousPoint = model.getPoints()[ selectedIndex-1 ]
-             xPrevious = previousPoint[0]
-             yPrevious = previousPoint[1]
-             self.scene().addLine( xPrevious, yPrevious, newX, newY, pen )
-             nextPoint = model.getPoints()[ selectedIndex+1 ]
-             xNext = nextPoint[0]
-             yNext = nextPoint[1]
-             self.scene().addLine( newX, newY, xNext, yNext, pen )
-             pass
-          pass
-       elif isinstance( model, Circle ) :
-          #Previsualisation
-          radius = float( model.getRadius() )
-          rect = QRectF( newX-radius, newY-radius, 2*radius, 2*radius )
-          circleItem = QGraphicsEllipseItem()
-          circleItem.setPen( pen )
-          circleItem.setRect( rect )
-          self.scene().addItem( circleItem )
-          pass
-       pass
+    def mouseReleaseEvent(self, mouseEvent):
+        QGraphicsView.mouseReleaseEvent(self, mouseEvent)
+        if mouseEvent.button() == Qt.LeftButton:
+            pt = mouseEvent.pos()
+            newPos = self.mapToScene(pt)
+            self.released.emit(newPos)
+            self._selectedItem = None
+            pass
+        pass
+
+    def execMouseMoveEvent(self, currentPos):
+        if self._selectedItem is None:
+            return
+        selectedIndex = self._selectedItem.getIndex()
+        newX = currentPos.x()
+        newY = currentPos.y()
+        newPoint = newX, newY
+        model = self.scene().getModel()
+        pen = QPen(QColor("red"))
+        if isinstance(model, Polyline):
+            # Preview
+            if selectedIndex == 0:
+                nextPoint = model.getPoints()[selectedIndex + 1]
+                xNext = nextPoint[0]
+                yNext = nextPoint[1]
+                self.scene().addLine(newX, newY, xNext, yNext, pen)
+                pass
+            elif selectedIndex == len(model.getPoints()) - 1:
+                previousPoint = model.getPoints()[selectedIndex - 1]
+                xPrevious = previousPoint[0]
+                yPrevious = previousPoint[1]
+                self.scene().addLine(xPrevious, yPrevious, newX, newY, pen)
+                pass
+            else:
+                previousPoint = model.getPoints()[selectedIndex - 1]
+                xPrevious = previousPoint[0]
+                yPrevious = previousPoint[1]
+                self.scene().addLine(xPrevious, yPrevious, newX, newY, pen)
+                nextPoint = model.getPoints()[selectedIndex + 1]
+                xNext = nextPoint[0]
+                yNext = nextPoint[1]
+                self.scene().addLine(newX, newY, xNext, yNext, pen)
+                pass
+            pass
+        elif isinstance(model, Circle):
+            # Preview
+            radius = float(model.getRadius())
+            rect = QRectF(newX - radius, newY - radius, 2 * radius, 2 * radius)
+            circleItem = QGraphicsEllipseItem()
+            circleItem.setPen(pen)
+            circleItem.setRect(rect)
+            self.scene().addItem(circleItem)
+            pass
+        pass
+
+    def execMouseReleaseEvent(self, newPos):
+        if self._selectedItem is None:
+            return
+        selectedIndex = self._selectedItem.getIndex()
+        newX = newPos.x()
+        newY = newPos.y()
+        newPoint = newX, newY
+        model = self.scene().getModel()
+        if isinstance(model, Polyline):
+            self.scene().getController().editPoint(model, newPoint, selectedIndex)
+            pass
+        elif isinstance(model, Circle):
+            self.scene().getController().editCenter(model, newPoint)
+            pass
+        pass
 
-   def execMouseReleaseEvent( self, newPos ) :
-       if self._selectedItem is None : return
-       selectedIndex = self._selectedItem.getIndex()
-       newX = newPos.x()
-       newY = newPos.y()
-       newPoint = newX, newY
-       model = self.scene().getModel()
-       if isinstance( model, Polyline ) :
-          self.scene().getController().editPoint( model, newPoint, selectedIndex )
-          pass
-       elif isinstance( model, Circle ) :
-          self.scene().getController().editCenter( model, newPoint )
-          pass
-       pass
 
 pass
index 1f2a38025033282d7995d39f7c50041d29c8f420..87a912e414919ab070f2b3b7c25b5f64018b1fc6 100755 (executable)
@@ -1,15 +1,16 @@
 from qtsalome import *
 
-class Menu( QMenu ) :
 
-   def __init__( self, item ) :
-       """Constructor"""
+class Menu(QMenu):
+    def __init__(self, item):
+        """Constructor"""
 
-       QMenu.__init__( self )
-       self._item = item
-       pass
+        QMenu.__init__(self)
+        self._item = item
+        pass
+
+    def getItem(self):
+        return self._item
 
-   def getItem( self ) :
-       return self._item
 
 pass
index 0690d7543f66ae081683d48fa0466a8f26118251..c68fc67bd112a4fe8e69a51c52776fdcd4dde3d9 100755 (executable)
@@ -2,38 +2,39 @@ from GraphicsScene import GraphicsScene
 from qtsalome import *
 from GraphicsRectItem import GraphicsRectItem
 
-class PolyGraphicsScene(  GraphicsScene ) :
 
-   def __init__( self, controller ) :
-       GraphicsScene.__init__( self, controller )
-       pass
+class PolyGraphicsScene(GraphicsScene):
+    def __init__(self, controller):
+        GraphicsScene.__init__(self, controller)
+        pass
 
-   def draw( self ) :
-       points = self.getModel().getPoints()
+    def draw(self):
+        points = self.getModel().getPoints()
 
-       # Drawing the points as small rectangles
-       for i in range( len(points) ) :
-          point = points[i]
-          xPoint = float( point[0] )
-          yPoint = float( point[1] )
-          # Constructing a rectangle centered on point
-          pointItem = GraphicsRectItem( xPoint-0.1, yPoint-0.1, 0.2, 0.2, i )
-          self.addItem( pointItem )
-          pass
+        # Drawing the points as small rectangles
+        for i in range(len(points)):
+            point = points[i]
+            xPoint = float(point[0])
+            yPoint = float(point[1])
+            # Constructing a rectangle centered on point
+            pointItem = GraphicsRectItem(xPoint - 0.1, yPoint - 0.1, 0.2, 0.2, i)
+            self.addItem(pointItem)
+            pass
+
+        # Linking the points with lines
+        for i in range(len(points) - 1):
+            current = points[i]
+            next = points[i + 1]
+            xCurrent = float(current[0])
+            yCurrent = float(current[1])
+            xNext = float(next[0])
+            yNext = float(next[1])
+            line = QLineF(xCurrent, yCurrent, xNext, yNext)
+            lineItem = QGraphicsLineItem()
+            lineItem.setLine(line)
+            self.addItem(lineItem)
+            pass
+        pass
 
-       # Linking the points with lines
-       for i in range( len(points) - 1 ) :
-          current = points[i]
-          next = points[i+1]
-          xCurrent = float( current[0] )
-          yCurrent = float( current[1] )
-          xNext = float( next[0] )
-          yNext = float( next[1] )
-          line = QLineF( xCurrent, yCurrent, xNext, yNext )
-          lineItem = QGraphicsLineItem()
-          lineItem.setLine( line )
-          self.addItem( lineItem )
-          pass
-       pass
 
 pass
index 344bf187189b330701b8f00b74136233c53c84b2..8369f903f21b4d7175ee67f00ca6c72ff4d4e689 100755 (executable)
@@ -2,26 +2,27 @@ from View import *
 from TreeWidgetItem import TreeWidgetItem
 from qtsalome import *
 
-class PolyTreeWidgetItem( TreeWidgetItem ) :
 
-   def __init__( self, name, controller, actionsList ) :
-       """Constructor"""
+class PolyTreeWidgetItem(TreeWidgetItem):
+    def __init__(self, name, controller, actionsList):
+        """Constructor"""
 
-       TreeWidgetItem.__init__( self, name, controller, actionsList )
-       pass
+        TreeWidgetItem.__init__(self, name, controller, actionsList)
+        pass
 
-   def editInGlobalTree( self, treeWidgetItem ) :
-       name = self.getModel().getName()
-       treeWidgetItem.setText( 0 , name )
+    def editInGlobalTree(self, treeWidgetItem):
+        name = self.getModel().getName()
+        treeWidgetItem.setText(0, name)
+
+        points = self._model.getPoints()
+        for i in range(len(points)):
+            point = points[i]
+            xPoint = point[0]
+            yPoint = point[1]
+            relatedItem = treeWidgetItem.child(i)
+            relatedItem.setText(0, str(xPoint) + ":" + str(yPoint))
+            pass
+        pass
 
-       points = self._model.getPoints()
-       for i in range( len(points) ) :
-          point = points[i]
-          xPoint = point[0]
-          yPoint = point[1]
-          relatedItem = treeWidgetItem.child( i )
-          relatedItem.setText( 0 , str(xPoint) + ":" + str(yPoint) )
-          pass
-       pass
 
 pass
index c3b1ccd7e26e8b18fd8cd272a1bd182f2d2ae392..1cd30c5edcb89ca4e8b1245b271ffec9474c41c1 100755 (executable)
@@ -19,89 +19,125 @@ sgDesktop = sgPyQt.getDesktop()
 
 #########################################
 
-class TreeWidget( QTreeWidget ) :
 
-   def __init__( self, desktop ) :
-       """Constructor"""
+class TreeWidget(QTreeWidget):
+    def __init__(self, desktop):
+        """Constructor"""
 
-       QTreeWidget.__init__( self )
-       self._desktop = desktop
+        QTreeWidget.__init__(self)
+        self._desktop = desktop
 
-       #Creating popup menu
-       self.setContextMenuPolicy( Qt.CustomContextMenu )
-       self.customContextMenuRequested[QPoint].connect(self.createPopups)
-       pass
+        # Creating popup menu
+        self.setContextMenuPolicy(Qt.CustomContextMenu)
+        self.customContextMenuRequested[QPoint].connect(self.createPopups)
+        pass
 
-   def createPopups( self, point ) :
-       item = self.itemAt( point )
-       if item is None : return
-       self.menu = Menu( item )
-       for action in item.getActionsList():
-          if action == "Show" :
-             self.menu.addAction(action).triggered.connect(self.show)
-             pass
-          elif action == 'Rename' :
-             self.menu.addAction(action).triggered.connect(self.showRenameDialog)
-             pass
-          elif action == 'Delete' :
-             self.menu.addAction(action).triggered.connect(self.delete)
-             pass
-          else :
-             self.menu.addAction(action).triggered.connect(self.showEditDialog)
-             pass
-          pass
-       self. menu.exec_( QCursor.pos() )
-       pass
+    def createPopups(self, point):
+        item = self.itemAt(point)
+        if item is None:
+            return
+        self.menu = Menu(item)
+        for action in item.getActionsList():
+            if action == "Show":
+                self.menu.addAction(action).triggered.connect(self.show)
+                pass
+            elif action == "Rename":
+                self.menu.addAction(action).triggered.connect(self.showRenameDialog)
+                pass
+            elif action == "Delete":
+                self.menu.addAction(action).triggered.connect(self.delete)
+                pass
+            else:
+                self.menu.addAction(action).triggered.connect(self.showEditDialog)
+                pass
+            pass
+        self.menu.exec_(QCursor.pos())
+        pass
 
-   def show( self ) :
-       model = self.menu.getItem().getModel()
-       controller = self._desktop.getController()
-       controller.showModel( model )
-       pass
+    def show(self):
+        model = self.menu.getItem().getModel()
+        controller = self._desktop.getController()
+        controller.showModel(model)
+        pass
 
-   def showRenameDialog( self ) :
-       model = self.menu.getItem().getModel()
-       oldName = model.getName()
-       widgetDialogBox = QDockWidget( sgDesktop )
-       myDialog = RenameDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, model, oldName )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Object renaming" )
-       sgDesktop.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
+    def showRenameDialog(self):
+        model = self.menu.getItem().getModel()
+        oldName = model.getName()
+        widgetDialogBox = QDockWidget(sgDesktop)
+        myDialog = RenameDialog(
+            "www.google.fr",
+            self._desktop.getController(),
+            widgetDialogBox,
+            model,
+            oldName,
+        )
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Object renaming")
+        sgDesktop.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
 
-   def delete( self ) :
-       answer = QMessageBox.question( self, 'Confirmation', 'Do you really want to remove the selected curve ?' , QMessageBox.Yes | QMessageBox.No )
-       if answer == QMessageBox.Yes :
-          model = self.menu.getItem().getModel()
-          controller = self._desktop.getController()
-          controller.removeModel( model )
-          pass
-       pass
+    def delete(self):
+        answer = QMessageBox.question(
+            self,
+            "Confirmation",
+            "Do you really want to remove the selected curve ?",
+            QMessageBox.Yes | QMessageBox.No,
+        )
+        if answer == QMessageBox.Yes:
+            model = self.menu.getItem().getModel()
+            controller = self._desktop.getController()
+            controller.removeModel(model)
+            pass
+        pass
 
-   def showEditDialog( self ) :
-       item = self.menu.getItem()
-       parentItem = item.parent()
-       parentModel = parentItem.getModel()
-       widgetDialogBox = QDockWidget( sgDesktop )
-       if isinstance( parentModel, Polyline ) :
-          pointRange = parentItem.indexOfChild( item )
-          oldPoint = item.text( 0 )
-          myDialog = EditPointDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, parentModel, oldPoint, pointRange )
-          pass
-       elif isinstance( parentModel, Circle ) :
-          selectedRange = parentItem.indexOfChild( item )
-          oldSelected = item.text( 0 )
-          if selectedRange == 0 : myDialog = EditCenterDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, parentModel, oldSelected )
-          elif selectedRange == 1 : myDialog = EditRadiusDialog("www.google.fr",self._desktop.getController(),widgetDialogBox,parentModel,oldSelected)
-          else : pass
-          pass
-       else : pass
+    def showEditDialog(self):
+        item = self.menu.getItem()
+        parentItem = item.parent()
+        parentModel = parentItem.getModel()
+        widgetDialogBox = QDockWidget(sgDesktop)
+        if isinstance(parentModel, Polyline):
+            pointRange = parentItem.indexOfChild(item)
+            oldPoint = item.text(0)
+            myDialog = EditPointDialog(
+                "www.google.fr",
+                self._desktop.getController(),
+                widgetDialogBox,
+                parentModel,
+                oldPoint,
+                pointRange,
+            )
+            pass
+        elif isinstance(parentModel, Circle):
+            selectedRange = parentItem.indexOfChild(item)
+            oldSelected = item.text(0)
+            if selectedRange == 0:
+                myDialog = EditCenterDialog(
+                    "www.google.fr",
+                    self._desktop.getController(),
+                    widgetDialogBox,
+                    parentModel,
+                    oldSelected,
+                )
+            elif selectedRange == 1:
+                myDialog = EditRadiusDialog(
+                    "www.google.fr",
+                    self._desktop.getController(),
+                    widgetDialogBox,
+                    parentModel,
+                    oldSelected,
+                )
+            else:
+                pass
+            pass
+        else:
+            pass
+
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Object edition")
+        sgDesktop.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
 
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Object edition" )
-       sgDesktop.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
 
 pass
index 1527f4490e05929b3f92f479f0752bec53c41ae7..e70758b1c0f3c7ffd01d0aee20c80d861b12c921 100755 (executable)
@@ -1,55 +1,56 @@
 from View import *
 from qtsalome import *
 
-class TreeWidgetItem( View, QTreeWidgetItem ) :
-
-   def __init__( self, name, controller, actionsList ) :
-       """Constructor"""
-
-       View.__init__( self, controller )
-       self._name = [ name ]
-       QTreeWidgetItem.__init__( self, self._name )
-       self._actionsList = actionsList
-       pass
-
-   def getActionsList( self ) :
-       return self._actionsList
-
-   def editCenter( self, center ) :
-       circle = self.getModel()
-       self.getController().editCenter( circle, center )
-       pass
-
-   def editRadius( self, radius ) :
-       circle = self.getModel()
-       self.getController().editRadius( circle, radius )
-       pass
-
-   def update( self, mode ) :
-       if mode == 'creation' :
-          self.addToGlobalTree( self )
-          pass
-       elif mode == 'modification' :
-          self.editInGlobalTree( self )
-          pass
-       elif mode == 'supression' :
-          self.removeFromGlobalTree( self )
-          pass
-       else :
-          return
-
-   def addToGlobalTree( self, treeWidgetItem ) :
-       globalTree = self.getController().getMainFrame().getGlobalTree()
-       globalTree.addTopLevelItem( treeWidgetItem )
-       pass
-
-   def editInGlobalTree( self, treeWidgetItem ) :
-       print('Virtual')
-       pass
-
-   def removeFromGlobalTree( self, treeWidgetItem ) :
-       globalTree = self.getController().getMainFrame().getGlobalTree()
-       globalTree.takeTopLevelItem( globalTree.indexOfTopLevelItem(treeWidgetItem) )
-       pass
+
+class TreeWidgetItem(View, QTreeWidgetItem):
+    def __init__(self, name, controller, actionsList):
+        """Constructor"""
+
+        View.__init__(self, controller)
+        self._name = [name]
+        QTreeWidgetItem.__init__(self, self._name)
+        self._actionsList = actionsList
+        pass
+
+    def getActionsList(self):
+        return self._actionsList
+
+    def editCenter(self, center):
+        circle = self.getModel()
+        self.getController().editCenter(circle, center)
+        pass
+
+    def editRadius(self, radius):
+        circle = self.getModel()
+        self.getController().editRadius(circle, radius)
+        pass
+
+    def update(self, mode):
+        if mode == "creation":
+            self.addToGlobalTree(self)
+            pass
+        elif mode == "modification":
+            self.editInGlobalTree(self)
+            pass
+        elif mode == "supression":
+            self.removeFromGlobalTree(self)
+            pass
+        else:
+            return
+
+    def addToGlobalTree(self, treeWidgetItem):
+        globalTree = self.getController().getMainFrame().getGlobalTree()
+        globalTree.addTopLevelItem(treeWidgetItem)
+        pass
+
+    def editInGlobalTree(self, treeWidgetItem):
+        print("Virtual")
+        pass
+
+    def removeFromGlobalTree(self, treeWidgetItem):
+        globalTree = self.getController().getMainFrame().getGlobalTree()
+        globalTree.takeTopLevelItem(globalTree.indexOfTopLevelItem(treeWidgetItem))
+        pass
+
 
 pass
index 5b7c94efd68e8c8dda607a59ddb394acd1d218fe..0f22580fd68ddac51d7fefe75e3fa1e0f5c643c1 100755 (executable)
@@ -1,33 +1,33 @@
-class View() :
+class View:
+    def __init__(self, controller):
+        """Constructor"""
 
-   def __init__( self, controller ) :
-       """Constructor"""
+        self._model = None
+        self._controller = controller
+        pass
 
-       self._model = None
-       self._controller = controller
-       pass
+    def getModel(self):
+        return self._model
 
-   def getModel( self ) :
-       return self._model
+    def setModel(self, model):
+        self._model = model
+        pass
 
-   def setModel( self, model ) :
-       self._model = model
-       pass
+    def getController(self):
+        return self._controller
 
-   def getController( self ) :
-       return self._controller
+    def setController(self, controller):
+        self._controller = controller
+        pass
 
-   def setController( self, controller ) :
-       self._controller = controller
-       pass
+    def editName(self, name):
+        model = self.getModel()
+        self._controller.editName(model, name)
+        pass
 
-   def editName( self, name ) :
-       model = self.getModel()
-       self._controller.editName( model, name )
-       pass
+    def update(self, mode):
+        print("Virtual method")
+        pass
 
-   def update( self, mode ) :
-       print('Virtual method')
-       pass
 
 pass
index b42de0c99dd5b9ce827c64cc15fd992b2e123be3..81db59176bedea8663879cc3cdd5804c9f8903de 100755 (executable)
@@ -1,10 +1,11 @@
 from Polyline import Polyline
 from Circle import Circle
 
-class Controller() :
+
+class Controller:
     """Manages the Model instances"""
 
-    def __init__( self, MainFrame ) :
+    def __init__(self, MainFrame):
         """Constructor"""
 
         self._models = []
@@ -13,92 +14,93 @@ class Controller() :
         self._nbCircles = 0
         pass
 
-    def getModels( self ) :
+    def getModels(self):
         return self._models
 
-    def getMainFrame( self ) :
+    def getMainFrame(self):
         return self._mainFrame
 
-    def getNbPolylines( self ) :
+    def getNbPolylines(self):
         return self._nbPolylines
 
-    def setNbPolylines( self, n ) :
+    def setNbPolylines(self, n):
         self._nbPolylines = n
         pass
 
-    def getNbCircles( self ) :
+    def getNbCircles(self):
         return self._nbCircles
 
-    def setNbCircles( self, n ) :
+    def setNbCircles(self, n):
         self._nbCircles = n
         pass
 
-    def createPolyline( self, name, randomNumberOfPoints ) :
+    def createPolyline(self, name, randomNumberOfPoints):
         """Creates a Polyline object nammed name with randomNumberOfPoints points"""
 
         import random
 
         # Making randomNumberOfPoints randomly positioned points
         points = []
-        x = random.uniform( 0, randomNumberOfPoints )
-        for i in range( randomNumberOfPoints ) :
-           x = random.uniform( x, x+randomNumberOfPoints )
-           y = random.uniform( 0, x )
-           point = x, y
-           points.append( point )
-           pass
-
-        myPolyline = Polyline( name, points, self )
-        self._models.append( myPolyline )
-        myPolyline.updateViews( mode = 'creation' )
-
-        self._nbPolylines +=1
+        x = random.uniform(0, randomNumberOfPoints)
+        for i in range(randomNumberOfPoints):
+            x = random.uniform(x, x + randomNumberOfPoints)
+            y = random.uniform(0, x)
+            point = x, y
+            points.append(point)
+            pass
+
+        myPolyline = Polyline(name, points, self)
+        self._models.append(myPolyline)
+        myPolyline.updateViews(mode="creation")
+
+        self._nbPolylines += 1
         return myPolyline
 
-    def createCircle( self, name, center, radius ) :
+    def createCircle(self, name, center, radius):
         """Creates a Circle object nammed name with center and radius"""
 
-        myCircle = Circle( name, center, radius, self )
-        self._models.append( myCircle )
-        myCircle.updateViews( mode = 'creation' )
+        myCircle = Circle(name, center, radius, self)
+        self._models.append(myCircle)
+        myCircle.updateViews(mode="creation")
 
-        self._nbCircles +=1
+        self._nbCircles += 1
         return myCircle
 
-    def showModel( self, model ) :
-        model.updateViews( mode = 'showing' )
+    def showModel(self, model):
+        model.updateViews(mode="showing")
         pass
 
-    def editName( self, model, name ) :
-        model.setName( name )
-        model.updateViews( mode = 'modification' )
+    def editName(self, model, name):
+        model.setName(name)
+        model.updateViews(mode="modification")
         return model
 
-    def editPoint( self, polyline, newPoint, pointRange ) :
-        polyline.editPoint( pointRange, newPoint )
-        polyline.updateViews( mode = 'modification' )
+    def editPoint(self, polyline, newPoint, pointRange):
+        polyline.editPoint(pointRange, newPoint)
+        polyline.updateViews(mode="modification")
         return polyline
 
-    def editCenter( self, circle, center ) :
-        circle.setCenter( center )
-        circle.updateViews( mode = 'modification' )
+    def editCenter(self, circle, center):
+        circle.setCenter(center)
+        circle.updateViews(mode="modification")
         return circle
 
-    def editRadius( self, circle, radius ) :
-        circle.setRadius( radius )
-        circle.updateViews( mode = 'modification' )
+    def editRadius(self, circle, radius):
+        circle.setRadius(radius)
+        circle.updateViews(mode="modification")
         return circle
 
-    def removeModel( self, model ) :
-        model.updateViews( mode = 'supression' )
-        index = self._models.index( model )
+    def removeModel(self, model):
+        model.updateViews(mode="supression")
+        index = self._models.index(model)
         del model
         pass
 
-    def saveListOfModels( self ) :
-        for model in self._models :
-           model.save()
-           pass
+    def saveListOfModels(self):
+        for model in self._models:
+            model.save()
+            pass
         pass
 
+
 pass
index d4e408ab8c0d732d23f8a7995b0a78b476997e68..1cc1818172eb70f674be041866a2f54837eaa9b9 100755 (executable)
@@ -1,70 +1,76 @@
 from Dialog import *
 from qtsalome import *
 
-class CreateCircleDialog( Dialog ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox ) :
-       """Constructor"""
+class CreateCircleDialog(Dialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
 
-       # Initializing parent widget
-       Dialog.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        Dialog.__init__(self, helpFile, controller, widgetDialogBox)
 
-       # Setting default name
-       nbCircles = controller.getNbCircles()
-       self.entryName.setText( "circle_" + str(nbCircles+1) )
-       pass
+        # Setting default name
+        nbCircles = controller.getNbCircles()
+        self.entryName.setText("circle_" + str(nbCircles + 1))
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lxCenter = QLabel( "xCenter", self )
-       self.v11.addWidget( lxCenter )
-       lyCenter = QLabel( "yCenter", self )
-       self.v11.addWidget( lyCenter )
-       lRadius = QLabel( "Radius", self )
-       self.v11.addWidget( lRadius )
+        lxCenter = QLabel("xCenter", self)
+        self.v11.addWidget(lxCenter)
+        lyCenter = QLabel("yCenter", self)
+        self.v11.addWidget(lyCenter)
+        lRadius = QLabel("Radius", self)
+        self.v11.addWidget(lRadius)
 
-       self.entryxCenter = QLineEdit( self )
-       self.entryxCenter.setValidator( floatValidator )
-       self.entryxCenter.setText( "0" )
-       self.v12.addWidget( self.entryxCenter )
-       self.entryyCenter = QLineEdit( self )
-       self.entryyCenter.setValidator( floatValidator )
-       self.entryyCenter.setText( "0" )
-       self.v12.addWidget( self.entryyCenter )
-       self.entryRadius = QLineEdit( self )
-       self.entryRadius.setValidator( floatValidator )
-       self.entryRadius.setText( "10" )
-       self.v12.addWidget( self.entryRadius)
-       pass
+        self.entryxCenter = QLineEdit(self)
+        self.entryxCenter.setValidator(floatValidator)
+        self.entryxCenter.setText("0")
+        self.v12.addWidget(self.entryxCenter)
+        self.entryyCenter = QLineEdit(self)
+        self.entryyCenter.setValidator(floatValidator)
+        self.entryyCenter.setText("0")
+        self.v12.addWidget(self.entryyCenter)
+        self.entryRadius = QLineEdit(self)
+        self.entryRadius.setValidator(floatValidator)
+        self.entryRadius.setText("10")
+        self.v12.addWidget(self.entryRadius)
+        pass
 
-   def execApply( self ) :
-       name = self.name
-       center = float(self.xCenter), float(self.yCenter)
-       radius = float( self.radius )
-       self.getController().createCircle( name, center, radius )
-       self.reInitializeDialog()
-       return
+    def execApply(self):
+        name = self.name
+        center = float(self.xCenter), float(self.yCenter)
+        radius = float(self.radius)
+        self.getController().createCircle(name, center, radius)
+        self.reInitializeDialog()
+        return
 
-   def retrieveUserEntries( self ) :
-       self.name = str( self.entryName.text() )
-       self.xCenter = str( self.entryxCenter.text() )
-       self.yCenter = str( self.entryyCenter.text() )
-       self.radius = str( self.entryRadius.text() )
-       pass
+    def retrieveUserEntries(self):
+        self.name = str(self.entryName.text())
+        self.xCenter = str(self.entryxCenter.text())
+        self.yCenter = str(self.entryyCenter.text())
+        self.radius = str(self.entryRadius.text())
+        pass
 
-   def checkUserEntries( self ) :
-       if self.name == "" or self.xCenter == "" or self.yCenter == "" or self.radius == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+    def checkUserEntries(self):
+        if (
+            self.name == ""
+            or self.xCenter == ""
+            or self.yCenter == ""
+            or self.radius == ""
+        ):
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
+    def reInitializeDialog(self):
+        nbCircles = self.getController().getNbCircles()
+        self.entryName.setText("circle_" + str(nbCircles + 1))
+        self.entryxCenter.setText("0")
+        self.entryyCenter.setText("0")
+        self.entryRadius.setText("10")
+        pass
 
-   def reInitializeDialog( self ) :
-       nbCircles = self.getController().getNbCircles()
-       self.entryName.setText( "circle_" + str(nbCircles+1) )
-       self.entryxCenter.setText( "0" )
-       self.entryyCenter.setText( "0" )
-       self.entryRadius.setText( "10" )
-       pass
 
 pass
index f0a92a6d02089a8a944cb83c196a0df1be14d61d..e09946a62dac0c3b4f3e1dc9ed4e4c249dfdc93b 100755 (executable)
@@ -1,58 +1,58 @@
 from Dialog import Dialog
 from qtsalome import *
 
-class CreatePolylineDialog( Dialog ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox  ) :
-       """Constructor"""
-
-       #Initializing parent widget
-       Dialog.__init__( self, helpFile, controller, widgetDialogBox )
-
-       #Setting default name
-       nbPolylines = controller.getNbPolylines()
-       self.entryName.setText( "polyline_" + str(nbPolylines+1) )
-       pass
-
-   def addSpecialWidgets( self ) :
-
-       intValidator = QIntValidator( self )
-
-       lNbPoints = QLabel( "Number of points", self )
-       self.v11.addWidget( lNbPoints )
-
-       self.entryNbPoints = QLineEdit( self )
-       self.entryNbPoints.setValidator( intValidator )
-       self.entryNbPoints.setText( "10" )
-       self.v12.addWidget( self.entryNbPoints )
-       pass
-
-   def execApply( self ) :
-       name = self.name
-       nbPoints = int( self.nbPoints )
-       self.getController().createPolyline( name, nbPoints )
-       self.reInitializeDialog()
-       return
-
-
-   def retrieveUserEntries( self ) :
-       self.name = str( self.entryName.text() )
-       self.nbPoints = str( self.entryNbPoints.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.name == "" or self.nbPoints == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       if int( self.nbPoints ) > 10 :
-          self.errMessage = 'The number of points must not exceed 10'
-          return False
-       return True
-
-   def reInitializeDialog( self ) :
-       nbPolylines = self.getController().getNbPolylines()
-       self.entryName.setText( "polyline_" + str(nbPolylines+1) )
-       self.entryNbPoints.setText( "10" )
-       pass
+
+class CreatePolylineDialog(Dialog):
+    def __init__(self, helpFile, controller, widgetDialogBox):
+        """Constructor"""
+
+        # Initializing parent widget
+        Dialog.__init__(self, helpFile, controller, widgetDialogBox)
+
+        # Setting default name
+        nbPolylines = controller.getNbPolylines()
+        self.entryName.setText("polyline_" + str(nbPolylines + 1))
+        pass
+
+    def addSpecialWidgets(self):
+
+        intValidator = QIntValidator(self)
+
+        lNbPoints = QLabel("Number of points", self)
+        self.v11.addWidget(lNbPoints)
+
+        self.entryNbPoints = QLineEdit(self)
+        self.entryNbPoints.setValidator(intValidator)
+        self.entryNbPoints.setText("10")
+        self.v12.addWidget(self.entryNbPoints)
+        pass
+
+    def execApply(self):
+        name = self.name
+        nbPoints = int(self.nbPoints)
+        self.getController().createPolyline(name, nbPoints)
+        self.reInitializeDialog()
+        return
+
+    def retrieveUserEntries(self):
+        self.name = str(self.entryName.text())
+        self.nbPoints = str(self.entryNbPoints.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.name == "" or self.nbPoints == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        if int(self.nbPoints) > 10:
+            self.errMessage = "The number of points must not exceed 10"
+            return False
+        return True
+
+    def reInitializeDialog(self):
+        nbPolylines = self.getController().getNbPolylines()
+        self.entryName.setText("polyline_" + str(nbPolylines + 1))
+        self.entryNbPoints.setText("10")
+        pass
+
 
 pass
index 1e3a10001077291bc4db1009b1c5f71563c9fc8e..d6a9d03a750337366ad9b6f64268b485c24a575f 100755 (executable)
@@ -1,63 +1,64 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditCenterDialog( DialogEdit ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldCenter ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
-
-       self._model = model
-
-       # Reading oldX and oldY
-       oldX = ""
-       oldY = ""
-       i = 0
-       while oldCenter[i] != ':' :
-          oldX += oldCenter[i]
-          i += 1
-          pass
-       for j in range( i+1, len(oldCenter) ) :
-          oldY += oldCenter[j]
-          pass
-       self.entryX.setText( oldX )
-       self.entryY.setText( oldY )
-       pass
-
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
-
-       lX = QLabel( "X", self )
-       self.v11.addWidget( lX )
-       lY = QLabel( "Y", self )
-       self.v11.addWidget( lY )
-
-       self.entryX = QLineEdit( self )
-       self.entryX.setValidator( floatValidator )
-       self.v12.addWidget( self.entryX )
-       self.entryY = QLineEdit( self )
-       self.entryY.setValidator( floatValidator )
-       self.v12.addWidget( self.entryY )
-       pass
-
-   def execApply( self ) :
-       newX = float( self.newX )
-       newY = float( self.newY )
-       newCenter = newX, newY
-       self.getController().editCenter( self._model, newCenter )
-       return
-
-   def retrieveUserEntries( self ) :
-       self.newX= str( self.entryX.text() )
-       self.newY= str( self.entryY.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.newX == "" or self.newY == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+
+class EditCenterDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldCenter):
+        """Constructor"""
+
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
+
+        self._model = model
+
+        # Reading oldX and oldY
+        oldX = ""
+        oldY = ""
+        i = 0
+        while oldCenter[i] != ":":
+            oldX += oldCenter[i]
+            i += 1
+            pass
+        for j in range(i + 1, len(oldCenter)):
+            oldY += oldCenter[j]
+            pass
+        self.entryX.setText(oldX)
+        self.entryY.setText(oldY)
+        pass
+
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
+
+        lX = QLabel("X", self)
+        self.v11.addWidget(lX)
+        lY = QLabel("Y", self)
+        self.v11.addWidget(lY)
+
+        self.entryX = QLineEdit(self)
+        self.entryX.setValidator(floatValidator)
+        self.v12.addWidget(self.entryX)
+        self.entryY = QLineEdit(self)
+        self.entryY.setValidator(floatValidator)
+        self.v12.addWidget(self.entryY)
+        pass
+
+    def execApply(self):
+        newX = float(self.newX)
+        newY = float(self.newY)
+        newCenter = newX, newY
+        self.getController().editCenter(self._model, newCenter)
+        return
+
+    def retrieveUserEntries(self):
+        self.newX = str(self.entryX.text())
+        self.newY = str(self.entryY.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.newX == "" or self.newY == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
 
 pass
index 00702ad4788889ad4a19abcaafdae5a0ab16c5c0..1c83890cf400578c05d92368e4cea183eaff01ac 100755 (executable)
@@ -1,66 +1,68 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditPointDialog( DialogEdit ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldPoint, pointRange  ) :
-       """Constructor"""
+class EditPointDialog(DialogEdit):
+    def __init__(
+        self, helpFile, controller, widgetDialogBox, model, oldPoint, pointRange
+    ):
+        """Constructor"""
 
-       #Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
 
-       self._model = model
+        self._model = model
 
-       #Reading oldX and oldY
-       oldX = ""
-       oldY = ""
-       i = 0
-       while oldPoint[i] != ':' :
-          oldX += oldPoint[i]
-          i += 1
-          pass
-       for j in range( i+1, len(oldPoint) ) :
-          oldY += oldPoint[j]
-          pass
-       self.pointRange = pointRange
-       self.entryX.setText( oldX )
-       self.entryY.setText( oldY )
-       pass
+        # Reading oldX and oldY
+        oldX = ""
+        oldY = ""
+        i = 0
+        while oldPoint[i] != ":":
+            oldX += oldPoint[i]
+            i += 1
+            pass
+        for j in range(i + 1, len(oldPoint)):
+            oldY += oldPoint[j]
+            pass
+        self.pointRange = pointRange
+        self.entryX.setText(oldX)
+        self.entryY.setText(oldY)
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lX = QLabel( "X", self )
-       self.v11.addWidget( lX )
-       lY = QLabel( "Y", self )
-       self.v11.addWidget( lY )
+        lX = QLabel("X", self)
+        self.v11.addWidget(lX)
+        lY = QLabel("Y", self)
+        self.v11.addWidget(lY)
 
-       self.entryX = QLineEdit( self )
-       self.entryX.setValidator( floatValidator )
-       self.v12.addWidget( self.entryX )
-       self.entryY = QLineEdit( self )
-       self.entryY.setValidator( floatValidator )
-       self.v12.addWidget( self.entryY )
-       pass
+        self.entryX = QLineEdit(self)
+        self.entryX.setValidator(floatValidator)
+        self.v12.addWidget(self.entryX)
+        self.entryY = QLineEdit(self)
+        self.entryY.setValidator(floatValidator)
+        self.v12.addWidget(self.entryY)
+        pass
 
-   def execApply( self ) :
-       pointRange = self.pointRange
-       newX = float( self.newX )
-       newY = float( self.newY )
-       newPoint = newX, newY
-       self.getController().editPoint( self._model, newPoint, pointRange )
-       return
+    def execApply(self):
+        pointRange = self.pointRange
+        newX = float(self.newX)
+        newY = float(self.newY)
+        newPoint = newX, newY
+        self.getController().editPoint(self._model, newPoint, pointRange)
+        return
 
+    def retrieveUserEntries(self):
+        self.newX = str(self.entryX.text())
+        self.newY = str(self.entryY.text())
+        pass
 
-   def retrieveUserEntries( self ) :
-       self.newX= str( self.entryX.text() )
-       self.newY= str( self.entryY.text() )
-       pass
+    def checkUserEntries(self):
+        if self.newX == "" or self.newY == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
 
-   def checkUserEntries( self ) :
-       if self.newX == "" or self.newY == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
 
 pass
index 69c648ebd85836013db74a67d6f150509430b40b..a9c43c8c5971c548f66c5543bf0760c7a7d740b3 100755 (executable)
@@ -1,42 +1,42 @@
 from DialogEdit import *
 from qtsalome import *
 
-class EditRadiusDialog( DialogEdit ) :
 
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldRadius  ) :
-       """Constructor"""
+class EditRadiusDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldRadius):
+        """Constructor"""
 
-       #Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
 
-       self._model = model
-       self.entryRadius.setText( oldRadius )
-       pass
+        self._model = model
+        self.entryRadius.setText(oldRadius)
+        pass
 
-   def addSpecialWidgets( self ) :
-       floatValidator = QDoubleValidator( self )
+    def addSpecialWidgets(self):
+        floatValidator = QDoubleValidator(self)
 
-       lRadius = QLabel( "Radius", self )
-       self.v11.addWidget( lRadius )
-       self.entryRadius = QLineEdit( self )
-       self.entryRadius.setValidator( floatValidator )
-       self.v12.addWidget( self.entryRadius )
-       pass
+        lRadius = QLabel("Radius", self)
+        self.v11.addWidget(lRadius)
+        self.entryRadius = QLineEdit(self)
+        self.entryRadius.setValidator(floatValidator)
+        self.v12.addWidget(self.entryRadius)
+        pass
 
-   def execApply( self ) :
-       newRadius = self.newRadius
-       self.getController().editRadius( self._model, newRadius )
-       return
+    def execApply(self):
+        newRadius = self.newRadius
+        self.getController().editRadius(self._model, newRadius)
+        return
 
+    def retrieveUserEntries(self):
+        self.newRadius = str(self.entryRadius.text())
+        pass
 
-   def retrieveUserEntries( self ) :
-       self.newRadius = str( self.entryRadius.text() )
-       pass
+    def checkUserEntries(self):
+        if self.newRadius == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
 
-   def checkUserEntries( self ) :
-       if self.newRadius == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
 
 pass
index dddcc2da263d7ca62c579c72baead043e1cbde8a..6ff291bc22e9e90c01dec7c467faff71ab1946c5 100755 (executable)
@@ -1,38 +1,39 @@
 from DialogEdit import *
 from qtsalome import *
 
-class RenameDialog( DialogEdit ) :
-
-   def __init__( self, helpFile, controller, widgetDialogBox, model, oldName  ) :
-       """Constructor"""
-
-       # Initializing parent widget
-       DialogEdit.__init__( self, helpFile, controller, widgetDialogBox )
-
-       self._model = model
-       self.entryName.setText( oldName )
-       pass
-
-   def addSpecialWidgets( self ) :
-       lName = QLabel( "Name", self )
-       self.v11.addWidget( lName )
-       self.entryName = QLineEdit( self )
-       self.v12.addWidget( self.entryName )
-       pass
-
-   def execApply( self ) :
-       newName = self.newName
-       self.getController().editName( self._model, newName )
-       return
-
-   def retrieveUserEntries( self ) :
-       self.newName = str( self.entryName.text() )
-       pass
-
-   def checkUserEntries( self ) :
-       if self.newName == "" :
-          self.errMessage = 'All attributes must be filled'
-          return False
-       return True
+
+class RenameDialog(DialogEdit):
+    def __init__(self, helpFile, controller, widgetDialogBox, model, oldName):
+        """Constructor"""
+
+        # Initializing parent widget
+        DialogEdit.__init__(self, helpFile, controller, widgetDialogBox)
+
+        self._model = model
+        self.entryName.setText(oldName)
+        pass
+
+    def addSpecialWidgets(self):
+        lName = QLabel("Name", self)
+        self.v11.addWidget(lName)
+        self.entryName = QLineEdit(self)
+        self.v12.addWidget(self.entryName)
+        pass
+
+    def execApply(self):
+        newName = self.newName
+        self.getController().editName(self._model, newName)
+        return
+
+    def retrieveUserEntries(self):
+        self.newName = str(self.entryName.text())
+        pass
+
+    def checkUserEntries(self):
+        if self.newName == "":
+            self.errMessage = "All attributes must be filled"
+            return False
+        return True
+
 
 pass
index bbd59e093c09b5d009df5b8703e0d0c0a1b95668..0f252b8b4a615240ed8cfdbd128caf8f2ba3e214 100755 (executable)
@@ -2,57 +2,64 @@ from Model import *
 from qtsalome import *
 
 __all__ = [
-           "Circle",
-          ]
-
-class Circle( Model ):
-
-   def __init__( self, name, center, radius, controller ):
-       """Constructor"""
-
-       Model.__init__( self,controller )
-       self._name = name
-       self._center = center
-       self._radius = radius
-       self.addTreeWidgetItem( self.getName(), controller )
-       self.addGraphicScene( controller )
-       pass
-
-   def getCenter( self ):
-       return self._center[0], self._center[1]
-
-   def setCenter( self, center ):
-       self._center = center
-       pass
-
-   def getRadius( self ):
-       return self._radius
-
-   def setRadius( self, radius ):
-       self._radius = radius
-
-   def addTreeWidgetItem( self, name, controller ):
-       from CircleTreeWidgetItem import CircleTreeWidgetItem
-       from TreeWidgetItem import TreeWidgetItem
-
-       myTreeWidgetItem = CircleTreeWidgetItem( name, controller, ["Show", "Rename", "Delete"] )
-       newTreeWidgetItem = TreeWidgetItem( str(self.getCenter()[0]) + ':' + str(self.getCenter()[1]), controller, ["Edit"] )
-       myTreeWidgetItem.addChild( newTreeWidgetItem )
-       newTreeWidgetItem = TreeWidgetItem( str(self.getRadius()), controller, ["Edit"] )
-       myTreeWidgetItem.addChild( newTreeWidgetItem )
-       myTreeWidgetItem.setModel( self )
-       self.getViews().append( myTreeWidgetItem )
-       return myTreeWidgetItem
-
-   def addGraphicScene( self, controller ) :
-       from CircleGraphicsScene import CircleGraphicsScene
-
-       myGraphicsScene = CircleGraphicsScene( controller )
-       myGraphicsScene.setModel( self )
-       self.getViews().append( myGraphicsScene )
-       return myGraphicsScene
-
-   def save( self ):
-       pass
+    "Circle",
+]
+
+
+class Circle(Model):
+    def __init__(self, name, center, radius, controller):
+        """Constructor"""
+
+        Model.__init__(self, controller)
+        self._name = name
+        self._center = center
+        self._radius = radius
+        self.addTreeWidgetItem(self.getName(), controller)
+        self.addGraphicScene(controller)
+        pass
+
+    def getCenter(self):
+        return self._center[0], self._center[1]
+
+    def setCenter(self, center):
+        self._center = center
+        pass
+
+    def getRadius(self):
+        return self._radius
+
+    def setRadius(self, radius):
+        self._radius = radius
+
+    def addTreeWidgetItem(self, name, controller):
+        from CircleTreeWidgetItem import CircleTreeWidgetItem
+        from TreeWidgetItem import TreeWidgetItem
+
+        myTreeWidgetItem = CircleTreeWidgetItem(
+            name, controller, ["Show", "Rename", "Delete"]
+        )
+        newTreeWidgetItem = TreeWidgetItem(
+            str(self.getCenter()[0]) + ":" + str(self.getCenter()[1]),
+            controller,
+            ["Edit"],
+        )
+        myTreeWidgetItem.addChild(newTreeWidgetItem)
+        newTreeWidgetItem = TreeWidgetItem(str(self.getRadius()), controller, ["Edit"])
+        myTreeWidgetItem.addChild(newTreeWidgetItem)
+        myTreeWidgetItem.setModel(self)
+        self.getViews().append(myTreeWidgetItem)
+        return myTreeWidgetItem
+
+    def addGraphicScene(self, controller):
+        from CircleGraphicsScene import CircleGraphicsScene
+
+        myGraphicsScene = CircleGraphicsScene(controller)
+        myGraphicsScene.setModel(self)
+        self.getViews().append(myGraphicsScene)
+        return myGraphicsScene
+
+    def save(self):
+        pass
+
 
 pass
index 9a6bd49b4d439ee26d0ea177c76b1841765aa528..810ffcb7fecda111f269857a260529561f5557ca 100755 (executable)
@@ -1,54 +1,59 @@
 from Model import *
 from qtsalome import *
 
-class Polyline( Model ):
-
-   def __init__( self, name, points, controller ):
-       """Constructor"""
-
-       Model.__init__( self, controller )
-       self._name = name
-       self._points = points
-       self.addTreeWidgetItem( self.getName(), controller )
-       self.addGraphicScene( controller )
-       pass
-
-   def getPoints( self ):
-       return self._points
-
-   def setPoints( self, points ):
-       self._points = points
-       pass
-
-   def editPoint( self, pointRange, newPoint ) :
-       self._points[ pointRange ] = newPoint
-       pass
-
-   def addTreeWidgetItem( self, name, controller ):
-       from PolyTreeWidgetItem import PolyTreeWidgetItem
-       from TreeWidgetItem import TreeWidgetItem
-
-       myTreeWidgetItem = PolyTreeWidgetItem( name, controller, ["Show", "Rename", "Delete"] )
-       # Adding list of points
-       for point in self.getPoints() :
-          x = point[0]
-          y = point[1]
-          newTreeWidgetItem = TreeWidgetItem( str(x) + ":" + str(y), controller, ["Edit"] )
-          myTreeWidgetItem.addChild( newTreeWidgetItem )
-          pass
-       myTreeWidgetItem.setModel( self )
-       self.getViews().append( myTreeWidgetItem )
-       return myTreeWidgetItem
-
-   def addGraphicScene( self, controller ) :
-       from PolyGraphicsScene import PolyGraphicsScene
-
-       myGraphicsScene = PolyGraphicsScene( controller )
-       myGraphicsScene.setModel( self )
-       self.getViews().append( myGraphicsScene )
-       return myGraphicsScene
-
-   def save( self ):
-       pass
+
+class Polyline(Model):
+    def __init__(self, name, points, controller):
+        """Constructor"""
+
+        Model.__init__(self, controller)
+        self._name = name
+        self._points = points
+        self.addTreeWidgetItem(self.getName(), controller)
+        self.addGraphicScene(controller)
+        pass
+
+    def getPoints(self):
+        return self._points
+
+    def setPoints(self, points):
+        self._points = points
+        pass
+
+    def editPoint(self, pointRange, newPoint):
+        self._points[pointRange] = newPoint
+        pass
+
+    def addTreeWidgetItem(self, name, controller):
+        from PolyTreeWidgetItem import PolyTreeWidgetItem
+        from TreeWidgetItem import TreeWidgetItem
+
+        myTreeWidgetItem = PolyTreeWidgetItem(
+            name, controller, ["Show", "Rename", "Delete"]
+        )
+        # Adding list of points
+        for point in self.getPoints():
+            x = point[0]
+            y = point[1]
+            newTreeWidgetItem = TreeWidgetItem(
+                str(x) + ":" + str(y), controller, ["Edit"]
+            )
+            myTreeWidgetItem.addChild(newTreeWidgetItem)
+            pass
+        myTreeWidgetItem.setModel(self)
+        self.getViews().append(myTreeWidgetItem)
+        return myTreeWidgetItem
+
+    def addGraphicScene(self, controller):
+        from PolyGraphicsScene import PolyGraphicsScene
+
+        myGraphicsScene = PolyGraphicsScene(controller)
+        myGraphicsScene.setModel(self)
+        self.getViews().append(myGraphicsScene)
+        return myGraphicsScene
+
+    def save(self):
+        pass
+
 
 pass
index 69256a5d0d51f0b64dec0f83ac525800e447f7ad..be011f94a1a2eb70604d843b125b615be6cd49b2 100755 (executable)
-from PyQt5.QtCore    import *
-from PyQt5.QtGui     import *
+from PyQt5.QtCore import *
+from PyQt5.QtGui import *
 from PyQt5.QtWidgets import *
 
 from TreeWidget import TreeWidget
 from GraphicsView import GraphicsView
 from GraphicsScene import GraphicsScene
 
-class Desktop( QMainWindow ) :
-
-   def __init__( self ) :
-       """Constructor"""
-
-       QMainWindow.__init__( self )
-       self._controller = None
-
-       # Creating a dockWidget which will contain globalTree
-       self._globalTree= TreeWidget( self )
-       self._globalTree.setHeaderLabel ( "Object browser" )
-       dockGlobalTree = QDockWidget( "Tree view", self )
-       dockGlobalTree.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       dockGlobalTree.setWidget( self._globalTree )
-       self.addDockWidget( Qt.LeftDockWidgetArea, dockGlobalTree )
-
-       # Creating a central widget which contains the globalGraphicsView
-       self._dockGlobalView = QDockWidget( "Graphics view", self )
-       scene = GraphicsScene( self._controller )
-       self._globalGraphicsView = GraphicsView( scene )
-       self._dockGlobalView.setWidget( self._globalGraphicsView )
-       self._globalGraphicsView.show()
-       self.setCentralWidget( self._dockGlobalView )
-
-       # Creating menus and toolbars
-       self.createMenus()
-       self.createToolBars()
-       pass
-
-   def getController( self ) :
-       return self._controller
-
-   def setController( self, controller ) :
-       self._controller = controller
-       pass
-
-   def getGlobalTree( self ) :
-       return self._globalTree
-
-   def createMenus( self ) :
-       # Creating menus
-       curveMenu = self.menuBar().addMenu( "Curve" )
-       toolsMenu = self.menuBar().addMenu( "Tools" )
-       # Adding actions
-       createPolylineAction = QAction( "Polyline", self )
-       createCircleAction = QAction( "Circle", self )
-       curveMenu.addAction( createPolylineAction )
-       curveMenu.addAction( createCircleAction )
-
-       deleteAllAction = QAction( "Delete all", self )
-       toolsMenu.addAction( deleteAllAction )
-       # Connecting slots
-       createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
-       createCircleAction.triggered.connect(self.showCreateCircleDialog)
-       deleteAllAction.triggered.connect(self.deleteAll)
-       pass
-
-   def createToolBars( self ) :
-       # Creating toolBars
-       createPolylineTB = self.addToolBar( "New polyline")
-       createCircleTB = self.addToolBar( "New circle")
-       createPolylineAction = QAction( "Polyline", self )
-       createCircleAction = QAction( "Circle", self )
-       # Adding actions
-       createPolylineTB.addAction( createPolylineAction )
-       createCircleTB.addAction( createCircleAction )
-       # Connecting slots
-       createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
-       createCircleAction.triggered.connect(self.showCreateCircleDialog)
-       pass
-
-   def showCreatePolylineDialog( self ) :
-       from CreatePolylineDialog import CreatePolylineDialog
-
-       widgetDialogBox = QDockWidget( "myDockWidget", self )
-       myDialog = CreatePolylineDialog( "www.google.fr", self._controller, widgetDialogBox )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Polyline definition" )
-       self.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
-
-   def showCreateCircleDialog( self ) :
-       from CreateCircleDialog import CreateCircleDialog
-
-       widgetDialogBox = QDockWidget( "myDockWidget", self )
-       myDialog = CreateCircleDialog( "www.cea.fr", self._controller, widgetDialogBox )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Polyline definition" )
-       self.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
-
-   def deleteAll( self ) :
-       models = self.getController().getModels()
-       if len( models ) == 0 : return
-       answer = QMessageBox.question( self, 'Confirmation', 'Do you really want to delete all the existing objects ?' , QMessageBox.Yes | QMessageBox.No )
-       if answer == QMessageBox.Yes :
-          for model in models :
-             self.getController().removeModel( model )
-             pass
-          pass
-       pass
-
-   def updateGlobalGraphicsView( self, scene ) :
-       self._globalGraphicsView.setScene( scene )
-       if scene is None :
-          self._dockGlobalView.setWindowTitle( "Graphics view" )
-          return
-       self._dockGlobalView.setWindowTitle( "Graphics view : showing " + scene.getModel().getName() )
-       #Resizing the globalGraphicView
-       sceneRect = scene.getRect()
-       topLeft = sceneRect.topLeft()
-       viewRect = QRectF( topLeft.x(), topLeft.y(), 2*sceneRect.width(), 2*sceneRect.height() )
-       self._globalGraphicsView.fitInView ( viewRect, Qt.IgnoreAspectRatio )
-       pass
+
+class Desktop(QMainWindow):
+    def __init__(self):
+        """Constructor"""
+
+        QMainWindow.__init__(self)
+        self._controller = None
+
+        # Creating a dockWidget which will contain globalTree
+        self._globalTree = TreeWidget(self)
+        self._globalTree.setHeaderLabel("Object browser")
+        dockGlobalTree = QDockWidget("Tree view", self)
+        dockGlobalTree.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        dockGlobalTree.setWidget(self._globalTree)
+        self.addDockWidget(Qt.LeftDockWidgetArea, dockGlobalTree)
+
+        # Creating a central widget which contains the globalGraphicsView
+        self._dockGlobalView = QDockWidget("Graphics view", self)
+        scene = GraphicsScene(self._controller)
+        self._globalGraphicsView = GraphicsView(scene)
+        self._dockGlobalView.setWidget(self._globalGraphicsView)
+        self._globalGraphicsView.show()
+        self.setCentralWidget(self._dockGlobalView)
+
+        # Creating menus and toolbars
+        self.createMenus()
+        self.createToolBars()
+        pass
+
+    def getController(self):
+        return self._controller
+
+    def setController(self, controller):
+        self._controller = controller
+        pass
+
+    def getGlobalTree(self):
+        return self._globalTree
+
+    def createMenus(self):
+        # Creating menus
+        curveMenu = self.menuBar().addMenu("Curve")
+        toolsMenu = self.menuBar().addMenu("Tools")
+        # Adding actions
+        createPolylineAction = QAction("Polyline", self)
+        createCircleAction = QAction("Circle", self)
+        curveMenu.addAction(createPolylineAction)
+        curveMenu.addAction(createCircleAction)
+
+        deleteAllAction = QAction("Delete all", self)
+        toolsMenu.addAction(deleteAllAction)
+        # Connecting slots
+        createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
+        createCircleAction.triggered.connect(self.showCreateCircleDialog)
+        deleteAllAction.triggered.connect(self.deleteAll)
+        pass
+
+    def createToolBars(self):
+        # Creating toolBars
+        createPolylineTB = self.addToolBar("New polyline")
+        createCircleTB = self.addToolBar("New circle")
+        createPolylineAction = QAction("Polyline", self)
+        createCircleAction = QAction("Circle", self)
+        # Adding actions
+        createPolylineTB.addAction(createPolylineAction)
+        createCircleTB.addAction(createCircleAction)
+        # Connecting slots
+        createPolylineAction.triggered.connect(self.showCreatePolylineDialog)
+        createCircleAction.triggered.connect(self.showCreateCircleDialog)
+        pass
+
+    def showCreatePolylineDialog(self):
+        from CreatePolylineDialog import CreatePolylineDialog
+
+        widgetDialogBox = QDockWidget("myDockWidget", self)
+        myDialog = CreatePolylineDialog(
+            "www.google.fr", self._controller, widgetDialogBox
+        )
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Polyline definition")
+        self.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
+
+    def showCreateCircleDialog(self):
+        from CreateCircleDialog import CreateCircleDialog
+
+        widgetDialogBox = QDockWidget("myDockWidget", self)
+        myDialog = CreateCircleDialog("www.cea.fr", self._controller, widgetDialogBox)
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Polyline definition")
+        self.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
+
+    def deleteAll(self):
+        models = self.getController().getModels()
+        if len(models) == 0:
+            return
+        answer = QMessageBox.question(
+            self,
+            "Confirmation",
+            "Do you really want to delete all the existing objects ?",
+            QMessageBox.Yes | QMessageBox.No,
+        )
+        if answer == QMessageBox.Yes:
+            for model in models:
+                self.getController().removeModel(model)
+                pass
+            pass
+        pass
+
+    def updateGlobalGraphicsView(self, scene):
+        self._globalGraphicsView.setScene(scene)
+        if scene is None:
+            self._dockGlobalView.setWindowTitle("Graphics view")
+            return
+        self._dockGlobalView.setWindowTitle(
+            "Graphics view : showing " + scene.getModel().getName()
+        )
+        # Resizing the globalGraphicView
+        sceneRect = scene.getRect()
+        topLeft = sceneRect.topLeft()
+        viewRect = QRectF(
+            topLeft.x(), topLeft.y(), 2 * sceneRect.width(), 2 * sceneRect.height()
+        )
+        self._globalGraphicsView.fitInView(viewRect, Qt.IgnoreAspectRatio)
+        pass
+
 
 pass
index 45be4e97cd95404c2df6578d207dd33d425a3717..0712e48b3cd96ddacee22bfced0d97221d4ef653 100755 (executable)
@@ -1,21 +1,22 @@
 import sys
 
-from PyQt5.QtCore    import *
-from PyQt5.QtGui     import *
+from PyQt5.QtCore import *
+from PyQt5.QtGui import *
 from PyQt5.QtWidgets import *
 
 from Controller import Controller
 from Desktop import Desktop
 
-def main( args ) :
-    Appli = QApplication( args )
+
+def main(args):
+    Appli = QApplication(args)
     MainFrame = Desktop()
-    myController = Controller( MainFrame )
-    MainFrame.setController( myController )
+    myController = Controller(MainFrame)
+    MainFrame.setController(myController)
     MainFrame.show()
     Appli.exec_()
 
-if __name__ == "__main__" :
-   main( sys.argv )
-   pass
 
+if __name__ == "__main__":
+    main(sys.argv)
+    pass
index 65a0c2187a855f106815abaabe5b5f147f409446..edc3a3c99f4e4269b77ce06a2379fc9c09d282e5 100755 (executable)
@@ -2,29 +2,30 @@ from GraphicsScene import GraphicsScene
 from qtsalome import *
 from GraphicsRectItem import GraphicsRectItem
 
-class CircleGraphicsScene(  GraphicsScene ) :
-
-   def __init__( self, controller ) :
-       GraphicsScene.__init__( self, controller )
-       pass
-
-   def draw( self ) :
-
-       import math
-
-       center = self._model.getCenter()
-       radius = float( self._model.getRadius() )
-       xCenter = float( center[0] )
-       yCenter = float( center[1] )
-
-       #Drawing the center as a small rectangle
-       centerItem = GraphicsRectItem( xCenter-0.1, yCenter-0.1, 0.2, 0.2, None )
-       self.addItem( centerItem )
-       #Drawing the circle
-       rect = QRectF( xCenter-radius, yCenter-radius, 2*radius, 2*radius )
-       circleItem = QGraphicsEllipseItem()
-       circleItem.setRect( rect )
-       self.addItem( circleItem )
-       pass
+
+class CircleGraphicsScene(GraphicsScene):
+    def __init__(self, controller):
+        GraphicsScene.__init__(self, controller)
+        pass
+
+    def draw(self):
+
+        import math
+
+        center = self._model.getCenter()
+        radius = float(self._model.getRadius())
+        xCenter = float(center[0])
+        yCenter = float(center[1])
+
+        # Drawing the center as a small rectangle
+        centerItem = GraphicsRectItem(xCenter - 0.1, yCenter - 0.1, 0.2, 0.2, None)
+        self.addItem(centerItem)
+        # Drawing the circle
+        rect = QRectF(xCenter - radius, yCenter - radius, 2 * radius, 2 * radius)
+        circleItem = QGraphicsEllipseItem()
+        circleItem.setRect(rect)
+        self.addItem(circleItem)
+        pass
+
 
 pass
index da53ff370fe84e214ec7ab77398be0465b1988e6..59d4aee52e98fb37df9aa1b3764dc0caa5e58467 100755 (executable)
@@ -2,26 +2,27 @@ from View import *
 from TreeWidgetItem import TreeWidgetItem
 from qtsalome import *
 
-class CircleTreeWidgetItem( TreeWidgetItem ) :
 
-   def __init__( self, name, controller, actionsList ) :
-       """Constructor"""
+class CircleTreeWidgetItem(TreeWidgetItem):
+    def __init__(self, name, controller, actionsList):
+        """Constructor"""
 
-       TreeWidgetItem.__init__( self, name, controller, actionsList )
-       pass
+        TreeWidgetItem.__init__(self, name, controller, actionsList)
+        pass
 
-   def editInGlobalTree( self, treeWidgetItem ) :
-       name = self.getModel().getName()
-       treeWidgetItem.setText( 0 , name )
-       center = self._model.getCenter()
-       xCenter = center[0]
-       yCenter = center[1]
-       relatedItem = treeWidgetItem.child( 0 )
-       relatedItem.setText( 0 , str(xCenter) + ":" + str(yCenter) )
+    def editInGlobalTree(self, treeWidgetItem):
+        name = self.getModel().getName()
+        treeWidgetItem.setText(0, name)
+        center = self._model.getCenter()
+        xCenter = center[0]
+        yCenter = center[1]
+        relatedItem = treeWidgetItem.child(0)
+        relatedItem.setText(0, str(xCenter) + ":" + str(yCenter))
+
+        radius = self._model.getRadius()
+        relatedItem = treeWidgetItem.child(1)
+        relatedItem.setText(0, str(radius))
+        pass
 
-       radius = self._model.getRadius()
-       relatedItem = treeWidgetItem.child( 1 )
-       relatedItem.setText( 0 , str(radius) )
-       pass
 
 pass
index 9d895161c517b7a239c5f1238daf968e55dc09f0..449d2c037d7d12cfe01a0d85e666a934528cc56c 100755 (executable)
@@ -1,15 +1,16 @@
 from qtsalome import *
 
-class GraphicsRectItem( QGraphicsRectItem ) :
 
-   def __init__( self, x, y, w, h, index ) :
-       QGraphicsRectItem.__init__( self, x, y, w, h )
-       self._index = index
-       self.setFlag( self.ItemIsMovable, True )
-       self.setFlag( self.ItemIsSelectable, True )
-       pass
+class GraphicsRectItem(QGraphicsRectItem):
+    def __init__(self, x, y, w, h, index):
+        QGraphicsRectItem.__init__(self, x, y, w, h)
+        self._index = index
+        self.setFlag(self.ItemIsMovable, True)
+        self.setFlag(self.ItemIsSelectable, True)
+        pass
+
+    def getIndex(self):
+        return self._index
 
-   def getIndex( self ) :
-       return self._index
 
 pass
index 6635e134747482dc111a372bfce1ae7b0dfee768..24a394ef9cd4047d551e17cfa363e613530bbc95 100755 (executable)
@@ -2,99 +2,104 @@ from Polyline import Polyline
 from Circle import Circle
 from qtsalome import *
 
-class GraphicsView( QGraphicsView ) :
 
-   moved    = pyqtSignal(QPointF)
-   released = pyqtSignal(QPointF)
+class GraphicsView(QGraphicsView):
 
-   def __init__( self, scene ) :
-       QGraphicsView.__init__( self, scene )
-       self.setMouseTracking( True )
-       self._selectedItem = None
-       self.moved[QPointF].connect(self.execMouseMoveEvent)
-       self.released[QPointF].connect(self.execMouseReleaseEvent)
-       pass
+    moved = pyqtSignal(QPointF)
+    released = pyqtSignal(QPointF)
 
-   def mousePressEvent( self, mouseEvent ) :
-       QGraphicsView.mousePressEvent( self, mouseEvent )
-       if self.scene() is None : return
-       self._selectedItem = self.scene().mouseGrabberItem()
-       pass
+    def __init__(self, scene):
+        QGraphicsView.__init__(self, scene)
+        self.setMouseTracking(True)
+        self._selectedItem = None
+        self.moved[QPointF].connect(self.execMouseMoveEvent)
+        self.released[QPointF].connect(self.execMouseReleaseEvent)
+        pass
 
-   def mouseMoveEvent( self, mouseEvent ) :
-       QGraphicsView.mouseMoveEvent( self, mouseEvent )
-       pt = mouseEvent.pos()
-       currentPos = self.mapToScene( pt )
-       self.moved.emit(currentPos)
-       pass
+    def mousePressEvent(self, mouseEvent):
+        QGraphicsView.mousePressEvent(self, mouseEvent)
+        if self.scene() is None:
+            return
+        self._selectedItem = self.scene().mouseGrabberItem()
+        pass
 
-   def mouseReleaseEvent( self, mouseEvent ) :
-       QGraphicsView.mouseReleaseEvent( self, mouseEvent )
-       if mouseEvent.button() == Qt.LeftButton :
-          pt = mouseEvent.pos()
-          newPos = self.mapToScene( pt )
-          self.released.emit(newPos)
-          self._selectedItem = None
-          pass
-       pass
+    def mouseMoveEvent(self, mouseEvent):
+        QGraphicsView.mouseMoveEvent(self, mouseEvent)
+        pt = mouseEvent.pos()
+        currentPos = self.mapToScene(pt)
+        self.moved.emit(currentPos)
+        pass
 
-   def execMouseMoveEvent( self, currentPos ) :
-       if self._selectedItem is None : return
-       selectedIndex = self._selectedItem.getIndex()
-       newX = currentPos.x()
-       newY = currentPos.y()
-       newPoint = newX, newY
-       model = self.scene().getModel()
-       pen = QPen( QColor("red") )
-       if isinstance( model, Polyline ) :
-          #Previsualisation
-          if selectedIndex == 0 :
-             nextPoint = model.getPoints()[ selectedIndex+1 ]
-             xNext = nextPoint[0]
-             yNext = nextPoint[1]
-             self.scene().addLine( newX, newY, xNext, yNext, pen )
-             pass
-          elif selectedIndex == len( model.getPoints()) - 1 :
-             previousPoint = model.getPoints()[ selectedIndex-1 ]
-             xPrevious = previousPoint[0]
-             yPrevious = previousPoint[1]
-             self.scene().addLine( xPrevious, yPrevious, newX, newY, pen )
-             pass
-          else :
-             previousPoint = model.getPoints()[ selectedIndex-1 ]
-             xPrevious = previousPoint[0]
-             yPrevious = previousPoint[1]
-             self.scene().addLine( xPrevious, yPrevious, newX, newY, pen )
-             nextPoint = model.getPoints()[ selectedIndex+1 ]
-             xNext = nextPoint[0]
-             yNext = nextPoint[1]
-             self.scene().addLine( newX, newY, xNext, yNext, pen )
-             pass
-          pass
-       elif isinstance( model, Circle ) :
-          #Previsualisation
-          radius = float( model.getRadius() )
-          rect = QRectF( newX-radius, newY-radius, 2*radius, 2*radius )
-          circleItem = QGraphicsEllipseItem()
-          circleItem.setPen( pen )
-          circleItem.setRect( rect )
-          self.scene().addItem( circleItem )
-          pass
-       pass
+    def mouseReleaseEvent(self, mouseEvent):
+        QGraphicsView.mouseReleaseEvent(self, mouseEvent)
+        if mouseEvent.button() == Qt.LeftButton:
+            pt = mouseEvent.pos()
+            newPos = self.mapToScene(pt)
+            self.released.emit(newPos)
+            self._selectedItem = None
+            pass
+        pass
+
+    def execMouseMoveEvent(self, currentPos):
+        if self._selectedItem is None:
+            return
+        selectedIndex = self._selectedItem.getIndex()
+        newX = currentPos.x()
+        newY = currentPos.y()
+        newPoint = newX, newY
+        model = self.scene().getModel()
+        pen = QPen(QColor("red"))
+        if isinstance(model, Polyline):
+            # Previsualisation
+            if selectedIndex == 0:
+                nextPoint = model.getPoints()[selectedIndex + 1]
+                xNext = nextPoint[0]
+                yNext = nextPoint[1]
+                self.scene().addLine(newX, newY, xNext, yNext, pen)
+                pass
+            elif selectedIndex == len(model.getPoints()) - 1:
+                previousPoint = model.getPoints()[selectedIndex - 1]
+                xPrevious = previousPoint[0]
+                yPrevious = previousPoint[1]
+                self.scene().addLine(xPrevious, yPrevious, newX, newY, pen)
+                pass
+            else:
+                previousPoint = model.getPoints()[selectedIndex - 1]
+                xPrevious = previousPoint[0]
+                yPrevious = previousPoint[1]
+                self.scene().addLine(xPrevious, yPrevious, newX, newY, pen)
+                nextPoint = model.getPoints()[selectedIndex + 1]
+                xNext = nextPoint[0]
+                yNext = nextPoint[1]
+                self.scene().addLine(newX, newY, xNext, yNext, pen)
+                pass
+            pass
+        elif isinstance(model, Circle):
+            # Previsualisation
+            radius = float(model.getRadius())
+            rect = QRectF(newX - radius, newY - radius, 2 * radius, 2 * radius)
+            circleItem = QGraphicsEllipseItem()
+            circleItem.setPen(pen)
+            circleItem.setRect(rect)
+            self.scene().addItem(circleItem)
+            pass
+        pass
+
+    def execMouseReleaseEvent(self, newPos):
+        if self._selectedItem is None:
+            return
+        selectedIndex = self._selectedItem.getIndex()
+        newX = newPos.x()
+        newY = newPos.y()
+        newPoint = newX, newY
+        model = self.scene().getModel()
+        if isinstance(model, Polyline):
+            self.scene().getController().editPoint(model, newPoint, selectedIndex)
+            pass
+        elif isinstance(model, Circle):
+            self.scene().getController().editCenter(model, newPoint)
+            pass
+        pass
 
-   def execMouseReleaseEvent( self, newPos ) :
-       if self._selectedItem is None : return
-       selectedIndex = self._selectedItem.getIndex()
-       newX = newPos.x()
-       newY = newPos.y()
-       newPoint = newX, newY
-       model = self.scene().getModel()
-       if isinstance( model, Polyline ) :
-          self.scene().getController().editPoint( model, newPoint, selectedIndex )
-          pass
-       elif isinstance( model, Circle ) :
-          self.scene().getController().editCenter( model, newPoint )
-          pass
-       pass
 
 pass
index 1f2a38025033282d7995d39f7c50041d29c8f420..87a912e414919ab070f2b3b7c25b5f64018b1fc6 100755 (executable)
@@ -1,15 +1,16 @@
 from qtsalome import *
 
-class Menu( QMenu ) :
 
-   def __init__( self, item ) :
-       """Constructor"""
+class Menu(QMenu):
+    def __init__(self, item):
+        """Constructor"""
 
-       QMenu.__init__( self )
-       self._item = item
-       pass
+        QMenu.__init__(self)
+        self._item = item
+        pass
+
+    def getItem(self):
+        return self._item
 
-   def getItem( self ) :
-       return self._item
 
 pass
index 0690d7543f66ae081683d48fa0466a8f26118251..c68fc67bd112a4fe8e69a51c52776fdcd4dde3d9 100755 (executable)
@@ -2,38 +2,39 @@ from GraphicsScene import GraphicsScene
 from qtsalome import *
 from GraphicsRectItem import GraphicsRectItem
 
-class PolyGraphicsScene(  GraphicsScene ) :
 
-   def __init__( self, controller ) :
-       GraphicsScene.__init__( self, controller )
-       pass
+class PolyGraphicsScene(GraphicsScene):
+    def __init__(self, controller):
+        GraphicsScene.__init__(self, controller)
+        pass
 
-   def draw( self ) :
-       points = self.getModel().getPoints()
+    def draw(self):
+        points = self.getModel().getPoints()
 
-       # Drawing the points as small rectangles
-       for i in range( len(points) ) :
-          point = points[i]
-          xPoint = float( point[0] )
-          yPoint = float( point[1] )
-          # Constructing a rectangle centered on point
-          pointItem = GraphicsRectItem( xPoint-0.1, yPoint-0.1, 0.2, 0.2, i )
-          self.addItem( pointItem )
-          pass
+        # Drawing the points as small rectangles
+        for i in range(len(points)):
+            point = points[i]
+            xPoint = float(point[0])
+            yPoint = float(point[1])
+            # Constructing a rectangle centered on point
+            pointItem = GraphicsRectItem(xPoint - 0.1, yPoint - 0.1, 0.2, 0.2, i)
+            self.addItem(pointItem)
+            pass
+
+        # Linking the points with lines
+        for i in range(len(points) - 1):
+            current = points[i]
+            next = points[i + 1]
+            xCurrent = float(current[0])
+            yCurrent = float(current[1])
+            xNext = float(next[0])
+            yNext = float(next[1])
+            line = QLineF(xCurrent, yCurrent, xNext, yNext)
+            lineItem = QGraphicsLineItem()
+            lineItem.setLine(line)
+            self.addItem(lineItem)
+            pass
+        pass
 
-       # Linking the points with lines
-       for i in range( len(points) - 1 ) :
-          current = points[i]
-          next = points[i+1]
-          xCurrent = float( current[0] )
-          yCurrent = float( current[1] )
-          xNext = float( next[0] )
-          yNext = float( next[1] )
-          line = QLineF( xCurrent, yCurrent, xNext, yNext )
-          lineItem = QGraphicsLineItem()
-          lineItem.setLine( line )
-          self.addItem( lineItem )
-          pass
-       pass
 
 pass
index 344bf187189b330701b8f00b74136233c53c84b2..8369f903f21b4d7175ee67f00ca6c72ff4d4e689 100755 (executable)
@@ -2,26 +2,27 @@ from View import *
 from TreeWidgetItem import TreeWidgetItem
 from qtsalome import *
 
-class PolyTreeWidgetItem( TreeWidgetItem ) :
 
-   def __init__( self, name, controller, actionsList ) :
-       """Constructor"""
+class PolyTreeWidgetItem(TreeWidgetItem):
+    def __init__(self, name, controller, actionsList):
+        """Constructor"""
 
-       TreeWidgetItem.__init__( self, name, controller, actionsList )
-       pass
+        TreeWidgetItem.__init__(self, name, controller, actionsList)
+        pass
 
-   def editInGlobalTree( self, treeWidgetItem ) :
-       name = self.getModel().getName()
-       treeWidgetItem.setText( 0 , name )
+    def editInGlobalTree(self, treeWidgetItem):
+        name = self.getModel().getName()
+        treeWidgetItem.setText(0, name)
+
+        points = self._model.getPoints()
+        for i in range(len(points)):
+            point = points[i]
+            xPoint = point[0]
+            yPoint = point[1]
+            relatedItem = treeWidgetItem.child(i)
+            relatedItem.setText(0, str(xPoint) + ":" + str(yPoint))
+            pass
+        pass
 
-       points = self._model.getPoints()
-       for i in range( len(points) ) :
-          point = points[i]
-          xPoint = point[0]
-          yPoint = point[1]
-          relatedItem = treeWidgetItem.child( i )
-          relatedItem.setText( 0 , str(xPoint) + ":" + str(yPoint) )
-          pass
-       pass
 
 pass
index c3b1ccd7e26e8b18fd8cd272a1bd182f2d2ae392..1cd30c5edcb89ca4e8b1245b271ffec9474c41c1 100755 (executable)
@@ -19,89 +19,125 @@ sgDesktop = sgPyQt.getDesktop()
 
 #########################################
 
-class TreeWidget( QTreeWidget ) :
 
-   def __init__( self, desktop ) :
-       """Constructor"""
+class TreeWidget(QTreeWidget):
+    def __init__(self, desktop):
+        """Constructor"""
 
-       QTreeWidget.__init__( self )
-       self._desktop = desktop
+        QTreeWidget.__init__(self)
+        self._desktop = desktop
 
-       #Creating popup menu
-       self.setContextMenuPolicy( Qt.CustomContextMenu )
-       self.customContextMenuRequested[QPoint].connect(self.createPopups)
-       pass
+        # Creating popup menu
+        self.setContextMenuPolicy(Qt.CustomContextMenu)
+        self.customContextMenuRequested[QPoint].connect(self.createPopups)
+        pass
 
-   def createPopups( self, point ) :
-       item = self.itemAt( point )
-       if item is None : return
-       self.menu = Menu( item )
-       for action in item.getActionsList():
-          if action == "Show" :
-             self.menu.addAction(action).triggered.connect(self.show)
-             pass
-          elif action == 'Rename' :
-             self.menu.addAction(action).triggered.connect(self.showRenameDialog)
-             pass
-          elif action == 'Delete' :
-             self.menu.addAction(action).triggered.connect(self.delete)
-             pass
-          else :
-             self.menu.addAction(action).triggered.connect(self.showEditDialog)
-             pass
-          pass
-       self. menu.exec_( QCursor.pos() )
-       pass
+    def createPopups(self, point):
+        item = self.itemAt(point)
+        if item is None:
+            return
+        self.menu = Menu(item)
+        for action in item.getActionsList():
+            if action == "Show":
+                self.menu.addAction(action).triggered.connect(self.show)
+                pass
+            elif action == "Rename":
+                self.menu.addAction(action).triggered.connect(self.showRenameDialog)
+                pass
+            elif action == "Delete":
+                self.menu.addAction(action).triggered.connect(self.delete)
+                pass
+            else:
+                self.menu.addAction(action).triggered.connect(self.showEditDialog)
+                pass
+            pass
+        self.menu.exec_(QCursor.pos())
+        pass
 
-   def show( self ) :
-       model = self.menu.getItem().getModel()
-       controller = self._desktop.getController()
-       controller.showModel( model )
-       pass
+    def show(self):
+        model = self.menu.getItem().getModel()
+        controller = self._desktop.getController()
+        controller.showModel(model)
+        pass
 
-   def showRenameDialog( self ) :
-       model = self.menu.getItem().getModel()
-       oldName = model.getName()
-       widgetDialogBox = QDockWidget( sgDesktop )
-       myDialog = RenameDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, model, oldName )
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Object renaming" )
-       sgDesktop.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
+    def showRenameDialog(self):
+        model = self.menu.getItem().getModel()
+        oldName = model.getName()
+        widgetDialogBox = QDockWidget(sgDesktop)
+        myDialog = RenameDialog(
+            "www.google.fr",
+            self._desktop.getController(),
+            widgetDialogBox,
+            model,
+            oldName,
+        )
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Object renaming")
+        sgDesktop.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
 
-   def delete( self ) :
-       answer = QMessageBox.question( self, 'Confirmation', 'Do you really want to remove the selected curve ?' , QMessageBox.Yes | QMessageBox.No )
-       if answer == QMessageBox.Yes :
-          model = self.menu.getItem().getModel()
-          controller = self._desktop.getController()
-          controller.removeModel( model )
-          pass
-       pass
+    def delete(self):
+        answer = QMessageBox.question(
+            self,
+            "Confirmation",
+            "Do you really want to remove the selected curve ?",
+            QMessageBox.Yes | QMessageBox.No,
+        )
+        if answer == QMessageBox.Yes:
+            model = self.menu.getItem().getModel()
+            controller = self._desktop.getController()
+            controller.removeModel(model)
+            pass
+        pass
 
-   def showEditDialog( self ) :
-       item = self.menu.getItem()
-       parentItem = item.parent()
-       parentModel = parentItem.getModel()
-       widgetDialogBox = QDockWidget( sgDesktop )
-       if isinstance( parentModel, Polyline ) :
-          pointRange = parentItem.indexOfChild( item )
-          oldPoint = item.text( 0 )
-          myDialog = EditPointDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, parentModel, oldPoint, pointRange )
-          pass
-       elif isinstance( parentModel, Circle ) :
-          selectedRange = parentItem.indexOfChild( item )
-          oldSelected = item.text( 0 )
-          if selectedRange == 0 : myDialog = EditCenterDialog( "www.google.fr", self._desktop.getController(), widgetDialogBox, parentModel, oldSelected )
-          elif selectedRange == 1 : myDialog = EditRadiusDialog("www.google.fr",self._desktop.getController(),widgetDialogBox,parentModel,oldSelected)
-          else : pass
-          pass
-       else : pass
+    def showEditDialog(self):
+        item = self.menu.getItem()
+        parentItem = item.parent()
+        parentModel = parentItem.getModel()
+        widgetDialogBox = QDockWidget(sgDesktop)
+        if isinstance(parentModel, Polyline):
+            pointRange = parentItem.indexOfChild(item)
+            oldPoint = item.text(0)
+            myDialog = EditPointDialog(
+                "www.google.fr",
+                self._desktop.getController(),
+                widgetDialogBox,
+                parentModel,
+                oldPoint,
+                pointRange,
+            )
+            pass
+        elif isinstance(parentModel, Circle):
+            selectedRange = parentItem.indexOfChild(item)
+            oldSelected = item.text(0)
+            if selectedRange == 0:
+                myDialog = EditCenterDialog(
+                    "www.google.fr",
+                    self._desktop.getController(),
+                    widgetDialogBox,
+                    parentModel,
+                    oldSelected,
+                )
+            elif selectedRange == 1:
+                myDialog = EditRadiusDialog(
+                    "www.google.fr",
+                    self._desktop.getController(),
+                    widgetDialogBox,
+                    parentModel,
+                    oldSelected,
+                )
+            else:
+                pass
+            pass
+        else:
+            pass
+
+        widgetDialogBox.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
+        widgetDialogBox.setWidget(myDialog)
+        widgetDialogBox.setWindowTitle("Object edition")
+        sgDesktop.addDockWidget(Qt.LeftDockWidgetArea, widgetDialogBox)
+        pass
 
-       widgetDialogBox.setAllowedAreas( Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )
-       widgetDialogBox.setWidget( myDialog )
-       widgetDialogBox.setWindowTitle( "Object edition" )
-       sgDesktop.addDockWidget( Qt.LeftDockWidgetArea, widgetDialogBox )
-       pass
 
 pass
index 39f140767280de69325eba1c7dc6a1b8123d3145..c88b2b5eb0362322321d852b12fb95876692aefa 100644 (file)
@@ -12,13 +12,13 @@ def get_path():
 
 
 def update_context(app, pagename, templatename, context, doctree):
-    context['alabaster_version'] = version.__version__
+    context["alabaster_version"] = version.__version__
+
 
 def setup(app):
     # add_html_theme is new in Sphinx 1.6+
-    if hasattr(app, 'add_html_theme'):
+    if hasattr(app, "add_html_theme"):
         theme_path = os.path.abspath(os.path.dirname(__file__))
-        app.add_html_theme('alabaster', theme_path)
-    app.connect('html-page-context', update_context)
-    return {'version': version.__version__,
-            'parallel_read_safe': True}
+        app.add_html_theme("alabaster", theme_path)
+    app.connect("html-page-context", update_context)
+    return {"version": version.__version__, "parallel_read_safe": True}
index 9e641f779613c85e0dc99a3fc2b27f3a066a5236..b74ffbe110dde90724390fc65d606dcda0e51b45 100644 (file)
@@ -1,2 +1,2 @@
 __version_info__ = (0, 7, 10)
-__version__ = '.'.join(map(str, __version_info__))
+__version__ = ".".join(map(str, __version_info__))
index 0f3aa8cb29ca99167fe5b4cde45e5dbeb85c0fd5..ebde7b355be6571aeba01768f381db2a5dd2fe88 100644 (file)
@@ -1,88 +1,91 @@
 # flake8: noqa
 
 from pygments.style import Style
-from pygments.token import Keyword, Name, Comment, String, Error, \
-     Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+from pygments.token import (
+    Keyword,
+    Name,
+    Comment,
+    String,
+    Error,
+    Number,
+    Operator,
+    Generic,
+    Whitespace,
+    Punctuation,
+    Other,
+    Literal,
+)
 
 
 # Originally based on FlaskyStyle which was based on 'tango'.
 class Alabaster(Style):
-    background_color = "#f8f8f8" # doesn't seem to override CSS 'pre' styling?
+    background_color = "#f8f8f8"  # doesn't seem to override CSS 'pre' styling?
     default_style = ""
 
     styles = {
         # No corresponding class for the following:
-        #Text:                     "", # class:  ''
-        Whitespace:                "underline #f8f8f8",      # class: 'w'
-        Error:                     "#a40000 border:#ef2929", # class: 'err'
-        Other:                     "#000000",                # class 'x'
-
-        Comment:                   "italic #8f5902", # class: 'c'
-        Comment.Preproc:           "noitalic",       # class: 'cp'
-
-        Keyword:                   "bold #004461",   # class: 'k'
-        Keyword.Constant:          "bold #004461",   # class: 'kc'
-        Keyword.Declaration:       "bold #004461",   # class: 'kd'
-        Keyword.Namespace:         "bold #004461",   # class: 'kn'
-        Keyword.Pseudo:            "bold #004461",   # class: 'kp'
-        Keyword.Reserved:          "bold #004461",   # class: 'kr'
-        Keyword.Type:              "bold #004461",   # class: 'kt'
-
-        Operator:                  "#582800",   # class: 'o'
-        Operator.Word:             "bold #004461",   # class: 'ow' - like keywords
-
-        Punctuation:               "bold #000000",   # class: 'p'
-
+        # Text:                     "", # class:  ''
+        Whitespace: "underline #f8f8f8",  # class: 'w'
+        Error: "#a40000 border:#ef2929",  # class: 'err'
+        Other: "#000000",  # class 'x'
+        Comment: "italic #8f5902",  # class: 'c'
+        Comment.Preproc: "noitalic",  # class: 'cp'
+        Keyword: "bold #004461",  # class: 'k'
+        Keyword.Constant: "bold #004461",  # class: 'kc'
+        Keyword.Declaration: "bold #004461",  # class: 'kd'
+        Keyword.Namespace: "bold #004461",  # class: 'kn'
+        Keyword.Pseudo: "bold #004461",  # class: 'kp'
+        Keyword.Reserved: "bold #004461",  # class: 'kr'
+        Keyword.Type: "bold #004461",  # class: 'kt'
+        Operator: "#582800",  # class: 'o'
+        Operator.Word: "bold #004461",  # class: 'ow' - like keywords
+        Punctuation: "bold #000000",  # class: 'p'
         # because special names such as Name.Class, Name.Function, etc.
         # are not recognized as such later in the parsing, we choose them
         # to look the same as ordinary variables.
-        Name:                      "#000000",        # class: 'n'
-        Name.Attribute:            "#c4a000",        # class: 'na' - to be revised
-        Name.Builtin:              "#004461",        # class: 'nb'
-        Name.Builtin.Pseudo:       "#3465a4",        # class: 'bp'
-        Name.Class:                "#000000",        # class: 'nc' - to be revised
-        Name.Constant:             "#000000",        # class: 'no' - to be revised
-        Name.Decorator:            "#888",           # class: 'nd' - to be revised
-        Name.Entity:               "#ce5c00",        # class: 'ni'
-        Name.Exception:            "bold #cc0000",   # class: 'ne'
-        Name.Function:             "#000000",        # class: 'nf'
-        Name.Property:             "#000000",        # class: 'py'
-        Name.Label:                "#f57900",        # class: 'nl'
-        Name.Namespace:            "#000000",        # class: 'nn' - to be revised
-        Name.Other:                "#000000",        # class: 'nx'
-        Name.Tag:                  "bold #004461",   # class: 'nt' - like a keyword
-        Name.Variable:             "#000000",        # class: 'nv' - to be revised
-        Name.Variable.Class:       "#000000",        # class: 'vc' - to be revised
-        Name.Variable.Global:      "#000000",        # class: 'vg' - to be revised
-        Name.Variable.Instance:    "#000000",        # class: 'vi' - to be revised
-
-        Number:                    "#990000",        # class: 'm'
-
-        Literal:                   "#000000",        # class: 'l'
-        Literal.Date:              "#000000",        # class: 'ld'
-
-        String:                    "#4e9a06",        # class: 's'
-        String.Backtick:           "#4e9a06",        # class: 'sb'
-        String.Char:               "#4e9a06",        # class: 'sc'
-        String.Doc:                "italic #8f5902", # class: 'sd' - like a comment
-        String.Double:             "#4e9a06",        # class: 's2'
-        String.Escape:             "#4e9a06",        # class: 'se'
-        String.Heredoc:            "#4e9a06",        # class: 'sh'
-        String.Interpol:           "#4e9a06",        # class: 'si'
-        String.Other:              "#4e9a06",        # class: 'sx'
-        String.Regex:              "#4e9a06",        # class: 'sr'
-        String.Single:             "#4e9a06",        # class: 's1'
-        String.Symbol:             "#4e9a06",        # class: 'ss'
-
-        Generic:                   "#000000",        # class: 'g'
-        Generic.Deleted:           "#a40000",        # class: 'gd'
-        Generic.Emph:              "italic #000000", # class: 'ge'
-        Generic.Error:             "#ef2929",        # class: 'gr'
-        Generic.Heading:           "bold #000080",   # class: 'gh'
-        Generic.Inserted:          "#00A000",        # class: 'gi'
-        Generic.Output:            "#888",           # class: 'go'
-        Generic.Prompt:            "#745334",        # class: 'gp'
-        Generic.Strong:            "bold #000000",   # class: 'gs'
-        Generic.Subheading:        "bold #800080",   # class: 'gu'
-        Generic.Traceback:         "bold #a40000",   # class: 'gt'
+        Name: "#000000",  # class: 'n'
+        Name.Attribute: "#c4a000",  # class: 'na' - to be revised
+        Name.Builtin: "#004461",  # class: 'nb'
+        Name.Builtin.Pseudo: "#3465a4",  # class: 'bp'
+        Name.Class: "#000000",  # class: 'nc' - to be revised
+        Name.Constant: "#000000",  # class: 'no' - to be revised
+        Name.Decorator: "#888",  # class: 'nd' - to be revised
+        Name.Entity: "#ce5c00",  # class: 'ni'
+        Name.Exception: "bold #cc0000",  # class: 'ne'
+        Name.Function: "#000000",  # class: 'nf'
+        Name.Property: "#000000",  # class: 'py'
+        Name.Label: "#f57900",  # class: 'nl'
+        Name.Namespace: "#000000",  # class: 'nn' - to be revised
+        Name.Other: "#000000",  # class: 'nx'
+        Name.Tag: "bold #004461",  # class: 'nt' - like a keyword
+        Name.Variable: "#000000",  # class: 'nv' - to be revised
+        Name.Variable.Class: "#000000",  # class: 'vc' - to be revised
+        Name.Variable.Global: "#000000",  # class: 'vg' - to be revised
+        Name.Variable.Instance: "#000000",  # class: 'vi' - to be revised
+        Number: "#990000",  # class: 'm'
+        Literal: "#000000",  # class: 'l'
+        Literal.Date: "#000000",  # class: 'ld'
+        String: "#4e9a06",  # class: 's'
+        String.Backtick: "#4e9a06",  # class: 'sb'
+        String.Char: "#4e9a06",  # class: 'sc'
+        String.Doc: "italic #8f5902",  # class: 'sd' - like a comment
+        String.Double: "#4e9a06",  # class: 's2'
+        String.Escape: "#4e9a06",  # class: 'se'
+        String.Heredoc: "#4e9a06",  # class: 'sh'
+        String.Interpol: "#4e9a06",  # class: 'si'
+        String.Other: "#4e9a06",  # class: 'sx'
+        String.Regex: "#4e9a06",  # class: 'sr'
+        String.Single: "#4e9a06",  # class: 's1'
+        String.Symbol: "#4e9a06",  # class: 'ss'
+        Generic: "#000000",  # class: 'g'
+        Generic.Deleted: "#a40000",  # class: 'gd'
+        Generic.Emph: "italic #000000",  # class: 'ge'
+        Generic.Error: "#ef2929",  # class: 'gr'
+        Generic.Heading: "bold #000080",  # class: 'gh'
+        Generic.Inserted: "#00A000",  # class: 'gi'
+        Generic.Output: "#888",  # class: 'go'
+        Generic.Prompt: "#745334",  # class: 'gp'
+        Generic.Strong: "bold #000000",  # class: 'gs'
+        Generic.Subheading: "bold #800080",  # class: 'gu'
+        Generic.Traceback: "bold #a40000",  # class: 'gt'
     }
index 82ad04cdf9c42d12476c0b0209fd4d32d00661f5..8be1b069764557a163dffecde075c0f1b11da46d 100644 (file)
@@ -2,34 +2,34 @@ from datetime import datetime
 
 
 extensions = []
-templates_path = ['_templates']
-source_suffix = '.rst'
-master_doc = 'index'
+templates_path = ["_templates"]
+source_suffix = ".rst"
+master_doc = "index"
 
-project = u'Alabaster'
+project = u"Alabaster"
 year = datetime.now().year
-copyright = u'%d Jeff Forcier' % year
+copyright = u"%d Jeff Forcier" % year
 
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
-html_theme = 'alabaster'
+html_theme = "alabaster"
 html_sidebars = {
-    '**': [
-        'about.html',
-        'navigation.html',
-        'relations.html',
-        'searchbox.html',
-        'donate.html',
+    "**": [
+        "about.html",
+        "navigation.html",
+        "relations.html",
+        "searchbox.html",
+        "donate.html",
     ]
 }
 html_theme_options = {
-    'description': "A light, configurable Sphinx theme",
-    'github_user': 'bitprophet',
-    'github_repo': 'alabaster',
-    'fixed_sidebar': True,
+    "description": "A light, configurable Sphinx theme",
+    "github_user": "bitprophet",
+    "github_repo": "alabaster",
+    "fixed_sidebar": True,
 }
 
-extensions.append('releases')
-releases_github_path = 'bitprophet/alabaster'
+extensions.append("releases")
+releases_github_path = "bitprophet/alabaster"
 # Our pre-0.x releases are unstable / mix bugs+features
 releases_unstable_prehistory = True
index 2170208553fa0ce93fc5eda0e8ecddff21b176f9..c9d037fb74bb44c66992a191b4433a4a272b1654 100644 (file)
@@ -5,47 +5,47 @@ from setuptools import setup
 
 # Version info -- read without importing
 _locals = {}
-with open('alabaster/_version.py') as fp:
+with open("alabaster/_version.py") as fp:
     exec(fp.read(), None, _locals)
-version = _locals['__version__']
+version = _locals["__version__"]
 
 # README into long description
-with codecs.open('README.rst', encoding='utf-8') as f:
+with codecs.open("README.rst", encoding="utf-8") as f:
     readme = f.read()
 
 setup(
-    name='alabaster',
+    name="alabaster",
     version=version,
-    description='A configurable sidebar-enabled Sphinx theme',
+    description="A configurable sidebar-enabled Sphinx theme",
     long_description=readme,
-    author='Jeff Forcier',
-    author_email='jeff@bitprophet.org',
-    url='https://alabaster.readthedocs.io',
-    packages=['alabaster'],
+    author="Jeff Forcier",
+    author_email="jeff@bitprophet.org",
+    url="https://alabaster.readthedocs.io",
+    packages=["alabaster"],
     include_package_data=True,
     entry_points={
-        'sphinx.html_themes': [
-            'alabaster = alabaster',
+        "sphinx.html_themes": [
+            "alabaster = alabaster",
         ]
     },
     classifiers=[
-        'Development Status :: 5 - Production/Stable',
-        'Intended Audience :: Developers',
-        'License :: OSI Approved :: BSD License',
-        'Operating System :: OS Independent',
-        'Programming Language :: Python',
-        'Programming Language :: Python :: 2',
-        'Programming Language :: Python :: 2.6',
-        'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.2',
-        'Programming Language :: Python :: 3.3',
-        'Programming Language :: Python :: 3.4',
-        'Programming Language :: Python :: 3.5',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: Implementation :: CPython',
-        'Programming Language :: Python :: Implementation :: PyPy',
-        'Topic :: Documentation',
-        'Topic :: Software Development :: Documentation',
+        "Development Status :: 5 - Production/Stable",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: BSD License",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2",
+        "Programming Language :: Python :: 2.6",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: Implementation :: CPython",
+        "Programming Language :: Python :: Implementation :: PyPy",
+        "Topic :: Documentation",
+        "Topic :: Software Development :: Documentation",
     ],
 )
index ff51d9ee4f24b3b1fa715101dab7d2588cb21a61..c131611ae4dfa0b268d7080684fee1b1cb29918d 100644 (file)
@@ -4,10 +4,12 @@ from invocations.packaging import release
 
 
 ns = Collection(release, docs)
-ns.configure({
-    'packaging': {
-        'sign': True,
-        'wheel': True,
-        'changelog_file': 'docs/changelog.rst',
+ns.configure(
+    {
+        "packaging": {
+            "sign": True,
+            "wheel": True,
+            "changelog_file": "docs/changelog.rst",
+        }
     }
-})
+)
index b4916cbcf2b899ca50adb0eb9b7fae6b9b366d2e..a0efe622bb4e9177ad53b6baba9b1b560ad3ac46 100644 (file)
@@ -16,7 +16,7 @@ import os
 
 
 # https://stackoverflow.com/questions/23462494/how-to-add-a-custom-css-file-to-sphinx
-# this needs realpath(custom), cause problem on relocated git clone, 
+# this needs realpath(custom), cause problem on relocated git clone,
 # so use file link instead in _themes/alabaster: ln -s ../../../../src/custom.css custom.css
 # def setup(app):
 #     custom = os.path.join('src', 'custom.css')
@@ -26,78 +26,83 @@ import os
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
+extensions = [
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.todo",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.viewcode",
+]
 
 # do not use rst_prolog please use doc/rst_prolog.rst and '.. include:: ../rst_prolog.rst'
 # https://github.com/sphinx-doc/sphinx/issues/2445
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'sat'
-copyright = u'2019, CEA'
+project = u"sat"
+copyright = u"2019, CEA"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '5.8'
+version = "5.8"
 # The full version, including alpha/beta/rc tags.
-release = '5.8.0'
+release = "5.8.0"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 exclude_patterns = []
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
@@ -105,7 +110,7 @@ pygments_style = 'sphinx'
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 # default sphinxdoc scrolls agogo traditional nature haiku
-html_theme = 'alabaster' #added in _theme dir
+html_theme = "alabaster"  # added in _theme dir
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -118,28 +123,30 @@ html_theme_options = {
     #'sidebar_includehidden': True,
     #'sidebar_collapse': True,
     #'show_powered_by': False,
-    'show_related': True,
-    'fixed_sidebar': False,
-    'page_width': '1000px',
-    'sidebar_width': '250px',
-    'font_size': '0.9em', #'1.0625em',
-    'code_font_size': '0.8em',
+    "show_related": True,
+    "fixed_sidebar": False,
+    "page_width": "1000px",
+    "sidebar_width": "250px",
+    "font_size": "0.9em",  #'1.0625em',
+    "code_font_size": "0.8em",
     #'note_bg': '#eee',
-    'pre_bg': '#fee', # code-block background
+    "pre_bg": "#fee",  # code-block background
     #'gray_1': '#00f',
     #'gray_2': '#f00',
     #'gray_3': '#0f0',
 }
 
 # Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ["../_themes/alabaster", ]
+html_theme_path = [
+    "../_themes/alabaster",
+]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -153,105 +160,107 @@ html_logo = "images/sat_v5.png"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+# html_static_path = ['_static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'satdoc'
+htmlhelp_basename = "satdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-  # The paper size ('letterpaper' or 'a4paper').
-  'papersize': 'a4paper',
-
-  # The font size ('10pt', '11pt' or '12pt').
-  'pointsize': '10pt',
-
-  # Additional stuff for the LaTeX preamble.
-  #'preamble': '',
-
-  # http://www.sphinx-doc.org/en/master/latex.html
-  'sphinxsetup': 'verbatimwithframe=false, VerbatimColor={rgb}{.98,.94,.94}',
+    # The paper size ('letterpaper' or 'a4paper').
+    "papersize": "a4paper",
+    # The font size ('10pt', '11pt' or '12pt').
+    "pointsize": "10pt",
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+    # http://www.sphinx-doc.org/en/master/latex.html
+    "sphinxsetup": "verbatimwithframe=false, VerbatimColor={rgb}{.98,.94,.94}",
 }
 
 
 # The paper size ('letter' or 'a4').
-#latex_paper_size = 'a4'
+# latex_paper_size = 'a4'
 
 # The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# latex_font_size = '10pt'
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'sat.tex', u'SAT Documentation',
-   u'CEA DES/ISAS/DM2S/STMF/LGLS', 'manual'),
+    (
+        "index",
+        "sat.tex",
+        u"SAT Documentation",
+        u"CEA DES/ISAS/DM2S/STMF/LGLS",
+        "manual",
+    ),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
 latex_show_pagerefs = True
 
 # If true, show URL addresses after external links.
-latex_show_urls = 'footnote' # sphinx version 1.7 # True
+latex_show_urls = "footnote"  # sphinx version 1.7 # True
 
 # Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -259,8 +268,5 @@ latex_show_urls = 'footnote' # sphinx version 1.7 # True
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'sat', u'SAT Documentation',
-     [u'CEA DES/ISAS/DM2S/STMF/LGLS'], 1)
+    ("index", "sat", u"SAT Documentation", [u"CEA DES/ISAS/DM2S/STMF/LGLS"], 1)
 ]
-
-
index 8fdd9cb84e83f6b47293444d373a7d19cbc004cd..d9c35ea7363b77b7f362a536bcdf70782738e9c7 100644 (file)
@@ -68,7 +68,8 @@ xpath_tokenizer_re = re.compile(
     "[/.*:\[\]\(\)@=])|"
     "((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
     "\s+"
-    )
+)
+
 
 def xpath_tokenizer(pattern, namespaces=None):
     for token in xpath_tokenizer_re.findall(pattern):
@@ -84,6 +85,7 @@ def xpath_tokenizer(pattern, namespaces=None):
         else:
             yield token
 
+
 def get_parent_map(context):
     parent_map = context.parent_map
     if parent_map is None:
@@ -93,26 +95,34 @@ def get_parent_map(context):
                 parent_map[e] = p
     return parent_map
 
+
 def prepare_child(next, token):
     tag = token[1]
+
     def select(context, result):
         for elem in result:
             for e in elem:
                 if e.tag == tag:
                     yield e
+
     return select
 
+
 def prepare_star(next, token):
     def select(context, result):
         for elem in result:
             yield from elem
+
     return select
 
+
 def prepare_self(next, token):
     def select(context, result):
         yield from result
+
     return select
 
+
 def prepare_descendant(next, token):
     token = next()
     if token[0] == "*":
@@ -121,13 +131,16 @@ def prepare_descendant(next, token):
         tag = token[1]
     else:
         raise SyntaxError("invalid descendant")
+
     def select(context, result):
         for elem in result:
             for e in elem.iter(tag):
                 if e is not elem:
                     yield e
+
     return select
 
+
 def prepare_parent(next, token):
     def select(context, result):
         # FIXME: raise error if .. is applied at toplevel?
@@ -139,8 +152,10 @@ def prepare_parent(next, token):
                 if parent not in result_map:
                     result_map[parent] = None
                     yield parent
+
     return select
 
+
 def prepare_predicate(next, token):
     # FIXME: replace with real parser!!! refs:
     # http://effbot.org/zone/simple-iterator-parser.htm
@@ -160,38 +175,46 @@ def prepare_predicate(next, token):
     if signature == "@-":
         # [@attribute] predicate
         key = predicate[1]
+
         def select(context, result):
             for elem in result:
                 if elem.get(key) is not None:
                     yield elem
+
         return select
     if signature == "@-='":
         # [@attribute='value']
         key = predicate[1]
         value = predicate[-1]
+
         def select(context, result):
             for elem in result:
                 if elem.get(key) == value:
                     yield elem
+
         return select
     if signature == "-" and not re.match("\-?\d+$", predicate[0]):
         # [tag]
         tag = predicate[0]
+
         def select(context, result):
             for elem in result:
                 if elem.find(tag) is not None:
                     yield elem
+
         return select
     if signature == "-='" and not re.match("\-?\d+$", predicate[0]):
         # [tag='value']
         tag = predicate[0]
         value = predicate[-1]
+
         def select(context, result):
             for elem in result:
                 for e in elem.findall(tag):
                     if "".join(e.itertext()) == value:
                         yield elem
                         break
+
         return select
     if signature == "-" or signature == "-()" or signature == "-()-":
         # [index] or [last()] or [last()-index]
@@ -212,6 +235,7 @@ def prepare_predicate(next, token):
                     raise SyntaxError("XPath offset from last() must be negative")
             else:
                 index = -1
+
         def select(context, result):
             parent_map = get_parent_map(context)
             for elem in result:
@@ -223,9 +247,11 @@ def prepare_predicate(next, token):
                         yield elem
                 except (IndexError, KeyError):
                     pass
+
         return select
     raise SyntaxError("invalid predicate")
 
+
 ops = {
     "": prepare_child,
     "*": prepare_star,
@@ -233,26 +259,32 @@ ops = {
     "..": prepare_parent,
     "//": prepare_descendant,
     "[": prepare_predicate,
-    }
+}
 
 _cache = {}
 
+
 class _SelectorContext:
     parent_map = None
+
     def __init__(self, root):
         self.root = root
 
+
 # --------------------------------------------------------------------
 
 ##
 # Generate all matching objects.
 
+
 def iterfind(elem, path, namespaces=None):
     # compile selector pattern
-    cache_key = (path, None if namespaces is None
-                            else tuple(sorted(namespaces.items())))
+    cache_key = (
+        path,
+        None if namespaces is None else tuple(sorted(namespaces.items())),
+    )
     if path[-1:] == "/":
-        path = path + "*" # implicit all (FIXME: keep this?)
+        path = path + "*"  # implicit all (FIXME: keep this?)
     try:
         selector = _cache[cache_key]
     except KeyError:
@@ -282,27 +314,33 @@ def iterfind(elem, path, namespaces=None):
         result = select(context, result)
     return result
 
+
 ##
 # Find first matching object.
 
+
 def find(elem, path, namespaces=None):
     try:
         return next(iterfind(elem, path, namespaces))
     except StopIteration:
         return None
 
+
 ##
 # Find all matching objects.
 
+
 def findall(elem, path, namespaces=None):
     return list(iterfind(elem, path, namespaces))
 
+
 ##
 # Find text for first matching object.
 
+
 def findtext(elem, path, default=None, namespaces=None):
     try:
         elem = next(iterfind(elem, path, namespaces))
         return elem.text or ""
     except StopIteration:
-        return default
\ No newline at end of file
+        return default
index b871200f6461694aa6ff92d2fa83ab6bf8576130..c42fc447ff76769d52ed221af7ebe9c48ab93cb4 100644 (file)
@@ -1,5 +1,3 @@
-
-
 """
 using VERSION 1.3.0 native xml.etree.ElementTree for python3
 appending method tostring serialize 'pretty_xml'
@@ -11,34 +9,40 @@ import debug as DBG
 _versionPython = sys.version_info[0]
 
 if _versionPython < 3:
-  # python2 previous historic mode
-  import src.ElementTreePython2 as etree
-  DBG.write("ElementTree Python2 %s" % etree.VERSION, etree.__file__, DBG.isDeveloper())
-  tostring = etree.tostring
+    # python2 previous historic mode
+    import src.ElementTreePython2 as etree
+
+    DBG.write(
+        "ElementTree Python2 %s" % etree.VERSION, etree.__file__, DBG.isDeveloper()
+    )
+    tostring = etree.tostring
 
 else:
-  # python3 mode
-  # import xml.etree.ElementTree as etree # native version
-  import src.ElementTreePython3 as etree # VERSION 1.3.0 plus _serialize 'pretty_xml'
-  DBG.write("ElementTree Python3 %s" % etree.VERSION, etree.__file__, DBG.isDeveloper())
-
-  def tostring(node, encoding='utf-8'):
-    """
-    fix output as str with encoding='unicode' because python3
-    If encoding is "unicode", a string is returned.
-    Otherwise a bytestring is returned
-    """
-    try:
-      aStr = etree.tostring(node, encoding='unicode', method="pretty_xml")
-    except:
-      print("*****************************\n problem node", node)
-      # try no pretty
-      aStr = etree.tostring(node, encoding='unicode')
-    # if be byte
-    # aStr = aStr.decode('utf-8')
-    return aStr
+    # python3 mode
+    # import xml.etree.ElementTree as etree # native version
+    import src.ElementTreePython3 as etree  # VERSION 1.3.0 plus _serialize 'pretty_xml'
+
+    DBG.write(
+        "ElementTree Python3 %s" % etree.VERSION, etree.__file__, DBG.isDeveloper()
+    )
+
+    def tostring(node, encoding="utf-8"):
+        """
+        fix output as str with encoding='unicode' because python3
+        If encoding is "unicode", a string is returned.
+        Otherwise a bytestring is returned
+        """
+        try:
+            aStr = etree.tostring(node, encoding="unicode", method="pretty_xml")
+        except:
+            print("*****************************\n problem node", node)
+            # try no pretty
+            aStr = etree.tostring(node, encoding="unicode")
+        # if be byte
+        # aStr = aStr.decode('utf-8')
+        return aStr
+
 
 # common use
 Element = etree.Element
 parse = etree.parse
-
index d1ba909381f358c50b07e5852dde728708176723..2cb93fe526352d24e559c4a49eb5c59c20b78e99 100644 (file)
@@ -71,18 +71,22 @@ __all__ = [
     # public symbols
     "Comment",
     "dump",
-    "Element", "ElementTree",
+    "Element",
+    "ElementTree",
     "fromstring",
-    "iselement", "iterparse",
+    "iselement",
+    "iterparse",
     "parse",
-    "PI", "ProcessingInstruction",
+    "PI",
+    "ProcessingInstruction",
     "QName",
     "SubElement",
     "tostring",
     "TreeBuilder",
-    "VERSION", "XML",
+    "VERSION",
+    "XML",
     "XMLTreeBuilder",
-    ]
+]
 
 ##
 # The <b>Element</b> type is a flexible container object, designed to
@@ -108,6 +112,7 @@ __all__ = [
 
 import string, sys, re, platform
 
+
 class _SimpleElementPath:
     # emulate pre-1.2 find/findtext/findall behaviour
     def find(self, element, tag):
@@ -115,11 +120,13 @@ class _SimpleElementPath:
             if elem.tag == tag:
                 return elem
         return None
+
     def findtext(self, element, tag, default=None):
         for elem in element:
             if elem.tag == tag:
                 return elem.text or ""
         return default
+
     def findall(self, element, tag):
         if tag[:3] == ".//":
             return element.getiterator(tag[3:])
@@ -129,6 +136,7 @@ class _SimpleElementPath:
                 result.append(elem)
         return result
 
+
 """
 # obsolete
 # ElementPath.py is for python3 2019
@@ -139,7 +147,7 @@ except ImportError:
     # FIXME: issue warning in this case?
     ElementPath = _SimpleElementPath()
 """
-ElementPath = _SimpleElementPath() # before 2019 python2 situation sat5.0
+ElementPath = _SimpleElementPath()  # before 2019 python2 situation sat5.0
 
 # TODO: add support for custom namespace resolvers/default namespaces
 # TODO: add improved support for incremental parsing
@@ -159,6 +167,7 @@ VERSION = "1.2.6"
 # @see Comment
 # @see ProcessingInstruction
 
+
 class _ElementInterface:
     # <tag attrib>text<child/>...</tag>tail
 
@@ -188,7 +197,7 @@ class _ElementInterface:
     # next sibling element's start tag.  This is either a string or
     # the value None, if there was no text.
 
-    tail = None # text after end tag, if any
+    tail = None  # text after end tag, if any
 
     def __init__(self, tag, attrib):
         self.tag = tag
@@ -410,7 +419,7 @@ class _ElementInterface:
             else:
                 res.append(key)
         return res
-                
+
     ##
     # Gets element attributes, as a sequence.  The attributes are
     # returned in an arbitrary order.
@@ -443,6 +452,7 @@ class _ElementInterface:
             nodes.extend(node.getiterator(tag))
         return nodes
 
+
 # compatibility
 _Element = _ElementInterface
 
@@ -461,11 +471,13 @@ _Element = _ElementInterface
 # @return An element instance.
 # @defreturn Element
 
+
 def Element(tag, attrib={}, **extra):
     attrib = attrib.copy()
     attrib.update(extra)
     return _ElementInterface(tag, attrib)
 
+
 ##
 # Subelement factory.  This function creates an element instance, and
 # appends it to an existing element.
@@ -480,6 +492,7 @@ def Element(tag, attrib={}, **extra):
 # @return An element instance.
 # @defreturn Element
 
+
 def SubElement(parent, tag, attrib={}, **extra):
     attrib = attrib.copy()
     attrib.update(extra)
@@ -487,6 +500,7 @@ def SubElement(parent, tag, attrib={}, **extra):
     parent.append(element)
     return element
 
+
 ##
 # Comment element factory.  This factory function creates a special
 # element that will be serialized as an XML comment.
@@ -498,11 +512,13 @@ def SubElement(parent, tag, attrib={}, **extra):
 # @return An element instance, representing a comment.
 # @defreturn Element
 
+
 def Comment(text=None):
     element = Element(Comment)
     element.text = text
     return element
 
+
 ##
 # PI element factory.  This factory function creates a special element
 # that will be serialized as an XML processing instruction.
@@ -512,6 +528,7 @@ def Comment(text=None):
 # @return An element instance, representing a PI.
 # @defreturn Element
 
+
 def ProcessingInstruction(target, text=None):
     element = Element(ProcessingInstruction)
     element.text = target
@@ -519,6 +536,7 @@ def ProcessingInstruction(target, text=None):
         element.text = element.text + " " + text
     return element
 
+
 PI = ProcessingInstruction
 
 ##
@@ -531,20 +549,25 @@ PI = ProcessingInstruction
 #     an URI, and this argument is interpreted as a local name.
 # @return An opaque object, representing the QName.
 
+
 class QName:
     def __init__(self, text_or_uri, tag=None):
         if tag:
             text_or_uri = "{%s}%s" % (text_or_uri, tag)
         self.text = text_or_uri
+
     def __str__(self):
         return self.text
+
     def __hash__(self):
         return hash(self.text)
+
     def __cmp__(self, other):
         if isinstance(other, QName):
             return cmp(self.text, other.text)
         return cmp(self.text, other)
 
+
 ##
 # ElementTree wrapper class.  This class represents an entire element
 # hierarchy, and adds some extra support for serialization to and from
@@ -554,11 +577,11 @@ class QName:
 # @keyparam file Optional file handle or name.  If given, the
 #     tree is initialized with the contents of this XML file.
 
-class ElementTree:
 
+class ElementTree:
     def __init__(self, element=None, file=None):
         assert element is None or iselement(element)
-        self._root = element # first node
+        self._root = element  # first node
         if file:
             self.parse(file)
 
@@ -688,45 +711,54 @@ class ElementTree:
             file.write("<?%s?>\n" % _escape_cdata(node.text, encoding))
         else:
             items = node.items()
-            xmlns_items = [] # new namespaces in this scope
+            xmlns_items = []  # new namespaces in this scope
             try:
                 if isinstance(tag, QName) or tag[:1] == "{":
                     tag, xmlns = fixtag(tag, namespaces)
-                    if xmlns: xmlns_items.append(xmlns)
+                    if xmlns:
+                        xmlns_items.append(xmlns)
             except TypeError:
                 _raise_serialization_error(tag)
-            file.write(' ' * margin)
+            file.write(" " * margin)
             file.write(_encode("<", encoding) + _encode(tag, encoding))
             if items or xmlns_items:
                 try:
-                    items = sorted(items) # lexical order
+                    items = sorted(items)  # lexical order
                 except:
                     print("*** problem sorting items", items)
                 for k, v in items:
                     try:
                         if isinstance(k, QName) or k[:1] == "{":
                             k, xmlns = fixtag(k, namespaces)
-                            if xmlns: xmlns_items.append(xmlns)
+                            if xmlns:
+                                xmlns_items.append(xmlns)
                     except TypeError:
                         _raise_serialization_error(k)
                     try:
                         if isinstance(v, QName):
                             v, xmlns = fixtag(v, namespaces)
-                            if xmlns: xmlns_items.append(xmlns)
+                            if xmlns:
+                                xmlns_items.append(xmlns)
                     except TypeError:
                         _raise_serialization_error(v)
-                    file.write(" %s=\"%s\"" % (k,v))
+                    file.write(' %s="%s"' % (k, v))
                 for k, v in xmlns_items:
-                    file.write(" %s=\"%s\"" % (k,v))
+                    file.write(' %s="%s"' % (k, v))
             if node.text or len(node):
                 file.write(">")
                 if node.text:
                     file.write(_escape_cdata(node.text, encoding))
-                if len(node) > 0: file.write("\n")
+                if len(node) > 0:
+                    file.write("\n")
                 for n in node:
                     self._write(file, n, encoding, namespaces, margin + 2)
-                if len(node) > 0: file.write(' ' * margin)
-                file.write(_encode("</", encoding) + _encode(tag, encoding) + _encode(">\n", encoding))
+                if len(node) > 0:
+                    file.write(" " * margin)
+                file.write(
+                    _encode("</", encoding)
+                    + _encode(tag, encoding)
+                    + _encode(">\n", encoding)
+                )
             else:
                 file.write("/>\n")
             for k, v in xmlns_items:
@@ -734,6 +766,7 @@ class ElementTree:
         if node.tail:
             file.write(_escape_cdata(node.tail, encoding))
 
+
 # --------------------------------------------------------------------
 # helpers
 
@@ -744,11 +777,13 @@ class ElementTree:
 # @return A true value if this is an element object.
 # @defreturn flag
 
+
 def iselement(element):
     # FIXME: not sure about this; might be a better idea to look
     # for tag/attrib/text attributes
     return isinstance(element, _ElementInterface) or hasattr(element, "tag")
 
+
 ##
 # Writes an element tree or element structure to sys.stdout.  This
 # function should be used for debugging only.
@@ -758,6 +793,7 @@ def iselement(element):
 #
 # @param elem An element tree or an individual element.
 
+
 def dump(elem):
     # debugging
     if not isinstance(elem, ElementTree):
@@ -767,14 +803,16 @@ def dump(elem):
     if not tail or tail[-1] != "\n":
         sys.stdout.write("\n")
 
+
 def _encode(s, encoding):
     try:
         return s.encode(encoding)
     except AttributeError:
-        return s # 1.5.2: assume the string uses the right encoding
+        return s  # 1.5.2: assume the string uses the right encoding
+
 
 if sys.version[:3] == "1.5":
-    _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
+    _escape = re.compile(r"[&<>\"\x80-\xff]+")  # 1.5.2
 else:
     _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
 
@@ -793,10 +831,10 @@ _namespace_map = {
     "http://schemas.xmlsoap.org/wsdl/": "wsdl",
 }
 
+
 def _raise_serialization_error(text):
-    raise TypeError(
-        "cannot serialize %r (type %s)" % (text, type(text).__name__)
-        )
+    raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
+
 
 def _encode_entity(text, pattern=_escape):
     # map reserved and non-ascii characters to numerical entities
@@ -809,25 +847,28 @@ def _encode_entity(text, pattern=_escape):
                 text = "&#%d;" % ord(char)
             append(text)
         return string.join(out, "")
+
     try:
         return _encode(pattern.sub(escape_entities, text), "ascii")
     except TypeError:
         _raise_serialization_error(text)
 
+
 #
 # the following functions assume an ascii-compatible encoding
 # (or "utf-16")
 
+
 def _escape_cdata(text, encoding=None, replace=str.replace):
     # escape character data
     try:
-        if platform.python_version()[0] == '2': # python 2.x.y
+        if platform.python_version()[0] == "2":  # python 2.x.y
             if encoding:
                 try:
                     text = _encode(text, encoding)
                 except UnicodeError:
                     return _encode_entity(text)
-            
+
         text = replace(text, "&", "&amp;")
         text = replace(text, "<", "&lt;")
         text = replace(text, ">", "&gt;")
@@ -841,12 +882,13 @@ def _escape_cdata(text, encoding=None, replace=str.replace):
     except (TypeError, AttributeError):
         _raise_serialization_error(text)
 
+
 def _escape_attrib(text, encoding=None, replace=str.replace):
     # escape attribute value
     try:
         text = replace(text, "&", "&amp;")
-        text = replace(text, "'", "&apos;") # FIXME: overkill
-        text = replace(text, "\"", "&quot;")
+        text = replace(text, "'", "&apos;")  # FIXME: overkill
+        text = replace(text, '"', "&quot;")
         text = replace(text, "<", "&lt;")
         text = replace(text, ">", "&gt;")
         if encoding:
@@ -858,6 +900,7 @@ def _escape_attrib(text, encoding=None, replace=str.replace):
     except (TypeError, AttributeError):
         _raise_serialization_error(text)
 
+
 def fixtag(tag, namespaces):
     # given a decorated tag (of the form {uri}tag), return prefixed
     # tag and namespace declaration, if any
@@ -878,6 +921,7 @@ def fixtag(tag, namespaces):
         xmlns = None
     return "%s:%s" % (prefix, tag), xmlns
 
+
 ##
 # Parses an XML document into an element tree.
 #
@@ -886,11 +930,13 @@ def fixtag(tag, namespaces):
 #     standard {@link XMLTreeBuilder} parser is used.
 # @return An ElementTree instance
 
+
 def parse(source, parser=None):
     tree = ElementTree()
     tree.parse(source, parser)
     return tree
 
+
 ##
 # Parses an XML document into an element tree incrementally, and reports
 # what's going on to the user.
@@ -900,8 +946,8 @@ def parse(source, parser=None):
 #     events are reported.
 # @return A (event, elem) iterator.
 
-class iterparse:
 
+class iterparse:
     def __init__(self, source, events=None):
         if not hasattr(source, "read"):
             # OP TEST
@@ -922,31 +968,50 @@ class iterparse:
                 try:
                     parser.ordered_attributes = 1
                     parser.specified_attributes = 1
-                    def handler(tag, attrib_in, event=event, append=append,
-                                start=self._parser._start_list):
+
+                    def handler(
+                        tag,
+                        attrib_in,
+                        event=event,
+                        append=append,
+                        start=self._parser._start_list,
+                    ):
                         append((event, start(tag, attrib_in)))
+
                     parser.StartElementHandler = handler
                 except AttributeError:
-                    def handler(tag, attrib_in, event=event, append=append,
-                                start=self._parser._start):
+
+                    def handler(
+                        tag,
+                        attrib_in,
+                        event=event,
+                        append=append,
+                        start=self._parser._start,
+                    ):
                         append((event, start(tag, attrib_in)))
+
                     parser.StartElementHandler = handler
             elif event == "end":
-                def handler(tag, event=event, append=append,
-                            end=self._parser._end):
+
+                def handler(tag, event=event, append=append, end=self._parser._end):
                     append((event, end(tag)))
+
                 parser.EndElementHandler = handler
             elif event == "start-ns":
+
                 def handler(prefix, uri, event=event, append=append):
                     try:
                         uri = _encode(uri, "ascii")
                     except UnicodeError:
                         pass
                     append((event, (prefix or "", uri)))
+
                 parser.StartNamespaceDeclHandler = handler
             elif event == "end-ns":
+
                 def handler(prefix, event=event, append=append):
                     append((event, None))
+
                 parser.EndNamespaceDeclHandler = handler
 
     def next(self):
@@ -975,12 +1040,16 @@ class iterparse:
 
     try:
         iter
+
         def __iter__(self):
             return self
+
     except NameError:
+
         def __getitem__(self, index):
             return self.next()
 
+
 ##
 # Parses an XML document from a string constant.  This function can
 # be used to embed "XML literals" in Python code.
@@ -989,11 +1058,13 @@ class iterparse:
 # @return An Element instance.
 # @defreturn Element
 
+
 def XML(text):
     parser = XMLTreeBuilder()
     parser.feed(text)
     return parser.close()
 
+
 ##
 # Parses an XML document from a string constant, and also returns
 # a dictionary which maps from element id:s to elements.
@@ -1002,6 +1073,7 @@ def XML(text):
 # @return A tuple containing an Element instance and a dictionary.
 # @defreturn (Element, dictionary)
 
+
 def XMLID(text):
     parser = XMLTreeBuilder()
     parser.feed(text)
@@ -1013,6 +1085,7 @@ def XMLID(text):
             ids[id] = elem
     return tree, ids
 
+
 ##
 # Parses an XML document from a string constant.  Same as {@link #XML}.
 #
@@ -1031,9 +1104,11 @@ fromstring = XML
 # @return An encoded string containing the XML data.
 # @defreturn string
 
+
 def tostring(element, encoding=None):
     class dummy:
         pass
+
     data = []
     file = dummy()
     file.write = data.append
@@ -1045,6 +1120,7 @@ def tostring(element, encoding=None):
         data2.append(item)
     return "".join(data2)
 
+
 ##
 # Generic element structure builder.  This builder converts a sequence
 # of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
@@ -1056,13 +1132,13 @@ def tostring(element, encoding=None):
 # @param element_factory Optional element factory.  This factory
 #    is called to create new Element instances, as necessary.
 
-class TreeBuilder:
 
+class TreeBuilder:
     def __init__(self, element_factory=None):
-        self._data = [] # data collector
-        self._elem = [] # element stack
-        self._last = None # last element
-        self._tail = None # true if we're after an end tag
+        self._data = []  # data collector
+        self._elem = []  # element stack
+        self._last = None  # last element
+        self._tail = None  # true if we're after an end tag
         if element_factory is None:
             element_factory = _ElementInterface
         self._factory = element_factory
@@ -1132,12 +1208,14 @@ class TreeBuilder:
     def end(self, tag):
         self._flush()
         self._last = self._elem.pop()
-        assert self._last.tag == tag,\
-               "end tag mismatch (expected %s, got %s)" % (
-                   self._last.tag, tag)
+        assert self._last.tag == tag, "end tag mismatch (expected %s, got %s)" % (
+            self._last.tag,
+            tag,
+        )
         self._tail = 1
         return self._last
 
+
 ##
 # Element structure builder for XML source data, based on the
 # <b>expat</b> parser.
@@ -1149,20 +1227,18 @@ class TreeBuilder:
 # @see #ElementTree
 # @see #TreeBuilder
 
-class XMLTreeBuilder:
 
+class XMLTreeBuilder:
     def __init__(self, html=0, target=None):
         try:
             from xml.parsers import expat
         except ImportError:
-            raise ImportError(
-                "No module named expat; use SimpleXMLTreeBuilder instead"
-                )
+            raise ImportError("No module named expat; use SimpleXMLTreeBuilder instead")
         self._parser = parser = expat.ParserCreate(None, "}")
         if target is None:
             target = TreeBuilder()
         self._target = target
-        self._names = {} # name memo cache
+        self._names = {}  # name memo cache
         # callbacks
         parser.DefaultHandlerExpand = self._default
         parser.StartElementHandler = self._start
@@ -1180,8 +1256,8 @@ class XMLTreeBuilder:
             parser.StartElementHandler = self._start_list
         except AttributeError:
             pass
-        #encoding = None
-        #if not parser.returns_unicode:
+        # encoding = None
+        # if not parser.returns_unicode:
         #    encoding = "utf-8"
         # target.xml(encoding, None)
         self._doctype = None
@@ -1219,7 +1295,7 @@ class XMLTreeBuilder:
         attrib = {}
         if attrib_in:
             for i in range(0, len(attrib_in), 2):
-                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
+                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i + 1])
         return self._target.start(tag, attrib)
 
     def _data(self, text):
@@ -1236,13 +1312,17 @@ class XMLTreeBuilder:
                 self._target.data(self.entity[text[1:-1]])
             except KeyError:
                 from xml.parsers import expat
+
                 raise expat.error(
-                    "undefined entity %s: line %d, column %d" %
-                    (text, self._parser.ErrorLineNumber,
-                    self._parser.ErrorColumnNumber)
+                    "undefined entity %s: line %d, column %d"
+                    % (
+                        text,
+                        self._parser.ErrorLineNumber,
+                        self._parser.ErrorColumnNumber,
                     )
+                )
         elif prefix == "<" and text[:9] == "<!DOCTYPE":
-            self._doctype = [] # inside a doctype declaration
+            self._doctype = []  # inside a doctype declaration
         elif self._doctype is not None:
             # parse doctype contents
             if prefix == ">":
@@ -1293,7 +1373,7 @@ class XMLTreeBuilder:
         try:
             self._parser.Parse(data, 0)
         except:
-            print("*** problem feed:\n%s" % data.decode('utf-8'))
+            print("*** problem feed:\n%s" % data.decode("utf-8"))
 
     ##
     # Finishes feeding data to the parser.
@@ -1302,7 +1382,7 @@ class XMLTreeBuilder:
     # @defreturn Element
 
     def close(self):
-        self._parser.Parse("", 1) # end of data
+        self._parser.Parse("", 1)  # end of data
         tree = self._target.close()
-        del self._target, self._parser # get rid of circular references
+        del self._target, self._parser  # get rid of circular references
         return tree
index 68635ce6d819df15a5a0f14af2f1164947dbab72..36dc08664bae7babfd957a192c593b1c7d90f6be 100644 (file)
@@ -33,7 +33,7 @@
 
 """
 
-#---------------------------------------------------------------------
+# ---------------------------------------------------------------------
 # Licensed to PSF under a Contributor Agreement.
 # See http://www.python.org/psf/license for licensing details.
 #
@@ -74,20 +74,27 @@ __all__ = [
     # public symbols
     "Comment",
     "dump",
-    "Element", "ElementTree",
-    "fromstring", "fromstringlist",
-    "iselement", "iterparse",
-    "parse", "ParseError",
-    "PI", "ProcessingInstruction",
+    "Element",
+    "ElementTree",
+    "fromstring",
+    "fromstringlist",
+    "iselement",
+    "iterparse",
+    "parse",
+    "ParseError",
+    "PI",
+    "ProcessingInstruction",
     "QName",
     "SubElement",
-    "tostring", "tostringlist",
+    "tostring",
+    "tostringlist",
     "TreeBuilder",
     "VERSION",
-    "XML", "XMLID",
+    "XML",
+    "XMLID",
     "XMLParser",
     "register_namespace",
-    ]
+]
 
 VERSION = "1.3.0"
 
@@ -109,14 +116,16 @@ class ParseError(SyntaxError):
         'position' - the line and column of the error
 
     """
+
     pass
 
+
 # --------------------------------------------------------------------
 
 
 def iselement(element):
     """Return True if *element* appears to be an Element."""
-    return hasattr(element, 'tag')
+    return hasattr(element, "tag")
 
 
 class Element:
@@ -165,8 +174,9 @@ class Element:
 
     def __init__(self, tag, attrib={}, **extra):
         if not isinstance(attrib, dict):
-            raise TypeError("attrib must be dict, not %s" % (
-                attrib.__class__.__name__,))
+            raise TypeError(
+                "attrib must be dict, not %s" % (attrib.__class__.__name__,)
+            )
         attrib = attrib.copy()
         attrib.update(extra)
         self.tag = tag
@@ -207,9 +217,10 @@ class Element:
         warnings.warn(
             "The behavior of this method will change in future versions.  "
             "Use specific 'len(elem)' or 'elem is not None' test instead.",
-            FutureWarning, stacklevel=2
-            )
-        return len(self._children) != 0 # emulate old behaviour, for now
+            FutureWarning,
+            stacklevel=2,
+        )
+        return len(self._children) != 0  # emulate old behaviour, for now
 
     def __getitem__(self, index):
         return self._children[index]
@@ -255,7 +266,7 @@ class Element:
         # Need to refer to the actual Python implementation, not the
         # shadowing C implementation.
         if not isinstance(e, _Element_Py):
-            raise TypeError('expected an Element, not %s' % type(e).__name__)
+            raise TypeError("expected an Element, not %s" % type(e).__name__)
 
     def remove(self, subelement):
         """Remove matching subelement.
@@ -281,8 +292,9 @@ class Element:
         warnings.warn(
             "This method will be removed in future versions.  "
             "Use 'list(elem)' or iteration over elem instead.",
-            DeprecationWarning, stacklevel=2
-            )
+            DeprecationWarning,
+            stacklevel=2,
+        )
         return self._children
 
     def find(self, path, namespaces=None):
@@ -414,7 +426,8 @@ class Element:
         warnings.warn(
             "This method will be removed in future versions.  "
             "Use 'elem.iter()' or 'list(elem.iter())' instead.",
-            PendingDeprecationWarning, stacklevel=2
+            PendingDeprecationWarning,
+            stacklevel=2,
         )
         return list(self.iter(tag))
 
@@ -485,6 +498,7 @@ def ProcessingInstruction(target, text=None):
         element.text = element.text + " " + text
     return element
 
+
 PI = ProcessingInstruction
 
 
@@ -502,41 +516,52 @@ class QName:
     be interpreted as a local name.
 
     """
+
     def __init__(self, text_or_uri, tag=None):
         if tag:
             text_or_uri = "{%s}%s" % (text_or_uri, tag)
         self.text = text_or_uri
+
     def __str__(self):
         return self.text
+
     def __repr__(self):
-        return '<QName %r>' % (self.text,)
+        return "<QName %r>" % (self.text,)
+
     def __hash__(self):
         return hash(self.text)
+
     def __le__(self, other):
         if isinstance(other, QName):
             return self.text <= other.text
         return self.text <= other
+
     def __lt__(self, other):
         if isinstance(other, QName):
             return self.text < other.text
         return self.text < other
+
     def __ge__(self, other):
         if isinstance(other, QName):
             return self.text >= other.text
         return self.text >= other
+
     def __gt__(self, other):
         if isinstance(other, QName):
             return self.text > other.text
         return self.text > other
+
     def __eq__(self, other):
         if isinstance(other, QName):
             return self.text == other.text
         return self.text == other
+
     def __ne__(self, other):
         if isinstance(other, QName):
             return self.text != other.text
         return self.text != other
 
+
 # --------------------------------------------------------------------
 
 
@@ -551,9 +576,10 @@ class ElementTree:
     contents will be used to initialize the tree with.
 
     """
+
     def __init__(self, element=None, file=None):
         # assert element is None or iselement(element)
-        self._root = element # first node
+        self._root = element  # first node
         if file:
             self.parse(file)
 
@@ -590,7 +616,7 @@ class ElementTree:
             if parser is None:
                 # If no parser was specified, create a default XMLParser
                 parser = XMLParser()
-                if hasattr(parser, '_parse_whole'):
+                if hasattr(parser, "_parse_whole"):
                     # The default XMLParser, when it comes from an accelerator,
                     # can define an internal _parse_whole API for efficiency.
                     # It can be used to parse the whole source without feeding
@@ -626,7 +652,8 @@ class ElementTree:
         warnings.warn(
             "This method will be removed in future versions.  "
             "Use 'tree.iter()' or 'list(tree.iter())' instead.",
-            PendingDeprecationWarning, stacklevel=2
+            PendingDeprecationWarning,
+            stacklevel=2,
         )
         return list(self.iter(tag))
 
@@ -648,8 +675,9 @@ class ElementTree:
                 "This search is broken in 1.3 and earlier, and will be "
                 "fixed in a future version.  If you rely on the current "
                 "behaviour, change it to %r" % path,
-                FutureWarning, stacklevel=2
-                )
+                FutureWarning,
+                stacklevel=2,
+            )
         return self._root.find(path, namespaces)
 
     def findtext(self, path, default=None, namespaces=None):
@@ -670,8 +698,9 @@ class ElementTree:
                 "This search is broken in 1.3 and earlier, and will be "
                 "fixed in a future version.  If you rely on the current "
                 "behaviour, change it to %r" % path,
-                FutureWarning, stacklevel=2
-                )
+                FutureWarning,
+                stacklevel=2,
+            )
         return self._root.findtext(path, default, namespaces)
 
     def findall(self, path, namespaces=None):
@@ -692,8 +721,9 @@ class ElementTree:
                 "This search is broken in 1.3 and earlier, and will be "
                 "fixed in a future version.  If you rely on the current "
                 "behaviour, change it to %r" % path,
-                FutureWarning, stacklevel=2
-                )
+                FutureWarning,
+                stacklevel=2,
+            )
         return self._root.findall(path, namespaces)
 
     def iterfind(self, path, namespaces=None):
@@ -714,16 +744,21 @@ class ElementTree:
                 "This search is broken in 1.3 and earlier, and will be "
                 "fixed in a future version.  If you rely on the current "
                 "behaviour, change it to %r" % path,
-                FutureWarning, stacklevel=2
-                )
+                FutureWarning,
+                stacklevel=2,
+            )
         return self._root.iterfind(path, namespaces)
 
-    def write(self, file_or_filename,
-              encoding=None,
-              xml_declaration=None,
-              default_namespace=None,
-              method=None, *,
-              short_empty_elements=True):
+    def write(
+        self,
+        file_or_filename,
+        encoding=None,
+        xml_declaration=None,
+        default_namespace=None,
+        method=None,
+        *,
+        short_empty_elements=True
+    ):
         """Write element tree to a file as XML.
 
         Arguments:
@@ -758,31 +793,42 @@ class ElementTree:
                 encoding = "us-ascii"
         enc_lower = encoding.lower()
         with _get_writer(file_or_filename, enc_lower) as write:
-            if method == "xml" and (xml_declaration or
-                    (xml_declaration is None and
-                     enc_lower not in ("utf-8", "us-ascii", "unicode"))):
+            if method == "xml" and (
+                xml_declaration
+                or (
+                    xml_declaration is None
+                    and enc_lower not in ("utf-8", "us-ascii", "unicode")
+                )
+            ):
                 declared_encoding = encoding
                 if enc_lower == "unicode":
                     # Retrieve the default encoding for the xml declaration
                     import locale
+
                     declared_encoding = locale.getpreferredencoding()
-                write("<?xml version='1.0' encoding='%s'?>\n" % (
-                    declared_encoding,))
+                write("<?xml version='1.0' encoding='%s'?>\n" % (declared_encoding,))
             if method == "text":
                 _serialize_text(write, self._root)
             else:
                 qnames, namespaces = _namespaces(self._root, default_namespace)
                 serialize = _serialize[method]
-                serialize(write, self._root, qnames, namespaces,
-                          short_empty_elements=short_empty_elements)
+                serialize(
+                    write,
+                    self._root,
+                    qnames,
+                    namespaces,
+                    short_empty_elements=short_empty_elements,
+                )
 
     def write_c14n(self, file):
         # lxml.etree compatibility.  use output method instead
         return self.write(file, method="c14n")
 
+
 # --------------------------------------------------------------------
 # serialization support
 
+
 @contextlib.contextmanager
 def _get_writer(file_or_filename, encoding):
     # returns text write method and release all resources after using
@@ -793,8 +839,9 @@ def _get_writer(file_or_filename, encoding):
         if encoding == "unicode":
             file = open(file_or_filename, "w")
         else:
-            file = open(file_or_filename, "w", encoding=encoding,
-                        errors="xmlcharrefreplace")
+            file = open(
+                file_or_filename, "w", encoding=encoding, errors="xmlcharrefreplace"
+            )
         with file:
             yield file.write
     else:
@@ -826,15 +873,15 @@ def _get_writer(file_or_filename, encoding):
                         file.tell = file_or_filename.tell
                     except AttributeError:
                         pass
-                file = io.TextIOWrapper(file,
-                                        encoding=encoding,
-                                        errors="xmlcharrefreplace",
-                                        newline="\n")
+                file = io.TextIOWrapper(
+                    file, encoding=encoding, errors="xmlcharrefreplace", newline="\n"
+                )
                 # Keep the original file open when the TextIOWrapper is
                 # destroyed
                 stack.callback(file.detach)
                 yield file.write
 
+
 def _namespaces(elem, default_namespace=None):
     # identify namespaces used in this tree
 
@@ -861,14 +908,14 @@ def _namespaces(elem, default_namespace=None):
                 if prefix:
                     qnames[qname] = "%s:%s" % (prefix, tag)
                 else:
-                    qnames[qname] = tag # default element
+                    qnames[qname] = tag  # default element
             else:
                 if default_namespace:
                     # FIXME: can this be handled in XML 1.0?
                     raise ValueError(
                         "cannot use non-qualified names with "
                         "default_namespace option"
-                        )
+                    )
                 qnames[qname] = qname
         except TypeError:
             _raise_serialization_error(qname)
@@ -896,8 +943,8 @@ def _namespaces(elem, default_namespace=None):
             add_qname(text.text)
     return qnames, namespaces
 
-def _serialize_xml(write, elem, qnames, namespaces,
-                   short_empty_elements, **kwargs):
+
+def _serialize_xml(write, elem, qnames, namespaces, short_empty_elements, **kwargs):
     tag = elem.tag
     text = elem.text
     if tag is Comment:
@@ -910,21 +957,20 @@ def _serialize_xml(write, elem, qnames, namespaces,
             if text:
                 write(_escape_cdata(text))
             for e in elem:
-                _serialize_xml(write, e, qnames, None,
-                               short_empty_elements=short_empty_elements)
+                _serialize_xml(
+                    write, e, qnames, None, short_empty_elements=short_empty_elements
+                )
         else:
             write("<" + tag)
             items = list(elem.items())
             if items or namespaces:
                 if namespaces:
-                    for v, k in sorted(namespaces.items(),
-                                       key=lambda x: x[1]):  # sort on prefix
+                    for v, k in sorted(
+                        namespaces.items(), key=lambda x: x[1]
+                    ):  # sort on prefix
                         if k:
                             k = ":" + k
-                        write(" xmlns%s=\"%s\"" % (
-                            k,
-                            _escape_attrib(v)
-                            ))
+                        write(' xmlns%s="%s"' % (k, _escape_attrib(v)))
                 for k, v in sorted(items):  # lexical order
                     if isinstance(k, QName):
                         k = k.text
@@ -932,85 +978,116 @@ def _serialize_xml(write, elem, qnames, namespaces,
                         v = qnames[v.text]
                     else:
                         v = _escape_attrib(v)
-                    write(" %s=\"%s\"" % (qnames[k], v))
+                    write(' %s="%s"' % (qnames[k], v))
             if text or len(elem) or not short_empty_elements:
                 write(">")
                 if text:
                     write(_escape_cdata(text))
                 for e in elem:
-                    _serialize_xml(write, e, qnames, None,
-                                   short_empty_elements=short_empty_elements)
+                    _serialize_xml(
+                        write,
+                        e,
+                        qnames,
+                        None,
+                        short_empty_elements=short_empty_elements,
+                    )
                 write("</" + tag + ">")
             else:
                 write(" />")
     if elem.tail:
         write(_escape_cdata(elem.tail))
 
+
 # add from cvw jan 2019
-def _serialize_pretty_xml(write, elem, qnames, namespaces,
-                     short_empty_elements, indent=0):
+def _serialize_pretty_xml(
+    write, elem, qnames, namespaces, short_empty_elements, indent=0
+):
     # print("*****pretty***** indent", elem.tag, indent)
     tag = elem.tag
     text = elem.text
     if tag is Comment:
-      write("<!--%s-->" % text)
+        write("<!--%s-->" % text)
     elif tag is ProcessingInstruction:
-      write("<?%s?>" % text)
+        write("<?%s?>" % text)
     else:
-      tag = qnames[tag]
-      if tag is None:
-        if text:
-          write(_escape_cdata(text))
-        for e in elem:
-          _serialize_pretty_xml(write, e, qnames, None,
-                         short_empty_elements=short_empty_elements, indent=indent)
-      else:
-        write(" "*indent + "<" + tag)
-        items = list(elem.items())
-        if items or namespaces:
-          if namespaces:
-            for v, k in sorted(namespaces.items(),
-                               key=lambda x: x[1]):  # sort on prefix
-              if k:
-                k = ":" + k
-              write(" xmlns%s=\"%s\"" % (
-                k,
-                _escape_attrib(v)
-              ))
-          for k, v in sorted(items):  # lexical order
-            # print("atrrib ", k, v)
-            if isinstance(k, QName):
-              k = k.text
-            if isinstance(v, QName):
-              v = qnames[v.text]
-            else:
-              v = _escape_attrib(v)
-            write(" %s=\"%s\"" % (qnames[k], v))
-        if text or len(elem) or not short_empty_elements:
-          if text:
-            write(">")
-            write(_escape_cdata(text))
-          else:
-            write(">\n")
-
-          for e in elem:
-            _serialize_pretty_xml(write, e, qnames, None,
-                           short_empty_elements=short_empty_elements, indent=indent+2)
-          write(" "*indent + "</" + tag + ">\n")
+        tag = qnames[tag]
+        if tag is None:
+            if text:
+                write(_escape_cdata(text))
+            for e in elem:
+                _serialize_pretty_xml(
+                    write,
+                    e,
+                    qnames,
+                    None,
+                    short_empty_elements=short_empty_elements,
+                    indent=indent,
+                )
         else:
-          write(" />\n")
+            write(" " * indent + "<" + tag)
+            items = list(elem.items())
+            if items or namespaces:
+                if namespaces:
+                    for v, k in sorted(
+                        namespaces.items(), key=lambda x: x[1]
+                    ):  # sort on prefix
+                        if k:
+                            k = ":" + k
+                        write(' xmlns%s="%s"' % (k, _escape_attrib(v)))
+                for k, v in sorted(items):  # lexical order
+                    # print("atrrib ", k, v)
+                    if isinstance(k, QName):
+                        k = k.text
+                    if isinstance(v, QName):
+                        v = qnames[v.text]
+                    else:
+                        v = _escape_attrib(v)
+                    write(' %s="%s"' % (qnames[k], v))
+            if text or len(elem) or not short_empty_elements:
+                if text:
+                    write(">")
+                    write(_escape_cdata(text))
+                else:
+                    write(">\n")
+
+                for e in elem:
+                    _serialize_pretty_xml(
+                        write,
+                        e,
+                        qnames,
+                        None,
+                        short_empty_elements=short_empty_elements,
+                        indent=indent + 2,
+                    )
+                write(" " * indent + "</" + tag + ">\n")
+            else:
+                write(" />\n")
     if elem.tail:
-      write(_escape_cdata(elem.tail))
+        write(_escape_cdata(elem.tail))
 
 
-HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
-              "img", "input", "isindex", "link", "meta", "param")
+HTML_EMPTY = (
+    "area",
+    "base",
+    "basefont",
+    "br",
+    "col",
+    "frame",
+    "hr",
+    "img",
+    "input",
+    "isindex",
+    "link",
+    "meta",
+    "param",
+)
 
 try:
     HTML_EMPTY = set(HTML_EMPTY)
 except NameError:
     pass
 
+
 def _serialize_html(write, elem, qnames, namespaces, **kwargs):
     tag = elem.tag
     text = elem.text
@@ -1030,14 +1107,12 @@ def _serialize_html(write, elem, qnames, namespaces, **kwargs):
             items = list(elem.items())
             if items or namespaces:
                 if namespaces:
-                    for v, k in sorted(namespaces.items(),
-                                       key=lambda x: x[1]):  # sort on prefix
+                    for v, k in sorted(
+                        namespaces.items(), key=lambda x: x[1]
+                    ):  # sort on prefix
                         if k:
                             k = ":" + k
-                        write(" xmlns%s=\"%s\"" % (
-                            k,
-                            _escape_attrib(v)
-                            ))
+                        write(' xmlns%s="%s"' % (k, _escape_attrib(v)))
                 for k, v in sorted(items):  # lexical order
                     if isinstance(k, QName):
                         k = k.text
@@ -1046,7 +1121,7 @@ def _serialize_html(write, elem, qnames, namespaces, **kwargs):
                     else:
                         v = _escape_attrib_html(v)
                     # FIXME: handle boolean attributes
-                    write(" %s=\"%s\"" % (qnames[k], v))
+                    write(' %s="%s"' % (qnames[k], v))
             write(">")
             ltag = tag.lower()
             if text:
@@ -1061,19 +1136,21 @@ def _serialize_html(write, elem, qnames, namespaces, **kwargs):
     if elem.tail:
         write(_escape_cdata(elem.tail))
 
+
 def _serialize_text(write, elem):
     for part in elem.itertext():
         write(part)
     if elem.tail:
         write(elem.tail)
 
+
 _serialize = {
     "xml": _serialize_xml,
     "pretty_xml": _serialize_pretty_xml,
     "html": _serialize_html,
     "text": _serialize_text,
-# this optional method is imported at the end of the module
-#   "c14n": _serialize_c14n,
+    # this optional method is imported at the end of the module
+    #   "c14n": _serialize_c14n,
 }
 
 
@@ -1096,6 +1173,7 @@ def register_namespace(prefix, uri):
             del _namespace_map[k]
     _namespace_map[uri] = prefix
 
+
 _namespace_map = {
     # "well-known" namespace prefixes
     "http://www.w3.org/XML/1998/namespace": "xml",
@@ -1111,10 +1189,10 @@ _namespace_map = {
 # For tests and troubleshooting
 register_namespace._namespace_map = _namespace_map
 
+
 def _raise_serialization_error(text):
-    raise TypeError(
-        "cannot serialize %r (type %s)" % (text, type(text).__name__)
-        )
+    raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
+
 
 def _escape_cdata(text):
     # escape character data
@@ -1132,6 +1210,7 @@ def _escape_cdata(text):
     except (TypeError, AttributeError):
         _raise_serialization_error(text)
 
+
 def _escape_attrib(text):
     # escape attribute value
     try:
@@ -1141,14 +1220,15 @@ def _escape_attrib(text):
             text = text.replace("<", "&lt;")
         if ">" in text:
             text = text.replace(">", "&gt;")
-        if "\"" in text:
-            text = text.replace("\"", "&quot;")
+        if '"' in text:
+            text = text.replace('"', "&quot;")
         if "\n" in text:
             text = text.replace("\n", "&#10;")
         return text
     except (TypeError, AttributeError):
         _raise_serialization_error(text)
 
+
 def _escape_attrib_html(text):
     # escape attribute value
     try:
@@ -1156,16 +1236,17 @@ def _escape_attrib_html(text):
             text = text.replace("&", "&amp;")
         if ">" in text:
             text = text.replace(">", "&gt;")
-        if "\"" in text:
-            text = text.replace("\"", "&quot;")
+        if '"' in text:
+            text = text.replace('"', "&quot;")
         return text
     except (TypeError, AttributeError):
         _raise_serialization_error(text)
 
+
 # --------------------------------------------------------------------
 
-def tostring(element, encoding=None, method=None, *,
-             short_empty_elements=True):
+
+def tostring(element, encoding=None, method=None, *, short_empty_elements=True):
     """Generate string representation of XML element.
 
     All subelements are included.  If encoding is "unicode", a string
@@ -1178,13 +1259,16 @@ def tostring(element, encoding=None, method=None, *,
     Returns an (optionally) encoded string containing the XML data.
 
     """
-    stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
-    ElementTree(element).write(stream, encoding, method=method,
-                               short_empty_elements=short_empty_elements)
+    stream = io.StringIO() if encoding == "unicode" else io.BytesIO()
+    ElementTree(element).write(
+        stream, encoding, method=method, short_empty_elements=short_empty_elements
+    )
     return stream.getvalue()
 
+
 class _ListDataStream(io.BufferedIOBase):
     """An auxiliary stream accumulating into a list reference."""
+
     def __init__(self, lst):
         self.lst = lst
 
@@ -1200,12 +1284,13 @@ class _ListDataStream(io.BufferedIOBase):
     def tell(self):
         return len(self.lst)
 
-def tostringlist(element, encoding=None, method=None, *,
-                 short_empty_elements=True):
+
+def tostringlist(element, encoding=None, method=None, *, short_empty_elements=True):
     lst = []
     stream = _ListDataStream(lst)
-    ElementTree(element).write(stream, encoding, method=method,
-                               short_empty_elements=short_empty_elements)
+    ElementTree(element).write(
+        stream, encoding, method=method, short_empty_elements=short_empty_elements
+    )
     return lst
 
 
@@ -1227,6 +1312,7 @@ def dump(elem):
     if not tail or tail[-1] != "\n":
         sys.stdout.write("\n")
 
+
 # --------------------------------------------------------------------
 # parsing
 
@@ -1273,7 +1359,6 @@ def iterparse(source, events=None, parser=None):
 
 
 class XMLPullParser:
-
     def __init__(self, events=None, *, _parser=None):
         # The _parser argument is for internal use only and must not be relied
         # upon in user code. It will be removed in a future release.
@@ -1342,7 +1427,6 @@ class XMLPullParser:
 
 
 class _IterParseIterator:
-
     def __init__(self, source, events, parser, close_source=False):
         # Use the internal, undocumented _parser argument for now; When the
         # parser argument of iterparse is removed, this can be killed.
@@ -1415,9 +1499,11 @@ def XMLID(text, parser=None):
             ids[id] = elem
     return tree, ids
 
+
 # Parse XML document from string constant.  Alias for XML().
 fromstring = XML
 
+
 def fromstringlist(sequence, parser=None):
     """Parse XML document from sequence of string fragments.
 
@@ -1433,6 +1519,7 @@ def fromstringlist(sequence, parser=None):
         parser.feed(text)
     return parser.close()
 
+
 # --------------------------------------------------------------------
 
 
@@ -1449,11 +1536,12 @@ class TreeBuilder:
     to create new Element instances, as necessary.
 
     """
+
     def __init__(self, element_factory=None):
-        self._data = [] # data collector
-        self._elem = [] # element stack
-        self._last = None # last element
-        self._tail = None # true if we're after an end tag
+        self._data = []  # data collector
+        self._elem = []  # element stack
+        self._last = None  # last element
+        self._tail = None  # true if we're after an end tag
         if element_factory is None:
             element_factory = Element
         self._factory = element_factory
@@ -1503,9 +1591,10 @@ class TreeBuilder:
         """
         self._flush()
         self._last = self._elem.pop()
-        assert self._last.tag == tag,\
-               "end tag mismatch (expected %s, got %s)" % (
-                   self._last.tag, tag)
+        assert self._last.tag == tag, "end tag mismatch (expected %s, got %s)" % (
+            self._last.tag,
+            tag,
+        )
         self._tail = 1
         return self._last
 
@@ -1531,7 +1620,7 @@ class XMLParser:
             except ImportError:
                 raise ImportError(
                     "No module named expat; use SimpleXMLTreeBuilder instead"
-                    )
+                )
         parser = expat.ParserCreate(encoding, "}")
         if target is None:
             target = TreeBuilder()
@@ -1539,19 +1628,19 @@ class XMLParser:
         self.parser = self._parser = parser
         self.target = self._target = target
         self._error = expat.error
-        self._names = {} # name memo cache
+        self._names = {}  # name memo cache
         # main callbacks
         parser.DefaultHandlerExpand = self._default
-        if hasattr(target, 'start'):
+        if hasattr(target, "start"):
             parser.StartElementHandler = self._start
-        if hasattr(target, 'end'):
+        if hasattr(target, "end"):
             parser.EndElementHandler = self._end
-        if hasattr(target, 'data'):
+        if hasattr(target, "data"):
             parser.CharacterDataHandler = target.data
         # miscellaneous callbacks
-        if hasattr(target, 'comment'):
+        if hasattr(target, "comment"):
             parser.CommentHandler = target.comment
-        if hasattr(target, 'pi'):
+        if hasattr(target, "pi"):
             parser.ProcessingInstructionHandler = target.pi
         # Configure pyexpat: buffering, new-style attribute handling.
         parser.buffer_text = 1
@@ -1562,7 +1651,7 @@ class XMLParser:
         try:
             self.version = "Expat %d.%d.%d" % expat.version_info
         except AttributeError:
-            pass # unknown
+            pass  # unknown
 
     def _setevents(self, events_queue, events_to_report):
         # Internal API for XMLPullParser
@@ -1577,22 +1666,30 @@ class XMLParser:
             if event_name == "start":
                 parser.ordered_attributes = 1
                 parser.specified_attributes = 1
-                def handler(tag, attrib_in, event=event_name, append=append,
-                            start=self._start):
+
+                def handler(
+                    tag, attrib_in, event=event_name, append=append, start=self._start
+                ):
                     append((event, start(tag, attrib_in)))
+
                 parser.StartElementHandler = handler
             elif event_name == "end":
-                def handler(tag, event=event_name, append=append,
-                            end=self._end):
+
+                def handler(tag, event=event_name, append=append, end=self._end):
                     append((event, end(tag)))
+
                 parser.EndElementHandler = handler
             elif event_name == "start-ns":
+
                 def handler(prefix, uri, event=event_name, append=append):
                     append((event, (prefix or "", uri or "")))
+
                 parser.StartNamespaceDeclHandler = handler
             elif event_name == "end-ns":
+
                 def handler(prefix, event=event_name, append=append):
                     append((event, None))
+
                 parser.EndNamespaceDeclHandler = handler
             else:
                 raise ValueError("unknown event %r" % event_name)
@@ -1623,7 +1720,7 @@ class XMLParser:
         attrib = {}
         if attr_list:
             for i in range(0, len(attr_list), 2):
-                attrib[fixname(attr_list[i])] = attr_list[i+1]
+                attrib[fixname(attr_list[i])] = attr_list[i + 1]
         return self.target.start(tag, attrib)
 
     def _end(self, tag):
@@ -1641,17 +1738,17 @@ class XMLParser:
                 data_handler(self.entity[text[1:-1]])
             except KeyError:
                 from xml.parsers import expat
+
                 err = expat.error(
-                    "undefined entity %s: line %d, column %d" %
-                    (text, self.parser.ErrorLineNumber,
-                    self.parser.ErrorColumnNumber)
-                    )
-                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
+                    "undefined entity %s: line %d, column %d"
+                    % (text, self.parser.ErrorLineNumber, self.parser.ErrorColumnNumber)
+                )
+                err.code = 11  # XML_ERROR_UNDEFINED_ENTITY
                 err.lineno = self.parser.ErrorLineNumber
                 err.offset = self.parser.ErrorColumnNumber
                 raise err
         elif prefix == "<" and text[:9] == "<!DOCTYPE":
-            self._doctype = [] # inside a doctype declaration
+            self._doctype = []  # inside a doctype declaration
         elif self._doctype is not None:
             # parse doctype contents
             if prefix == ">":
@@ -1692,7 +1789,7 @@ class XMLParser:
             "This method of XMLParser is deprecated.  Define doctype() "
             "method on the TreeBuilder target.",
             DeprecationWarning,
-            )
+        )
 
     # sentinel, if doctype is redefined in a subclass
     __doctype = doctype
@@ -1707,7 +1804,7 @@ class XMLParser:
     def close(self):
         """Finish feeding data to parser and return element structure."""
         try:
-            self.parser.Parse("", 1) # end of data
+            self.parser.Parse("", 1)  # end of data
         except self._error as v:
             self._raiseerror(v)
         try:
@@ -1732,4 +1829,4 @@ try:
     # Element, SubElement, ParseError, TreeBuilder, XMLParser
     from _elementtree import *
 except ImportError:
-    pass
\ No newline at end of file
+    pass
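
Note on the file above: this vendored ElementTree copy adds a custom pretty_xml serializer that indents children by two spaces per level. The following is a standalone sketch of that indentation idea only, written against the stock xml.etree.ElementTree API (it is not the vendored serializer, and attribute/text escaping is omitted for brevity):

import xml.etree.ElementTree as ET

def pretty(elem, indent=0):
    # Render an Element recursively, two spaces of indentation per level.
    pad = " " * indent
    attrs = "".join(' %s="%s"' % (k, v) for k, v in sorted(elem.items()))
    text = (elem.text or "").strip()
    if len(elem) == 0 and not text:
        return "%s<%s%s />\n" % (pad, elem.tag, attrs)
    if len(elem) == 0:
        return "%s<%s%s>%s</%s>\n" % (pad, elem.tag, attrs, text, elem.tag)
    out = "%s<%s%s>\n" % (pad, elem.tag, attrs)
    for child in elem:
        out += pretty(child, indent + 2)
    return out + "%s</%s>\n" % (pad, elem.tag)

root = ET.fromstring('<doc a="1"><item>x</item><empty/></doc>')
print(pretty(root), end="")
# <doc a="1">
#   <item>x</item>
#   <empty />
# </doc>

In the vendored module itself, tostring(element, method="pretty_xml") reaches _serialize_pretty_xml through the _serialize dispatch table shown above.
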
index 9f3e8d1e7a2fc26019dbee57d82f3ecee2954470..7f6d7e8a5823899bc579ad4966a8608fb2d860fd 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -44,8 +44,10 @@ from . import test_module
 from . import template
 
 import platform
-if platform.system() == "Windows" :
+
+if platform.system() == "Windows":
     import colorama
+
     colorama.init()
 
 OK_STATUS = "OK"
@@ -54,14 +56,17 @@ NA_STATUS = "NA"
 KNOWNFAILURE_STATUS = "KF"
 TIMEOUT_STATUS = "TIMEOUT"
 
+
 class SatException(Exception):
     """sat exception class"""
+
     def message(self, arg):
         if sys.version_info[0] >= 3:
             # message method is not available for python 3.8+
             return super().msg(arg)
         else:
-            return super(SatException,self).message(arg)
+            return super(SatException, self).message(arg)
+
 
 def ensure_path_exists(p):
     """Create a path if not existing
@@ -71,29 +76,39 @@ def ensure_path_exists(p):
     if not os.path.exists(p):
         os.makedirs(p)
 
-def check_config_has_application( config, details = None ):
+
+def check_config_has_application(config, details=None):
     """check that the config has the key APPLICATION. Else raise an exception.
 
     :param config class 'common.pyconf.Config': The config.
     """
-    if 'APPLICATION' not in config:
-        message = _("An APPLICATION is required. Use 'config --list' to get the list of available applications.\n")
-        if details :
+    if "APPLICATION" not in config:
+        message = _(
+            "An APPLICATION is required. Use 'config --list' to get the list of available applications.\n"
+        )
+        if details:
             details.append(message)
-        raise SatException( message )
+        raise SatException(message)
+
 
-def check_platform_is_supported( config, logger ):
+def check_platform_is_supported(config, logger):
     """check that the platform is supported, write warning if not.
 
     :param config class 'common.pyconf.Config': The config.
     """
-    if 'platform' in config.APPLICATION and config.VARS.dist not in config.APPLICATION.platform:
-        msg = "WARNING: Your application configuration is not supported on this platform (%s)\n"\
-              "         Please consider using the native application!" % config.VARS.dist
+    if (
+        "platform" in config.APPLICATION
+        and config.VARS.dist not in config.APPLICATION.platform
+    ):
+        msg = (
+            "WARNING: Your application configuration is not supported on this platform (%s)\n"
+            "         Please consider using the native application!" % config.VARS.dist
+        )
         logger.write("\n%s\n\n" % printcolors.printcWarning(msg), 1)
     return
 
-def check_config_has_profile( config, details = None ):
+
+def check_config_has_profile(config, details=None):
     """\
     check that the config has the key APPLICATION.profile.
     else, raise an exception.
@@ -101,13 +116,14 @@ def check_config_has_profile( config, details = None ):
     :param config class 'common.pyconf.Config': The config.
     """
     check_config_has_application(config)
-    if 'profile' not in config.APPLICATION:
+    if "profile" not in config.APPLICATION:
         message = _("A profile section is required in your application.\n")
-        if details :
+        if details:
             details.append(message)
-        raise SatException( message )
+        raise SatException(message)
 
-def appli_test_property(config,property_name, property_value):
+
+def appli_test_property(config, property_name, property_value):
     """Generic function to test if an application has a property set to a value
     :param config class 'common.pyconf.Config': The config.
     :param property_name : The name of the property to check
@@ -116,20 +132,25 @@ def appli_test_property(config,property_name, property_value):
     :rtype: boolean
     """
     # first check if application has property_value
-    if not ("APPLICATION"  in config and
-            "properties"   in config.APPLICATION and
-            property_name  in config.APPLICATION.properties):
+    if not (
+        "APPLICATION" in config
+        and "properties" in config.APPLICATION
+        and property_name in config.APPLICATION.properties
+    ):
         return False
 
     # then check to the property is set to property_value
-    eval_expression = 'config.APPLICATION.properties.%s == "%s"' %\
-                      (property_name,property_value)
+    eval_expression = 'config.APPLICATION.properties.%s == "%s"' % (
+        property_name,
+        property_value,
+    )
     result = eval(eval_expression)
     return result
 
 
-def config_has_application( config ):
-    return 'APPLICATION' in config
+def config_has_application(config):
+    return "APPLICATION" in config
+
 
 def get_cfg_param(config, param_name, default):
     """\
@@ -148,6 +169,7 @@ def get_cfg_param(config, param_name, default):
         return config[param_name]
     return default
 
+
 def strSplitN(aList, nb, skip="\n     "):
     """
     example
@@ -158,41 +180,44 @@ def strSplitN(aList, nb, skip="\n     "):
     strValue = ""
     i = 0
     for v in aList:
-      strValue += "%15s, " % str(v)
-      i += 1
-      if i >= nb:
-        strValue += skip
-        i = 0
+        strValue += "%15s, " % str(v)
+        i += 1
+        if i >= nb:
+            strValue += skip
+            i = 0
     if len(aList) > nb:
         strValue = skip + strValue
     return strValue
 
+
 def getProductNames(cfg, wildcards, logger):
     """get products names using * or ? as wildcards like shell Linux"""
     res = []
     if type(wildcards) is list:
-      wilds = wildcards
+        wilds = wildcards
     else:
-      wilds = [wildcards]
+        wilds = [wildcards]
     notFound = {}
     products = cfg.APPLICATION.products.keys()
     for wild in wildcards:
-      ok = False
-      for prod in products:
-        filtered = fnmatch.filter([prod], wild)
-        # print("filtered", prod, wild, filtered)
-        if len(filtered) > 0:
-          res.append(prod)
-          ok = True
-          continue
-      if not ok:
-        notFound[wild] = None
+        ok = False
+        for prod in products:
+            filtered = fnmatch.filter([prod], wild)
+            # print("filtered", prod, wild, filtered)
+            if len(filtered) > 0:
+                res.append(prod)
+                ok = True
+                continue
+        if not ok:
+            notFound[wild] = None
     if len(res) == 0:
-      logger.warning("Empty list of products, from %s" % wilds)
+        logger.warning("Empty list of products, from %s" % wilds)
     if len(notFound.keys()) > 0:
-      strProd = strSplitN( sorted(products), 5)
-      logger.warning("products not found: %s\n  availables products are:\n%s" % \
-                     (sorted(notFound.keys()), strProd) )
+        strProd = strSplitN(sorted(products), 5)
+        logger.warning(
+            "products not found: %s\n  available products are:\n%s"
+            % (sorted(notFound.keys()), strProd)
+        )
     return res
 
 
@@ -211,6 +236,7 @@ def print_info(logger, info):
         printcolors.print_value(logger, sp + i[0], i[1], 2)
     logger.write("\n", 2)
 
+
 def get_base_path(config):
     """\
     Returns the path of the products base.
@@ -220,9 +246,9 @@ def get_base_path(config):
     :rtype: str
     """
     if "base" not in config.LOCAL:
-        local_file_path = os.path.join(config.VARS.salometoolsway,
-                                      "data",
-                                      "local.pyconf")
+        local_file_path = os.path.join(
+            config.VARS.salometoolsway, "data", "local.pyconf"
+        )
         msg = _("Please define a base path in the file %s" % local_file_path)
         raise SatException(msg)
 
@@ -230,6 +256,7 @@ def get_base_path(config):
 
     return base_path
 
+
 def get_launcher_name(config):
     """\
     Returns the name of salome launcher.
@@ -239,13 +266,17 @@ def get_launcher_name(config):
     :rtype: str
     """
     check_config_has_application(config)
-    if 'profile' in config.APPLICATION and 'launcher_name' in config.APPLICATION.profile:
+    if (
+        "profile" in config.APPLICATION
+        and "launcher_name" in config.APPLICATION.profile
+    ):
         launcher_name = config.APPLICATION.profile.launcher_name
     else:
-        launcher_name = 'salome'
+        launcher_name = "salome"
 
     return launcher_name
 
+
 def get_launcher_exe(config):
     """\
     Returns the name of exe defined in profile section.
@@ -255,7 +286,7 @@ def get_launcher_exe(config):
     :rtype: str
     """
     check_config_has_application(config)
-    if 'profile' in config.APPLICATION and 'exe' in config.APPLICATION.profile:
+    if "profile" in config.APPLICATION and "exe" in config.APPLICATION.profile:
         exe_name = config.APPLICATION.profile.exe
     else:
         exe_name = None
@@ -272,9 +303,9 @@ def get_log_path(config):
     :rtype: str
     """
     if "log_dir" not in config.LOCAL:
-        local_file_path = os.path.join(config.VARS.salometoolsway,
-                                      "data",
-                                      "local.pyconf")
+        local_file_path = os.path.join(
+            config.VARS.salometoolsway, "data", "local.pyconf"
+        )
         msg = _("Please define a log_dir in the file %s" % local_file_path)
         raise SatException(msg)
 
@@ -282,43 +313,44 @@ def get_log_path(config):
 
     return log_dir_path
 
+
 def get_salometool_version(config):
-   """Return the salomeTool version.
+    """Return the salomeTool version.
 
-   :param config Config: The global Config instance.
-   :return: the description of this version of sat in terms of tag and commit
-   """
-   return config.LOCAL.tag
+    :param config Config: The global Config instance.
+    :return: the description of this version of sat in terms of tag and commit
+    """
+    return config.LOCAL.tag
 
 
 def get_salome_version(config):
     import versionMinorMajorPatch as VMMP
 
-    if hasattr(config.APPLICATION, 'version_salome'):
+    if hasattr(config.APPLICATION, "version_salome"):
         version = VMMP.MinorMajorPatch(config.APPLICATION.version_salome)
     else:
         kernel_info = product.get_product_config(config, "KERNEL")
-        aFile = os.path.join(
-                            kernel_info.install_dir,
-                            "bin",
-                            "salome",
-                            "VERSION")
+        aFile = os.path.join(kernel_info.install_dir, "bin", "salome", "VERSION")
         if not os.path.isfile(aFile):
             return None
         with open(aFile) as f:
-          line = f.readline()  # example: '[SALOME KERNEL] : 8.4.0'
+            line = f.readline()  # example: '[SALOME KERNEL] : 8.4.0'
         version = VMMP.MinorMajorPatch(line.split(":")[1])
 
     # since Nov. 2023 and SALOME 9.10.0, tests on plain integers are forbidden; use MinorMajorPatch class comparisons
     return version
 
+
 def read_config_from_a_file(filePath):
-        try:
-            cfg_file = pyconf.Config(filePath)
-        except pyconf.ConfigError as e:
-            raise SatException(_("Error in configuration file: %(file)s\n  %(error)s") % \
-                { 'file': filePath, 'error': str(e) })
-        return cfg_file
+    try:
+        cfg_file = pyconf.Config(filePath)
+    except pyconf.ConfigError as e:
+        raise SatException(
+            _("Error in configuration file: %(file)s\n  %(error)s")
+            % {"file": filePath, "error": str(e)}
+        )
+    return cfg_file
+
 
 def get_tmp_filename(cfg, name):
     if not os.path.exists(cfg.VARS.tmp_root):
@@ -326,6 +358,7 @@ def get_tmp_filename(cfg, name):
 
     return os.path.join(cfg.VARS.tmp_root, name)
 
+
 ##
 # Utils class to simplify path manipulations.
 class Path:
@@ -377,7 +410,7 @@ class Path:
         if self.islink():
             os.remove(self.path)
         else:
-            shutil.rmtree( self.path, onerror = handleRemoveReadonly )
+            shutil.rmtree(self.path, onerror=handleRemoveReadonly)
 
     def copy(self, path, smart=False):
         if not isinstance(path, Path):
@@ -439,7 +472,8 @@ class Path:
         except:
             return False
 
-def find_file_in_lpath(file_name, lpath, additional_dir = ""):
+
+def find_file_in_lpath(file_name, lpath, additional_dir=""):
     """\
     Find in all the directories in lpath list the file that has the same name
     as file_name.
@@ -466,7 +500,10 @@ def find_file_in_lpath(file_name, lpath, additional_dir = ""):
                 return os.path.join(dir_complete, file_name)
     return False
 
-def find_file_in_ftppath(file_name, ftppath, installation_dir, logger, additional_dir = ""):
+
+def find_file_in_ftppath(
+    file_name, ftppath, installation_dir, logger, additional_dir=""
+):
     """\
     Find in all ftp servers in ftppath the file called file_name
     If it is found then return the destination path of the file
@@ -485,61 +522,67 @@ def find_file_in_ftppath(file_name, ftppath, installation_dir, logger, additiona
     if not os.path.exists(installation_dir):
         os.makedirs(installation_dir)
 
-    destination=os.path.join(installation_dir, file_name)
+    destination = os.path.join(installation_dir, file_name)
 
     # paths in ftppath may contain several paths separated by ":"
     # we split them, and push all paths in bigftppath
-    bigftppath=[]
+    bigftppath = []
     for ipath in ftppath:
-        splpath=ipath.split(":")
-        bigftppath+=splpath
+        splpath = ipath.split(":")
+        bigftppath += splpath
 
     for ftp_archive in bigftppath:
-       try:
-           # ftp_archive has the form ftp.xxx.yyy/dir1/dir2/...
-           ftp_archive_split=ftp_archive.split("/")
-           ftp_server=ftp_archive_split[0]
-           ftp = FTP(ftp_server)
-           logger.write("   Connect to ftp server %s\n" % ftp_server, 3)
-           ftp.login()
-           for directory in ftp_archive_split[1:]:
-               logger.write("   Change directory to %s\n" % directory, 3)
-               ftp.cwd(directory)
-           if additional_dir:
-               ftp.cwd(additional_dir)
-       except:
-           logger.error("while connecting to ftp server %s\n" % ftp_server)
-           continue
-
-       try:  # get md5 file if it exists
-           file_name_md5=file_name + ".md5"
-           destination_md5=destination + ".md5"
-           if ftp.size(file_name_md5) > 0:
-               with open(destination_md5,'wb') as dest_file_md5:
-                   ftp.retrbinary("RETR "+file_name_md5, dest_file_md5.write)
-       except:
-           pass
-
-       try:
-           if ftp.size(file_name) > 0:
-               # if file exists and is non empty
-               with open(destination,'wb') as dest_file:
-                   ftp.retrbinary("RETR "+file_name, dest_file.write)
-               logger.write("   Archive %s was retrieved and stored in %s\n" % (file_name, destination), 3)
-               return destination
-       except:
-           logger.error("File not found in ftp_archive %s\n" % ftp_server)
+        try:
+            # ftp_archive has the form ftp.xxx.yyy/dir1/dir2/...
+            ftp_archive_split = ftp_archive.split("/")
+            ftp_server = ftp_archive_split[0]
+            ftp = FTP(ftp_server)
+            logger.write("   Connect to ftp server %s\n" % ftp_server, 3)
+            ftp.login()
+            for directory in ftp_archive_split[1:]:
+                logger.write("   Change directory to %s\n" % directory, 3)
+                ftp.cwd(directory)
+            if additional_dir:
+                ftp.cwd(additional_dir)
+        except:
+            logger.error("while connecting to ftp server %s\n" % ftp_server)
+            continue
+
+        try:  # get md5 file if it exists
+            file_name_md5 = file_name + ".md5"
+            destination_md5 = destination + ".md5"
+            if ftp.size(file_name_md5) > 0:
+                with open(destination_md5, "wb") as dest_file_md5:
+                    ftp.retrbinary("RETR " + file_name_md5, dest_file_md5.write)
+        except:
+            pass
+
+        try:
+            if ftp.size(file_name) > 0:
+                # if file exists and is non empty
+                with open(destination, "wb") as dest_file:
+                    ftp.retrbinary("RETR " + file_name, dest_file.write)
+                logger.write(
+                    "   Archive %s was retrieved and stored in %s\n"
+                    % (file_name, destination),
+                    3,
+                )
+                return destination
+        except:
+            logger.error("File not found in ftp_archive %s\n" % ftp_server)
 
     return False
 
+
 def handleRemoveReadonly(func, path, exc):
     excvalue = exc[1]
     if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
-        os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
+        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
         func(path)
     else:
         raise
 
+
 def deepcopy_list(input_list):
     """\
     Do a deep copy of a list
@@ -553,6 +596,7 @@ def deepcopy_list(input_list):
         res.append(elem)
     return res
 
+
 def remove_item_from_list(input_list, item):
     """\
     Remove all occurrences of item from input_list
@@ -568,6 +612,7 @@ def remove_item_from_list(input_list, item):
         res.append(elem)
     return res
 
+
 def parse_date(date):
     """\
     Transform YYYYMMDD_hhmmss into YYYY-MM-DD hh:mm:ss.
@@ -578,14 +623,17 @@ def parse_date(date):
     """
     if len(date) != 15:
         return date
-    res = "%s-%s-%s %s:%s:%s" % (date[0:4],
-                                 date[4:6],
-                                 date[6:8],
-                                 date[9:11],
-                                 date[11:13],
-                                 date[13:])
+    res = "%s-%s-%s %s:%s:%s" % (
+        date[0:4],
+        date[4:6],
+        date[6:8],
+        date[9:11],
+        date[11:13],
+        date[13:],
+    )
     return res
 
+
 def merge_dicts(*dict_args):
     """\
     Given any number of dicts, shallow copy and merge into a new dict,
@@ -596,16 +644,18 @@ def merge_dicts(*dict_args):
         result.update(dictionary)
     return result
 
+
 def replace_in_file(filein, strin, strout):
     """Replace <strin> by <strout> in file <filein>"""
     shutil.move(filein, filein + "_old")
-    fileout= filein
+    fileout = filein
     filein = filein + "_old"
     fin = open(filein, "r")
     fout = open(fileout, "w")
     for line in fin:
         fout.write(line.replace(strin, strout))
 
+
 def get_property_in_product_cfg(product_cfg, pprty):
     if not "properties" in product_cfg:
         return None
@@ -613,12 +663,13 @@ def get_property_in_product_cfg(product_cfg, pprty):
         return None
     return product_cfg.properties[pprty]
 
+
 def activate_mesa_property(config):
     """Add mesa property into application properties
 
     :param config Config: The global configuration. It must have an application!
     """
     # Verify the existence of the file
-    if not 'properties' in config.APPLICATION:
-        config.APPLICATION.addMapping( 'properties', pyconf.Mapping(), None )
-    config.APPLICATION.properties.use_mesa="yes"
+    if not "properties" in config.APPLICATION:
+        config.APPLICATION.addMapping("properties", pyconf.Mapping(), None)
+    config.APPLICATION.properties.use_mesa = "yes"
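
Aside on getProductNames above: the wildcard expansion is plain shell-style matching via fnmatch over the application's product names. A minimal standalone sketch, with a made-up product list for illustration:

import fnmatch

# Hypothetical product list, for illustration only.
products = ["KERNEL", "GUI", "GEOM", "SMESH", "MEDCOUPLING"]

def expand(wildcards, products):
    # Expand shell-style wildcards (*, ?) over product names, keeping
    # track of the patterns that matched nothing.
    found, not_found = [], []
    for wild in wildcards:
        matches = fnmatch.filter(products, wild)
        if matches:
            found.extend(matches)
        else:
            not_found.append(wild)
    return found, not_found

print(expand(["G*", "MED?OUPLING", "FOO*"], products))
# (['GUI', 'GEOM', 'MEDCOUPLING'], ['FOO*'])
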
index 6e83d0beeea8dcb950ea3f9cd0c98dba03cd7e8d..b5d9badb02e432d77c9c455bb73733264fc7f4fc 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 #  License along with this library; if not, write to the Free Software
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
-'''
+"""
 In this file : all the stuff that can change with the architecture 
 on which SAT is running
-'''
+"""
 
 import os, sys
-from platform import system,python_version,release
+from platform import system, python_version, release
 
 # linux_distribution is removed from platform module in python 3.8+
-# we have to use distro module, which is not standard. 
+# we have to use distro module, which is not standard.
 # write an error message if distro is not installed
 try:
     from platform import linux_distribution
@@ -33,139 +33,149 @@ except:
     try:
         from distro import linux_distribution
     except:
-        print ("\nError :\n"
-               "  linux_distribution was removed from platform module in Python 3.8+\n"
-               "  For python 3.8+ sat requires distro module to get information on linux distribution.\n"
-               "  Please install distro module with : pip install distro")
+        print(
+            "\nError :\n"
+            "  linux_distribution was removed from platform module in Python 3.8+\n"
+            "  For python 3.8+ sat requires distro module to get information on linux distribution.\n"
+            "  Please install distro module with : pip install distro"
+        )
         sys.exit(-1)
 
 
 def is_windows():
-    '''method that checks windows OS
-      
+    """method that checks windows OS
+
     :rtype: boolean
-    '''
-    return system() == 'Windows'
+    """
+    return system() == "Windows"
+
 
 def get_user():
-    '''method that gets the username that launched sat  
-    
+    """method that gets the username that launched sat
+
     :rtype: str
-    '''
-    try :
+    """
+    try:
         if is_windows():
             # In windows case, the USERNAME environment variable has to be set
-            user_name=os.environ['USERNAME']
-        else: # linux
+            user_name = os.environ["USERNAME"]
+        else:  # linux
             import pwd
-            user_name=pwd.getpwuid(os.getuid())[0]
-    except :
-        user_name="Unknown"
+
+            user_name = pwd.getpwuid(os.getuid())[0]
+    except:
+        user_name = "Unknown"
     return user_name
 
-       
+
 def get_distribution(codes):
-    '''Gets the code for the distribution
-    
+    """Gets the code for the distribution
+
     :param codes L{Mapping}: The map containing distribution correlation table.
-    :return: The distribution on which salomeTools is running, regarding the 
+    :return: The distribution on which salomeTools is running, regarding the
              distribution correlation table contained in codes variable.
     :rtype: str
-    '''
+    """
     if is_windows():
         return "W"
 
     # else get linux distribution description from platform, and encode it with code
     lin_distrib = linux_distribution()[0].lower()
-    distrib="not found"
+    distrib = "not found"
     for dist in codes:
         if dist in lin_distrib:
             distrib = codes[dist]
             break
-    if distrib=="not found":
+    if distrib == "not found":
         sys.stderr.write(_(u"Unknown distribution: '%s'\n") % distrib)
-        sys.stderr.write(_(u"Please add your distribution to src/internal_config/distrib.pyconf\n"))
+        sys.stderr.write(
+            _(u"Please add your distribution to src/internal_config/distrib.pyconf\n")
+        )
         sys.exit(-1)
 
     return distrib
 
+
 def get_version_XY():
     """
     Return major and minor version of the distribution
     from a CentOS example, returns '7.6'
     extracted from platform.linux_distribution()
     """
-    dist_version=linux_distribution()[1].split('.')
-    if len(dist_version)==1:
+    dist_version = linux_distribution()[1].split(".")
+    if len(dist_version) == 1:
         version = dist_version[0]
     else:
         version = dist_version[0] + "." + dist_version[1]
-    return version 
+    return version
 
 
 def get_distrib_version(distrib):
-    '''Return the sat encoded version of the distribution
+    """Return the sat encoded version of the distribution
+       This code is used in config to append the name of the application directories
        with distribution info
-    
+
     :param distrib str: The distribution on which the version will be found.
-    :return: The version of the distribution on which salomeTools is running, 
-             regarding the distribution correlation table contained in codes 
+    :return: The version of the distribution on which salomeTools is running,
+             regarding the distribution correlation table contained in codes
              variable.
     :rtype: str
-    '''
+    """
 
     if is_windows():
         return release()
 
     # get version from platform
-    dist_version=linux_distribution()[1].split('.')
+    dist_version = linux_distribution()[1].split(".")
 
     # encode it (conform to src/internal_config/distrib.pyconf VERSIONS dist
     if distrib == "CO":
-        version=dist_version[0] # for centos, we only care for major version
+        version = dist_version[0]  # for centos, we only care for major version
     elif distrib == "UB":
         # for ubuntu, we care for major + minor version
-        version=dist_version[0] + "." + dist_version[1] 
+        version = dist_version[0] + "." + dist_version[1]
     elif distrib == "DB":
         if len(dist_version[0]) == 1:
-            version="0"+dist_version[0]
+            version = "0" + dist_version[0]
         else:
-            version=dist_version[0]  # unstable, and version >= 10
+            version = dist_version[0]  # unstable, and version >= 10
     elif distrib == "MG":
-        version="0"+dist_version[0]
+        version = "0" + dist_version[0]
     else:
-        version=dist_version[0]
-        
+        version = dist_version[0]
+
     return version
 
+
 def get_python_version():
-    '''Gets the version of the running python.
-    
+    """Gets the version of the running python.
+
     :return: the version of the running python.
     :rtype: str
-    '''
-    
+    """
+
     # The platform python module gives the answer
     return python_version()
 
+
 def get_nb_proc():
-    '''Gets the number of processors of the machine 
+    """Gets the number of processors of the machine
        on which salomeTools is running.
-    
+
     :return: the number of processors.
     :rtype: str
-    '''
-    
-    try :
+    """
+
+    try:
         import multiprocessing
-        nb_proc=multiprocessing.cpu_count()
-    except :
+
+        nb_proc = multiprocessing.cpu_count()
+    except:
         if is_windows():
             if os.environ.has_key("NUMBER_OF_PROCESSORS"):
                 nb_proc = int(os.environ["NUMBER_OF_PROCESSORS"])
             else:
                 nb_proc = 1
         else:
-            nb_proc=int(os.sysconf('SC_NPROCESSORS_ONLN'))
+            nb_proc = int(os.sysconf("SC_NPROCESSORS_ONLN"))
     return nb_proc
index 9fd0308f8088e21f9afb4a4ac65476a27e174db9..c42fbcf12cd6c8f441a3e8edff02c5ffe9cc3837 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 # https://gist.github.com/techtonik/2151727
 # Public Domain, i.e. feel free to copy/paste
@@ -14,145 +14,147 @@ import pprint as PP
 
 ##############################################################################
 def caller_name_simple(skip=1):
-  """
-  Get a name of a caller in the format module.class.method
-
-  'skip' specifies how many levels of stack to skip while getting caller
-  name. skip=1 means 'who calls me', skip=2 'who calls my caller' etc.
-
-  An empty string is returned if skipped levels exceed stack height
-  """
-
-  def stack_(frame):
-    framelist = []
-    while frame:
-      framelist.append(frame)
-      frame = frame.f_back
-    return framelist
-
-  stack = stack_(sys._getframe(1))
-  start = 0 + skip
-  if len(stack) < start + 1:
-    return ''
-  parentframe = stack[start]
-
-  name = []
-  module = inspect.getmodule(parentframe)
-  # `modname` can be None when frame is executed directly in console
-  # TODO(techtonik): consider using __main__
-  if module:
-    name.append(module.__name__)
-  # detect classname
-  if 'self' in parentframe.f_locals:
-    # I don't know any way to detect call from the object method
-    # XXX: there seems to be no way to detect static method call - it will
-    #      be just a function call
-    name.append(parentframe.f_locals['self'].__class__.__name__)
-  codename = parentframe.f_code.co_name
-
-  fr = inspect.currentframe().f_back
-  for i in range(skip):  # no more 20 for precaution
-    fr = fr.f_back
-    if fr is None:
-      break
-  lineno = fr.f_lineno
-
-  if codename != '<module>':  # top level usually
-    name.append(codename)
-
-  name[-1] += "[%s]" % str(lineno)  # function or a method
-  del parentframe
-  return ".".join(name)
+    """
+    Get a name of a caller in the format module.class.method
+
+    'skip' specifies how many levels of stack to skip while getting caller
+    name. skip=1 means 'who calls me', skip=2 'who calls my caller' etc.
+
+    An empty string is returned if skipped levels exceed stack height
+    """
+
+    def stack_(frame):
+        framelist = []
+        while frame:
+            framelist.append(frame)
+            frame = frame.f_back
+        return framelist
+
+    stack = stack_(sys._getframe(1))
+    start = 0 + skip
+    if len(stack) < start + 1:
+        return ""
+    parentframe = stack[start]
+
+    name = []
+    module = inspect.getmodule(parentframe)
+    # `modname` can be None when frame is executed directly in console
+    # TODO(techtonik): consider using __main__
+    if module:
+        name.append(module.__name__)
+    # detect classname
+    if "self" in parentframe.f_locals:
+        # I don't know any way to detect call from the object method
+        # XXX: there seems to be no way to detect static method call - it will
+        #      be just a function call
+        name.append(parentframe.f_locals["self"].__class__.__name__)
+    codename = parentframe.f_code.co_name
+
+    fr = inspect.currentframe().f_back
+    for i in range(skip):  # no more 20 for precaution
+        fr = fr.f_back
+        if fr is None:
+            break
+    lineno = fr.f_lineno
+
+    if codename != "<module>":  # top level usually
+        name.append(codename)
+
+    name[-1] += "[%s]" % str(lineno)  # function or a method
+    del parentframe
+    return ".".join(name)
 
 
 ##############################################################################
 def caller_name_stack(skip=1):
-  """
-  Get a name of a caller in the format module[no].class[no].method[no]
-  where [no] is line nunber in source file(s)
-
-  'skip' specifies how many levels of stack to skip while getting caller
-  name. skip=1 means 'who calls me', skip=2 'who calls my caller' etc.
-
-  An empty string is returned if skipped levels exceed stack height
-  """
-  def stack_(frame):
-    framelist = []
-    while frame:
-      framelist.append(frame)
-      frame = frame.f_back
-    return framelist
-
-  stack = stack_(sys._getframe(1))
-  start = 0 + skip
-  if len(stack) < start + 1:
-    return ''
-  parentframe = stack[start]
-
-  name = []
-  module = inspect.getmodule(parentframe)
-  # `modname` can be None when frame is executed directly in console
-  # TODO(techtonik): consider using __main__
-  if module:
-    name.append(module.__name__)
-  # detect classname
-  if 'self' in parentframe.f_locals:
-    # I don't know any way to detect call from the object method
-    # XXX: there seems to be no way to detect static method call - it will
-    #      be just a function call
-    name.append(parentframe.f_locals['self'].__class__.__name__)
-  codename = parentframe.f_code.co_name
-
-  fr = inspect.currentframe().f_back
-  lineno = [fr.f_lineno]
-  for i in range(20):  # no more 20 for precaution
-    fr = fr.f_back
-    if fr is None:
-      break
-    #print("*** frame locals %s" % str(fr.f_locals.keys()))
-    #print("*** frame globals %s" % str(fr.f_globals.keys()))
-    try:
-      namesrc = fr.f_globals["__name__"]
-      if namesrc == "__main__":
-        namesrc = os.path.basename(fr.f_globals["__file__"])
-      lineno.insert(0, (namesrc + "[%s]" % fr.f_lineno))
-    except:
-      lineno.insert(0, ("??", fr.f_lineno))
-
-  if codename != '<module>':  # top level usually
-    name.append(codename)  # function or a method
-
-  #print("lineno", lineno)
-  #print("name", name)
-
-  name[-1] += " // STACK: %s" % " ".join(lineno[0:-1])
-
-  del parentframe
-  return ".".join(name)
+    """
+    Get a name of a caller in the format module[no].class[no].method[no]
+    where [no] is line number in source file(s)
+
+    'skip' specifies how many levels of stack to skip while getting caller
+    name. skip=1 means 'who calls me', skip=2 'who calls my caller' etc.
+
+    An empty string is returned if skipped levels exceed stack height
+    """
+
+    def stack_(frame):
+        framelist = []
+        while frame:
+            framelist.append(frame)
+            frame = frame.f_back
+        return framelist
+
+    stack = stack_(sys._getframe(1))
+    start = 0 + skip
+    if len(stack) < start + 1:
+        return ""
+    parentframe = stack[start]
+
+    name = []
+    module = inspect.getmodule(parentframe)
+    # `modname` can be None when frame is executed directly in console
+    # TODO(techtonik): consider using __main__
+    if module:
+        name.append(module.__name__)
+    # detect classname
+    if "self" in parentframe.f_locals:
+        # I don't know any way to detect call from the object method
+        # XXX: there seems to be no way to detect static method call - it will
+        #      be just a function call
+        name.append(parentframe.f_locals["self"].__class__.__name__)
+    codename = parentframe.f_code.co_name
+
+    fr = inspect.currentframe().f_back
+    lineno = [fr.f_lineno]
+    for i in range(20):  # no more 20 for precaution
+        fr = fr.f_back
+        if fr is None:
+            break
+        # print("*** frame locals %s" % str(fr.f_locals.keys()))
+        # print("*** frame globals %s" % str(fr.f_globals.keys()))
+        try:
+            namesrc = fr.f_globals["__name__"]
+            if namesrc == "__main__":
+                namesrc = os.path.basename(fr.f_globals["__file__"])
+            lineno.insert(0, (namesrc + "[%s]" % fr.f_lineno))
+        except:
+            lineno.insert(0, "??[%s]" % fr.f_lineno)
+
+    if codename != "<module>":  # top level usually
+        name.append(codename)  # function or a method
+
+    # print("lineno", lineno)
+    # print("name", name)
+
+    name[-1] += " // STACK: %s" % " ".join(lineno[0:-1])
+
+    del parentframe
+    return ".".join(name)
 
 
 ##############################################################################
 def example_of_use(toCall):
-  """
-  example of use caller_name_simple, or else
-  """
-  class Dummy:
-    def one_method(self):
-      print("4- call in class %s" % toCall(0))
+    """
+    example of use caller_name_simple, or else
+    """
 
-  print("1- call in %s" % toCall(0)) # output from main to here
-  print("2- call in %s" % toCall(0))
-  print("3- call in %s" % toCall(1)) # output from main to caller
-  tmp = Dummy()
-  tmp.one_method()
+    class Dummy:
+        def one_method(self):
+            print("4- call in class %s" % toCall(0))
+
+    print("1- call in %s" % toCall(0))  # output from main to here
+    print("2- call in %s" % toCall(0))
+    print("3- call in %s" % toCall(1))  # output from main to caller
+    tmp = Dummy()
+    tmp.one_method()
 
 
 ##############################################################################
 # main as an example
 ##############################################################################
 if __name__ == "__main__":
-  example_of_use(caller_name_simple)
-  example_of_use(caller_name_stack)
+    example_of_use(caller_name_simple)
+    example_of_use(caller_name_stack)
 
 """
 example of output
@@ -169,5 +171,5 @@ example of output
 
 
 # here default caller_name is user choice...
-caller_name = caller_name_simple     # not so verbose
+caller_name = caller_name_simple  # not so verbose
 # caller_name = caller_name_stack    # more verbose, with stack
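
Aside on the caller_name helpers above: they rely on CPython frame introspection. A minimal sketch of the same idea, independent of this module (who_called_me is a hypothetical helper):

import sys

def who_called_me(skip=1):
    # sys._getframe is CPython-specific; +1 skips this helper's own frame.
    frame = sys._getframe(skip + 1)
    return "%s.%s[%d]" % (
        frame.f_globals.get("__name__", "?"),
        frame.f_code.co_name,
        frame.f_lineno,
    )

def some_function():
    print(who_called_me())  # reports the caller of some_function

some_function()  # prints something like "__main__.<module>[15]"
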
index 670e6b3970b77529fd65f002a1afda6d28873cce..0b885c7898f5306b3e6eb437265b61e6248585b2 100644 (file)
@@ -3,5 +3,4 @@ from .initialise import init, deinit, reinit, colorama_text
 from .ansi import Fore, Back, Style, Cursor
 from .ansitowin32 import AnsiToWin32
 
-__version__ = '0.3.7'
-
+__version__ = "0.3.7"
index 78776588db9410924d8e4af0922fbc3960a37624..1da3897945c4367bb885e6187a3105ec73b92735 100644 (file)
@@ -1,25 +1,28 @@
 # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-'''
+"""
 This module generates ANSI character codes to printing colors to terminals.
 See: http://en.wikipedia.org/wiki/ANSI_escape_code
-'''
+"""
 
-CSI = '\033['
-OSC = '\033]'
-BEL = '\007'
+CSI = "\033["
+OSC = "\033]"
+BEL = "\007"
 
 
 def code_to_chars(code):
-    return CSI + str(code) + 'm'
+    return CSI + str(code) + "m"
+
 
 def set_title(title):
-    return OSC + '2;' + title + BEL
+    return OSC + "2;" + title + BEL
+
 
 def clear_screen(mode=2):
-    return CSI + str(mode) + 'J'
+    return CSI + str(mode) + "J"
+
 
 def clear_line(mode=2):
-    return CSI + str(mode) + 'K'
+    return CSI + str(mode) + "K"
 
 
 class AnsiCodes(object):
@@ -28,75 +31,80 @@ class AnsiCodes(object):
         # Upon instantiation we define instance attributes, which are the same
         # as the class attributes but wrapped with the ANSI escape sequence
         for name in dir(self):
-            if not name.startswith('_'):
+            if not name.startswith("_"):
                 value = getattr(self, name)
                 setattr(self, name, code_to_chars(value))
 
 
 class AnsiCursor(object):
     def UP(self, n=1):
-        return CSI + str(n) + 'A'
+        return CSI + str(n) + "A"
+
     def DOWN(self, n=1):
-        return CSI + str(n) + 'B'
+        return CSI + str(n) + "B"
+
     def FORWARD(self, n=1):
-        return CSI + str(n) + 'C'
+        return CSI + str(n) + "C"
+
     def BACK(self, n=1):
-        return CSI + str(n) + 'D'
+        return CSI + str(n) + "D"
+
     def POS(self, x=1, y=1):
-        return CSI + str(y) + ';' + str(x) + 'H'
+        return CSI + str(y) + ";" + str(x) + "H"
 
 
 class AnsiFore(AnsiCodes):
-    BLACK           = 30
-    RED             = 31
-    GREEN           = 32
-    YELLOW          = 33
-    BLUE            = 34
-    MAGENTA         = 35
-    CYAN            = 36
-    WHITE           = 37
-    RESET           = 39
+    BLACK = 30
+    RED = 31
+    GREEN = 32
+    YELLOW = 33
+    BLUE = 34
+    MAGENTA = 35
+    CYAN = 36
+    WHITE = 37
+    RESET = 39
 
     # These are fairly well supported, but not part of the standard.
-    LIGHTBLACK_EX   = 90
-    LIGHTRED_EX     = 91
-    LIGHTGREEN_EX   = 92
-    LIGHTYELLOW_EX  = 93
-    LIGHTBLUE_EX    = 94
+    LIGHTBLACK_EX = 90
+    LIGHTRED_EX = 91
+    LIGHTGREEN_EX = 92
+    LIGHTYELLOW_EX = 93
+    LIGHTBLUE_EX = 94
     LIGHTMAGENTA_EX = 95
-    LIGHTCYAN_EX    = 96
-    LIGHTWHITE_EX   = 97
+    LIGHTCYAN_EX = 96
+    LIGHTWHITE_EX = 97
 
 
 class AnsiBack(AnsiCodes):
-    BLACK           = 40
-    RED             = 41
-    GREEN           = 42
-    YELLOW          = 43
-    BLUE            = 44
-    MAGENTA         = 45
-    CYAN            = 46
-    WHITE           = 47
-    RESET           = 49
+    BLACK = 40
+    RED = 41
+    GREEN = 42
+    YELLOW = 43
+    BLUE = 44
+    MAGENTA = 45
+    CYAN = 46
+    WHITE = 47
+    RESET = 49
 
     # These are fairly well supported, but not part of the standard.
-    LIGHTBLACK_EX   = 100
-    LIGHTRED_EX     = 101
-    LIGHTGREEN_EX   = 102
-    LIGHTYELLOW_EX  = 103
-    LIGHTBLUE_EX    = 104
+    LIGHTBLACK_EX = 100
+    LIGHTRED_EX = 101
+    LIGHTGREEN_EX = 102
+    LIGHTYELLOW_EX = 103
+    LIGHTBLUE_EX = 104
     LIGHTMAGENTA_EX = 105
-    LIGHTCYAN_EX    = 106
-    LIGHTWHITE_EX   = 107
+    LIGHTCYAN_EX = 106
+    LIGHTWHITE_EX = 107
 
 
 class AnsiStyle(AnsiCodes):
-    BRIGHT    = 1
-    DIM       = 2
-    NORMAL    = 22
+    BRIGHT = 1
+    DIM = 2
+    NORMAL = 22
     RESET_ALL = 0
 
-Fore   = AnsiFore()
-Back   = AnsiBack()
-Style  = AnsiStyle()
+
+Fore = AnsiFore()
+Back = AnsiBack()
+Style = AnsiStyle()
 Cursor = AnsiCursor()
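
Aside on the ANSI constants above: every Fore/Back/Style value expands to an SGR escape sequence built as CSI + code + "m", which is what code_to_chars() does. A tiny standalone sketch:

CSI = "\033["

def sgr(code):
    # Same construction as code_to_chars() above: CSI + numeric code + "m".
    return CSI + str(code) + "m"

BRIGHT, RED, RESET_ALL = sgr(1), sgr(31), sgr(0)
print(BRIGHT + RED + "error" + RESET_ALL)  # bright red on an ANSI-capable terminal
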
index b7ff6f2136eec3530563c0fedf226b4b00c52d9d..8b445f56a63994bc7cef32b6ab4b23dff7f0da94 100644 (file)
@@ -14,19 +14,20 @@ if windll is not None:
 
 
 def is_stream_closed(stream):
-    return not hasattr(stream, 'closed') or stream.closed
+    return not hasattr(stream, "closed") or stream.closed
 
 
 def is_a_tty(stream):
-    return hasattr(stream, 'isatty') and stream.isatty()
+    return hasattr(stream, "isatty") and stream.isatty()
 
 
 class StreamWrapper(object):
-    '''
+    """
     Wraps a stream (such as stdout), acting as a transparent proxy for all
     attribute access apart from method 'write()', which is delegated to our
     Converter instance.
-    '''
+    """
+
     def __init__(self, wrapped, converter):
         # double-underscore everything to prevent clashes with names of
         # attributes on the wrapped stream object.
@@ -41,13 +42,18 @@ class StreamWrapper(object):
 
 
 class AnsiToWin32(object):
-    '''
+    """
     Implements a 'write()' method which, on Windows, will strip ANSI character
     sequences from the text, and if outputting to a tty, will convert them into
     win32 function calls.
-    '''
-    ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?')     # Control Sequence Introducer
-    ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?')         # Operating System Command
+    """
+
+    ANSI_CSI_RE = re.compile(
+        "\001?\033\[((?:\d|;)*)([a-zA-Z])\002?"
+    )  # Control Sequence Introducer
+    ANSI_OSC_RE = re.compile(
+        "\001?\033\]((?:.|;)*?)(\x07)\002?"
+    )  # Operating System Command
 
     def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
         # The wrapped stream (normally sys.stdout or sys.stderr)
@@ -59,7 +65,7 @@ class AnsiToWin32(object):
         # create the proxy wrapping our output stream
         self.stream = StreamWrapper(wrapped, self)
 
-        on_windows = os.name == 'nt'
+        on_windows = os.name == "nt"
         # We test if the WinAPI works, because even if we are on Windows
         # we may be using a terminal that doesn't support the WinAPI
         # (e.g. Cygwin Terminal). In this case it's up to the terminal
@@ -68,12 +74,18 @@ class AnsiToWin32(object):
 
         # should we strip ANSI sequences from our output?
         if strip is None:
-            strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
+            strip = conversion_supported or (
+                not is_stream_closed(wrapped) and not is_a_tty(wrapped)
+            )
         self.strip = strip
 
         # should we should convert ANSI sequences into win32 calls?
         if convert is None:
-            convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
+            convert = (
+                conversion_supported
+                and not is_stream_closed(wrapped)
+                and is_a_tty(wrapped)
+            )
         self.convert = convert
 
         # dict of ansi codes to win32 functions and parameters
@@ -83,19 +95,19 @@ class AnsiToWin32(object):
         self.on_stderr = self.wrapped is sys.stderr
 
     def should_wrap(self):
-        '''
+        """
         True if this class is actually needed. If false, then the output
         stream will not be affected, nor will win32 calls be issued, so
         wrapping stdout is not actually required. This will generally be
         False on non-Windows platforms, unless optional functionality like
         autoreset has been requested using kwargs to init()
-        '''
+        """
         return self.convert or self.strip or self.autoreset
 
     def get_win32_calls(self):
         if self.convert and winterm:
             return {
-                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
+                AnsiStyle.RESET_ALL: (winterm.reset_all,),
                 AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                 AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                 AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
@@ -107,7 +119,7 @@ class AnsiToWin32(object):
                 AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                 AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                 AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
-                AnsiFore.RESET: (winterm.fore, ),
+                AnsiFore.RESET: (winterm.fore,),
                 AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
                 AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
                 AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
@@ -124,7 +136,7 @@ class AnsiToWin32(object):
                 AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                 AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                 AnsiBack.WHITE: (winterm.back, WinColor.GREY),
-                AnsiBack.RESET: (winterm.back, ),
+                AnsiBack.RESET: (winterm.back,),
                 AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
                 AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
                 AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
@@ -145,20 +157,18 @@ class AnsiToWin32(object):
         if self.autoreset:
             self.reset_all()
 
-
     def reset_all(self):
         if self.convert:
-            self.call_win32('m', (0,))
+            self.call_win32("m", (0,))
         elif not self.strip and not is_stream_closed(self.wrapped):
             self.wrapped.write(Style.RESET_ALL)
 
-
     def write_and_convert(self, text):
-        '''
+        """
         Write the given text to our wrapped stream, stripping any ANSI
         sequences from the text, and optionally converting them into win32
         calls.
-        '''
+        """
         cursor = 0
         text = self.convert_osc(text)
         for match in self.ANSI_CSI_RE.finditer(text):
@@ -168,39 +178,35 @@ class AnsiToWin32(object):
             cursor = end
         self.write_plain_text(text, cursor, len(text))
 
-
     def write_plain_text(self, text, start, end):
         if start < end:
             self.wrapped.write(text[start:end])
             self.wrapped.flush()
 
-
     def convert_ansi(self, paramstring, command):
         if self.convert:
             params = self.extract_params(command, paramstring)
             self.call_win32(command, params)
 
-
     def extract_params(self, command, paramstring):
-        if command in 'Hf':
-            params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
+        if command in "Hf":
+            params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(";"))
             while len(params) < 2:
                 # defaults:
                 params = params + (1,)
         else:
-            params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
+            params = tuple(int(p) for p in paramstring.split(";") if len(p) != 0)
             if len(params) == 0:
                 # defaults:
-                if command in 'JKm':
+                if command in "JKm":
                     params = (0,)
-                elif command in 'ABCD':
+                elif command in "ABCD":
                     params = (1,)
 
         return params
 
-
     def call_win32(self, command, params):
-        if command == 'm':
+        if command == "m":
             for param in params:
                 if param in self.win32_calls:
                     func_args = self.win32_calls[param]
@@ -208,29 +214,28 @@ class AnsiToWin32(object):
                     args = func_args[1:]
                     kwargs = dict(on_stderr=self.on_stderr)
                     func(*args, **kwargs)
-        elif command in 'J':
+        elif command in "J":
             winterm.erase_screen(params[0], on_stderr=self.on_stderr)
-        elif command in 'K':
+        elif command in "K":
             winterm.erase_line(params[0], on_stderr=self.on_stderr)
-        elif command in 'Hf':     # cursor position - absolute
+        elif command in "Hf":  # cursor position - absolute
             winterm.set_cursor_position(params, on_stderr=self.on_stderr)
-        elif command in 'ABCD':   # cursor position - relative
+        elif command in "ABCD":  # cursor position - relative
             n = params[0]
             # A - up, B - down, C - forward, D - back
-            x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
+            x, y = {"A": (0, -n), "B": (0, n), "C": (n, 0), "D": (-n, 0)}[command]
             winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
 
-
     def convert_osc(self, text):
         for match in self.ANSI_OSC_RE.finditer(text):
             start, end = match.span()
             text = text[:start] + text[end:]
             paramstring, command = match.groups()
-            if command in '\x07':       # \x07 = BEL
+            if command in "\x07":  # \x07 = BEL
                 params = paramstring.split(";")
                 # 0 - change title and icon (we will only change title)
                 # 1 - change icon (we don't support this)
                 # 2 - change title
-                if params[0] in '02':
+                if params[0] in "02":
                     winterm.set_title(params[1])
         return text
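
For reference, a minimal standalone sketch of how the reformatted ANSI_CSI_RE pattern above splits an escape sequence into the paramstring and command letter consumed by convert_ansi(); the sample input is illustrative only and is not part of the formatted sources:

import re

# Equivalent to the ANSI_CSI_RE above, written with explicit backslash escapes.
ANSI_CSI_RE = re.compile("\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?")

match = ANSI_CSI_RE.search("\033[31;1mhello")
print(match.groups())  # ('31;1', 'm') -> ';'-separated parameters, then the command letter
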
index 834962a35f76f903c56bf921605b7fe3938088a4..4ab14aeddbc7c67e95da64ea46ac3b5186894dd9 100644 (file)
@@ -16,14 +16,14 @@ atexit_done = False
 
 
 def reset_all():
-    if AnsiToWin32 is not None:    # Issue #74: objects might become None at exit
+    if AnsiToWin32 is not None:  # Issue #74: objects might become None at exit
         AnsiToWin32(orig_stdout).reset_all()
 
 
 def init(autoreset=False, convert=None, strip=None, wrap=True):
 
     if not wrap and any([autoreset, convert, strip]):
-        raise ValueError('wrap=False conflicts with any other arg=True')
+        raise ValueError("wrap=False conflicts with any other arg=True")
 
     global wrapped_stdout, wrapped_stderr
     global orig_stdout, orig_stderr
@@ -34,13 +34,15 @@ def init(autoreset=False, convert=None, strip=None, wrap=True):
     if sys.stdout is None:
         wrapped_stdout = None
     else:
-        sys.stdout = wrapped_stdout = \
-            wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
+        sys.stdout = wrapped_stdout = wrap_stream(
+            orig_stdout, convert, strip, autoreset, wrap
+        )
     if sys.stderr is None:
         wrapped_stderr = None
     else:
-        sys.stderr = wrapped_stderr = \
-            wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
+        sys.stderr = wrapped_stderr = wrap_stream(
+            orig_stderr, convert, strip, autoreset, wrap
+        )
 
     global atexit_done
     if not atexit_done:
@@ -73,10 +75,7 @@ def reinit():
 
 def wrap_stream(stream, convert, strip, autoreset, wrap):
     if wrap:
-        wrapper = AnsiToWin32(stream,
-            convert=convert, strip=strip, autoreset=autoreset)
+        wrapper = AnsiToWin32(stream, convert=convert, strip=strip, autoreset=autoreset)
         if wrapper.should_wrap():
             stream = wrapper.stream
     return stream
-
-
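
As a usage note, the init()/wrap_stream() machinery above is typically driven as follows; this is an illustrative sketch only, and the top-level import path is an assumption since the package is vendored inside sat:

import colorama  # assumption: adjust to the vendored location of this package

colorama.init(autoreset=True)  # wraps sys.stdout/sys.stderr via wrap_stream() above
print("\033[32mgreen where conversion is possible, plain text where stripped\033[0m")
colorama.deinit()              # restores the original streams
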
index 3d1d2f2d91867ecd8663a2870fededa15b2b9c89..34c13ab6e4243b48b1ccc8fb1c7e03d73e3efb51 100644 (file)
@@ -7,6 +7,7 @@ STDERR = -12
 try:
     import ctypes
     from ctypes import LibraryLoader
+
     windll = LibraryLoader(ctypes.WinDLL)
     from ctypes import wintypes
 except (AttributeError, ImportError):
@@ -20,6 +21,7 @@ else:
 
     class CONSOLE_SCREEN_BUFFER_INFO(Structure):
         """struct in wincon.h."""
+
         _fields_ = [
             ("dwSize", COORD),
             ("dwCursorPosition", COORD),
@@ -27,13 +29,20 @@ else:
             ("srWindow", wintypes.SMALL_RECT),
             ("dwMaximumWindowSize", COORD),
         ]
+
         def __str__(self):
-            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
-                self.dwSize.Y, self.dwSize.X
-                , self.dwCursorPosition.Y, self.dwCursorPosition.X
-                , self.wAttributes
-                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
-                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
+            return "(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)" % (
+                self.dwSize.Y,
+                self.dwSize.X,
+                self.dwCursorPosition.Y,
+                self.dwCursorPosition.X,
+                self.wAttributes,
+                self.srWindow.Top,
+                self.srWindow.Left,
+                self.srWindow.Bottom,
+                self.srWindow.Right,
+                self.dwMaximumWindowSize.Y,
+                self.dwMaximumWindowSize.X,
             )
 
     _GetStdHandle = windll.kernel32.GetStdHandle
@@ -84,9 +93,7 @@ else:
     _FillConsoleOutputAttribute.restype = wintypes.BOOL
 
     _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
-    _SetConsoleTitleW.argtypes = [
-        wintypes.LPCSTR
-    ]
+    _SetConsoleTitleW.argtypes = [wintypes.LPCSTR]
     _SetConsoleTitleW.restype = wintypes.BOOL
 
     handles = {
@@ -97,15 +104,13 @@ else:
     def winapi_test():
         handle = handles[STDOUT]
         csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
+        success = _GetConsoleScreenBufferInfo(handle, byref(csbi))
         return bool(success)
 
     def GetConsoleScreenBufferInfo(stream_id=STDOUT):
         handle = handles[stream_id]
         csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
+        success = _GetConsoleScreenBufferInfo(handle, byref(csbi))
         return csbi
 
     def SetConsoleTextAttribute(stream_id, attrs):
@@ -137,18 +142,20 @@ else:
         num_written = wintypes.DWORD(0)
         # Note that this is hard-coded for ANSI (vs wide) bytes.
         success = _FillConsoleOutputCharacterA(
-            handle, char, length, start, byref(num_written))
+            handle, char, length, start, byref(num_written)
+        )
         return num_written.value
 
     def FillConsoleOutputAttribute(stream_id, attr, length, start):
-        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
+        """FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )"""
         handle = handles[stream_id]
         attribute = wintypes.WORD(attr)
         length = wintypes.DWORD(length)
         num_written = wintypes.DWORD(0)
         # Note that this is hard-coded for ANSI (vs wide) bytes.
         return _FillConsoleOutputAttribute(
-            handle, attribute, length, start, byref(num_written))
+            handle, attribute, length, start, byref(num_written)
+        )
 
     def SetConsoleTitle(title):
         return _SetConsoleTitleW(title)
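
A hedged sketch of exercising the ctypes wrappers above; the import path is an assumption, and the fallback values set in the except branch (windll left as None, winapi_test returning a false value) are assumed to match upstream colorama:

from colorama import win32  # assumption: adjust to the vendored location

# Only meaningful on a real Windows console; elsewhere the guard is simply false.
if win32.windll is not None and win32.winapi_test():
    csbi = win32.GetConsoleScreenBufferInfo(win32.STDOUT)
    print(csbi)  # uses the CONSOLE_SCREEN_BUFFER_INFO.__str__ reformatted above
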
index 60309d3c07aa4c1a66388d0d2530e7086d46f091..c9caecba8bdf98321f1c75c3705a65b1e52da5cf 100644 (file)
@@ -4,23 +4,24 @@ from . import win32
 
 # from wincon.h
 class WinColor(object):
-    BLACK   = 0
-    BLUE    = 1
-    GREEN   = 2
-    CYAN    = 3
-    RED     = 4
+    BLACK = 0
+    BLUE = 1
+    GREEN = 2
+    CYAN = 3
+    RED = 4
     MAGENTA = 5
-    YELLOW  = 6
-    GREY    = 7
+    YELLOW = 6
+    GREY = 7
+
 
 # from wincon.h
 class WinStyle(object):
-    NORMAL              = 0x00 # dim text, dim background
-    BRIGHT              = 0x08 # bright text, dim background
-    BRIGHT_BACKGROUND   = 0x80 # dim text, bright background
+    NORMAL = 0x00  # dim text, dim background
+    BRIGHT = 0x08  # bright text, dim background
+    BRIGHT_BACKGROUND = 0x80  # dim text, bright background
 
-class WinTerm(object):
 
+class WinTerm(object):
     def __init__(self):
         self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
         self.set_attrs(self._default)
@@ -118,7 +119,9 @@ class WinTerm(object):
         # get the number of character cells in the current buffer
         cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
         # get number of character cells before current cursor position
-        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+        cells_before_cursor = (
+            csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+        )
         if mode == 0:
             from_coord = csbi.dwCursorPosition
             cells_to_erase = cells_in_screen - cells_before_cursor
@@ -129,9 +132,11 @@ class WinTerm(object):
             from_coord = win32.COORD(0, 0)
             cells_to_erase = cells_in_screen
         # fill the entire screen with blanks
-        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        win32.FillConsoleOutputCharacter(handle, " ", cells_to_erase, from_coord)
         # now set the buffer's attributes accordingly
-        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+        win32.FillConsoleOutputAttribute(
+            handle, self.get_attrs(), cells_to_erase, from_coord
+        )
         if mode == 2:
             # put the cursor where needed
             win32.SetConsoleCursorPosition(handle, (1, 1))
@@ -154,9 +159,11 @@ class WinTerm(object):
             from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
             cells_to_erase = csbi.dwSize.X
         # fill the entire screen with blanks
-        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        win32.FillConsoleOutputCharacter(handle, " ", cells_to_erase, from_coord)
         # now set the buffer's attributes accordingly
-        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+        win32.FillConsoleOutputAttribute(
+            handle, self.get_attrs(), cells_to_erase, from_coord
+        )
 
     def set_title(self, title):
         win32.SetConsoleTitle(title)
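
The WinColor and WinStyle constants reformatted above are combined into a single console attribute word; a tiny illustrative sketch, assuming the same composition as upstream colorama's WinTerm.get_attrs() (foreground + background * 16 + style):

from colorama.winterm import WinColor, WinStyle  # assumption: adjust to the vendored location

# Assumption: attributes compose as in upstream colorama (fore + back * 16 + style).
attr = WinColor.RED + WinColor.BLUE * 16 + WinStyle.BRIGHT
print(hex(attr))  # 0x1c -> bright red text on a blue background
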
index 751fe73b911891c94f4de45f3fa9d46768d169be..721b3fae64b747bcd31eb3115e4753ec0d9e847b 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -24,24 +24,21 @@ import glob
 
 import src
 
-C_COMPILE_ENV_LIST = ["CC",
-                      "CXX",
-                      "F77",
-                      "CFLAGS",
-                      "CXXFLAGS",
-                      "LIBS",
-                      "LDFLAGS"]
+C_COMPILE_ENV_LIST = ["CC", "CXX", "F77", "CFLAGS", "CXXFLAGS", "LIBS", "LDFLAGS"]
+
 
 class Builder:
-    """Class to handle all construction steps, like cmake, configure, make, ...
-    """
-    def __init__(self,
-                 config,
-                 logger,
-                 product_name,
-                 product_info,
-                 options = src.options.OptResult(),
-                 check_src=True):
+    """Class to handle all construction steps, like cmake, configure, make, ..."""
+
+    def __init__(
+        self,
+        config,
+        logger,
+        product_name,
+        product_info,
+        options=src.options.OptResult(),
+        check_src=True,
+    ):
         self.config = config
         self.logger = logger
         self.options = options
@@ -79,29 +76,29 @@ class Builder:
             # create build dir
             self.build_dir.make()
 
-        self.log('  build_dir   = %s\n' % str(self.build_dir), 4)
-        self.log('  install_dir = %s\n' % str(self.install_dir), 4)
-        self.log('\n', 4)
+        self.log("  build_dir   = %s\n" % str(self.build_dir), 4)
+        self.log("  install_dir = %s\n" % str(self.install_dir), 4)
+        self.log("\n", 4)
 
         # add products in depend and opt_depend list recursively
-        environ_info = src.product.get_product_dependencies(self.config,
-                                                            self.product_name,
-                                                            self.product_info)
-        #environ_info.append(self.product_info.name)
+        environ_info = src.product.get_product_dependencies(
+            self.config, self.product_name, self.product_info
+        )
+        # environ_info.append(self.product_info.name)
 
         # create build environment
-        self.build_environ = src.environment.SalomeEnviron(self.config,
-                                      src.environment.Environ(dict(os.environ)),
-                                      True)
-        self.build_environ.silent = (self.config.USER.output_verbose_level < 5)
+        self.build_environ = src.environment.SalomeEnviron(
+            self.config, src.environment.Environ(dict(os.environ)), True
+        )
+        self.build_environ.silent = self.config.USER.output_verbose_level < 5
         self.build_environ.set_full_environ(self.logger, environ_info)
-        
+
         if add_env_launch:
-        # create runtime environment
-            self.launch_environ = src.environment.SalomeEnviron(self.config,
-                                      src.environment.Environ(dict(os.environ)),
-                                      False)
-            self.launch_environ.silent = True # no need to show here
+            # create runtime environment
+            self.launch_environ = src.environment.SalomeEnviron(
+                self.config, src.environment.Environ(dict(os.environ)), False
+            )
+            self.launch_environ.silent = True  # no need to show here
             self.launch_environ.set_full_environ(self.logger, environ_info)
 
         for ee in C_COMPILE_ENV_LIST:
@@ -117,37 +114,42 @@ class Builder:
 
         cmake_option = options
         # cmake_option +=' -DCMAKE_VERBOSE_MAKEFILE=ON -DSALOME_CMAKE_DEBUG=ON'
-        if 'cmake_options' in self.product_info:
-            cmake_option += " %s " % " ".join(
-                                        self.product_info.cmake_options.split())
+        if "cmake_options" in self.product_info:
+            cmake_option += " %s " % " ".join(self.product_info.cmake_options.split())
 
         # add debug option
         if self.debug_mode:
             cmake_option += " -DCMAKE_BUILD_TYPE=Debug"
-        else :
+        else:
             cmake_option += " -DCMAKE_BUILD_TYPE=Release"
 
         # add verbose option if specified in application for this product.
         if self.verbose_mode:
             cmake_option += " -DCMAKE_VERBOSE_MAKEFILE=ON"
 
-        # In case CMAKE_GENERATOR is defined in environment, 
+        # In case CMAKE_GENERATOR is defined in the environment,
         # use it instead of detecting it automatically
-        if 'cmake_generator' in self.config.APPLICATION:
-            cmake_option += " -DCMAKE_GENERATOR=\"%s\"" \
-                                       % self.config.APPLICATION.cmake_generator
-        command = ("cmake %s -DCMAKE_INSTALL_PREFIX=%s %s" %
-                            (cmake_option, self.install_dir, self.source_dir))
+        if "cmake_generator" in self.config.APPLICATION:
+            cmake_option += (
+                ' -DCMAKE_GENERATOR="%s"' % self.config.APPLICATION.cmake_generator
+            )
+        command = "cmake %s -DCMAKE_INSTALL_PREFIX=%s %s" % (
+            cmake_option,
+            self.install_dir,
+            self.source_dir,
+        )
 
         self.log_command(command)
         # for key in sorted(self.build_environ.environ.environ.keys()):
-            # print key, "  ", self.build_environ.environ.environ[key]
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
+        # print key, "  ", self.build_environ.environ.environ[key]
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
 
         self.put_txt_log_in_appli_log_dir("cmake")
         if res == 0:
@@ -159,19 +161,21 @@ class Builder:
     # Runs build_configure with the given options.
     def build_configure(self, options=""):
 
-        if 'buildconfigure_options' in self.product_info:
+        if "buildconfigure_options" in self.product_info:
             options += " %s " % self.product_info.buildconfigure_options
 
-        command = str('%s/build_configure') % (self.source_dir)
+        command = str("%s/build_configure") % (self.source_dir)
         command = command + " " + options
         self.log_command(command)
 
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
         self.put_txt_log_in_appli_log_dir("build_configure")
         if res == 0:
             return res
@@ -182,22 +186,23 @@ class Builder:
     # Runs configure with the given options.
     def configure(self, options=""):
 
-        if 'configure_options' in self.product_info:
+        if "configure_options" in self.product_info:
             options += " %s " % self.product_info.configure_options
 
-        command = "%s/configure --prefix=%s" % (self.source_dir,
-                                                str(self.install_dir))
+        command = "%s/configure --prefix=%s" % (self.source_dir, str(self.install_dir))
 
         command = command + " " + options
         self.log_command(command)
 
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
-        
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
+
         self.put_txt_log_in_appli_log_dir("configure")
         if res == 0:
             return res
@@ -205,16 +210,16 @@ class Builder:
             return 1
 
     def hack_libtool(self):
-        if not os.path.exists(str(self.build_dir + 'libtool')):
+        if not os.path.exists(str(self.build_dir + "libtool")):
             return
 
-        lf = open(os.path.join(str(self.build_dir), "libtool"), 'r')
+        lf = open(os.path.join(str(self.build_dir), "libtool"), "r")
         for line in lf.readlines():
-            if 'hack_libtool' in line:
+            if "hack_libtool" in line:
                 return
 
         # fix libtool by replacing CC="<compil>" with hack_libtool function
-        hack_command='''sed -i "s%^CC=\\"\(.*\)\\"%hack_libtool() { \\n\\
+        hack_command = """sed -i "s%^CC=\\"\(.*\)\\"%hack_libtool() { \\n\\
 if test \\"\$(echo \$@ | grep -E '\\\\\\-L/usr/lib(/../lib)?(64)? ')\\" == \\\"\\\" \\n\\
   then\\n\\
     cmd=\\"\\1 \$@\\"\\n\\
@@ -223,47 +228,50 @@ if test \\"\$(echo \$@ | grep -E '\\\\\\-L/usr/lib(/../lib)?(64)? ')\\" == \\\"\
   fi\\n\\
   \$cmd\\n\\
 }\\n\\
-CC=\\"hack_libtool\\"%g" libtool'''
+CC=\\"hack_libtool\\"%g" libtool"""
 
         self.log_command(hack_command)
-        subprocess.call(hack_command,
-                        shell=True,
-                        cwd=str(self.build_dir),
-                        env=self.build_environ.environ.environ,
-                        stdout=self.logger.logTxtFile,
-                        stderr=subprocess.STDOUT)
-
+        subprocess.call(
+            hack_command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
 
     ##
     # Runs make to build the module.
     def make(self, nb_proc, make_opt=""):
 
         # make
-        command = 'make'
+        command = "make"
         command = command + " -j" + str(nb_proc)
         command = command + " " + make_opt
         self.log_command(command)
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
         self.put_txt_log_in_appli_log_dir("make")
         if res == 0:
             return res
         else:
             return 1
-    
+
     ##
     # Runs msbuild to build the module.
-    def wmake(self,nb_proc, opt_nb_proc = None):
+    def wmake(self, nb_proc, opt_nb_proc=None):
 
-        hh = 'MSBUILD /m:%s' % str(nb_proc)
+        hh = "MSBUILD /m:%s" % str(nb_proc)
         if self.debug_mode:
             hh += " " + src.printcolors.printcWarning("DEBUG")
         # make
-        command = 'msbuild'
+        command = "msbuild"
         command = command + " /maxcpucount:" + str(nb_proc)
         if self.debug_mode:
             command = command + " /p:Configuration=Debug  /p:Platform=x64 "
@@ -272,13 +280,15 @@ CC=\\"hack_libtool\\"%g" libtool'''
         command = command + " ALL_BUILD.vcxproj"
 
         self.log_command(command)
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
-        
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
+
         self.put_txt_log_in_appli_log_dir("make")
         if res == 0:
             return res
@@ -289,141 +299,172 @@ CC=\\"hack_libtool\\"%g" libtool'''
     # Runs 'make install'.
     def install(self):
         if src.architecture.is_windows():
-            command = 'msbuild INSTALL.vcxproj'
+            command = "msbuild INSTALL.vcxproj"
             if self.debug_mode:
                 command = command + " /p:Configuration=Debug  /p:Platform=x64 "
             else:
                 command = command + " /p:Configuration=Release  /p:Platform=x64 "
-        else :
-            command = 'make install'
+        else:
+            command = "make install"
         self.log_command(command)
 
-        res = subprocess.call(command,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
-        
-        res_check=self.check_install()
-        if res_check > 0 :
-            self.log_command("Error in sat check install - some files are not installed!")
+        res = subprocess.call(
+            command,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
+
+        res_check = self.check_install()
+        if res_check > 0:
+            self.log_command(
+                "Error in sat check install - some files are not installed!"
+            )
         self.put_txt_log_in_appli_log_dir("makeinstall")
 
-        res+=res_check
+        res += res_check
         if res == 0:
             return res
         else:
             return 1
 
-    # this function checks wether a list of file patterns (specified by check_install keyword) 
+    # this function checks whether a list of file patterns (specified by check_install keyword)
     # exists after the make install. The objective is to ensure the installation is complete.
     # patterns are given relative to the install dir of the product
     def check_install(self):
-        res=0
+        res = 0
         if "check_install" in self.product_info:
             self.log_command("Check installation of files")
             for pattern in self.product_info.check_install:
                 # pattern is given relatively to the install dir
-                complete_pattern=os.path.join(self.product_info.install_dir, pattern) 
+                complete_pattern = os.path.join(self.product_info.install_dir, pattern)
                 self.log_command("    -> check %s" % complete_pattern)
                 # expansion of pattern : takes into account environment variables and unix shell rules
-                list_of_path=glob.glob(os.path.expandvars(complete_pattern))
+                list_of_path = glob.glob(os.path.expandvars(complete_pattern))
                 if not list_of_path:
                     # we expect to find at least one entry, if not we consider the test failed
-                    res+=1
-                    self.logger.write("Error, sat check install failed for file pattern %s\n" % complete_pattern, 1)
-                    self.log_command("Error, sat check install failed for file pattern %s" % complete_pattern)
+                    res += 1
+                    self.logger.write(
+                        "Error, sat check install failed for file pattern %s\n"
+                        % complete_pattern,
+                        1,
+                    )
+                    self.log_command(
+                        "Error, sat check install failed for file pattern %s"
+                        % complete_pattern
+                    )
         return res
 
     ##
     # Runs 'make_check'.
     def check(self, command=""):
         if src.architecture.is_windows():
-            cmd = 'msbuild RUN_TESTS.vcxproj /p:Configuration=Release  /p:Platform=x64 '
-        else :
-            if self.product_info.build_source=="autotools" :
-                cmd = 'make check'
+            cmd = "msbuild RUN_TESTS.vcxproj /p:Configuration=Release  /p:Platform=x64 "
+        else:
+            if self.product_info.build_source == "autotools":
+                cmd = "make check"
             else:
-                cmd = 'make test'
-        
+                cmd = "make test"
+
         if command:
             cmd = command
-        
+
         self.log_command(cmd)
         self.log_command("For more detailed logs, see test logs in %s" % self.build_dir)
 
-        res = subprocess.call(cmd,
-                              shell=True,
-                              cwd=str(self.build_dir),
-                              env=self.launch_environ.environ.environ,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT)
+        res = subprocess.call(
+            cmd,
+            shell=True,
+            cwd=str(self.build_dir),
+            env=self.launch_environ.environ.environ,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+        )
 
         self.put_txt_log_in_appli_log_dir("makecheck")
         if res == 0:
             return res
         else:
             return 1
-      
+
     ##
     # Performs a default build for this module.
-    def do_default_build(self,
-                         build_conf_options="",
-                         configure_options="",
-                         show_warning=True):
+    def do_default_build(
+        self, build_conf_options="", configure_options="", show_warning=True
+    ):
         use_autotools = False
-        if 'use_autotools' in self.product_info:
+        if "use_autotools" in self.product_info:
             uc = self.product_info.use_autotools
-            if uc in ['always', 'yes']: 
+            if uc in ["always", "yes"]:
                 use_autotools = True
-            elif uc == 'option': 
+            elif uc == "option":
                 use_autotools = self.options.autotools
 
-
         self.use_autotools = use_autotools
 
         use_ctest = False
-        if 'use_ctest' in self.product_info:
+        if "use_ctest" in self.product_info:
             uc = self.product_info.use_ctest
-            if uc in ['always', 'yes']: 
+            if uc in ["always", "yes"]:
                 use_ctest = True
-            elif uc == 'option': 
+            elif uc == "option":
                 use_ctest = self.options.ctest
 
         self.use_ctest = use_ctest
 
         if show_warning:
             cmd = ""
-            if use_autotools: cmd = "(autotools)"
-            if use_ctest: cmd = "(ctest)"
-            
+            if use_autotools:
+                cmd = "(autotools)"
+            if use_ctest:
+                cmd = "(ctest)"
+
             self.log("\n", 4, False)
-            self.log("%(module)s: Run default compilation method %(cmd)s\n" % \
-                { "module": self.module, "cmd": cmd }, 4)
+            self.log(
+                "%(module)s: Run default compilation method %(cmd)s\n"
+                % {"module": self.module, "cmd": cmd},
+                4,
+            )
 
         if use_autotools:
-            if not self.prepare(): return self.get_result()
-            if not self.build_configure(
-                                   build_conf_options): return self.get_result()
-            if not self.configure(configure_options): return self.get_result()
-            if not self.make(): return self.get_result()
-            if not self.install(): return self.get_result()
-            if not self.clean(): return self.get_result()
-           
-        else: # CMake
-            if self.config.VARS.dist_name=='Win':
-                if not self.wprepare(): return self.get_result()
-                if not self.cmake(): return self.get_result()
-                if not self.wmake(): return self.get_result()
-                if not self.install(): return self.get_result()
-                if not self.clean(): return self.get_result()
-            else :
-                if not self.prepare(): return self.get_result()
-                if not self.cmake(): return self.get_result()
-                if not self.make(): return self.get_result()
-                if not self.install(): return self.get_result()
-                if not self.clean(): return self.get_result()
+            if not self.prepare():
+                return self.get_result()
+            if not self.build_configure(build_conf_options):
+                return self.get_result()
+            if not self.configure(configure_options):
+                return self.get_result()
+            if not self.make():
+                return self.get_result()
+            if not self.install():
+                return self.get_result()
+            if not self.clean():
+                return self.get_result()
+
+        else:  # CMake
+            if self.config.VARS.dist_name == "Win":
+                if not self.wprepare():
+                    return self.get_result()
+                if not self.cmake():
+                    return self.get_result()
+                if not self.wmake():
+                    return self.get_result()
+                if not self.install():
+                    return self.get_result()
+                if not self.clean():
+                    return self.get_result()
+            else:
+                if not self.prepare():
+                    return self.get_result()
+                if not self.cmake():
+                    return self.get_result()
+                if not self.make():
+                    return self.get_result()
+                if not self.install():
+                    return self.get_result()
+                if not self.clean():
+                    return self.get_result()
 
         return self.get_result()
 
@@ -431,11 +472,17 @@ CC=\\"hack_libtool\\"%g" libtool'''
     # Performs a build with a script.
     def do_python_script_build(self, script, nb_proc):
         # script found
-        self.logger.write(_("Compile %(product)s using script %(script)s\n") % \
-            { 'product': self.product_info.name,
-             'script': src.printcolors.printcLabel(script) }, 4)
+        self.logger.write(
+            _("Compile %(product)s using script %(script)s\n")
+            % {
+                "product": self.product_info.name,
+                "script": src.printcolors.printcLabel(script),
+            },
+            4,
+        )
         try:
             import imp
+
             product = self.product_info.name
             pymodule = imp.load_source(product + "_compile_script", script)
             self.nb_proc = nb_proc
@@ -444,6 +491,7 @@ CC=\\"hack_libtool\\"%g" libtool'''
             __, exceptionValue, exceptionTraceback = sys.exc_info()
             self.logger.write(str(exceptionValue), 1)
             import traceback
+
             traceback.print_tb(exceptionTraceback)
             traceback.print_exc()
             retcode = 1
@@ -454,7 +502,7 @@ CC=\\"hack_libtool\\"%g" libtool'''
 
     def complete_environment(self, make_options):
         assert self.build_environ is not None
-        # pass additional variables to environment 
+        # pass additional variables to environment
         # (may be used by the build script)
         self.build_environ.set("APPLICATION_NAME", self.config.APPLICATION.name)
         self.build_environ.set("SOURCE_DIR", str(self.source_dir))
@@ -467,7 +515,7 @@ CC=\\"hack_libtool\\"%g" libtool'''
         self.build_environ.set("DIST_VERSION", self.config.VARS.dist_version)
         self.build_environ.set("DIST", self.config.VARS.dist)
         self.build_environ.set("VERSION", self.product_info.version)
-        # if product is in hpc mode, set SAT_HPC to 1 
+        # if product is in hpc mode, set SAT_HPC to 1
         # in order for the compilation script to take it into account
         if src.product.product_is_hpc(self.product_info):
             self.build_environ.set("SAT_HPC", "1")
@@ -476,27 +524,30 @@ CC=\\"hack_libtool\\"%g" libtool'''
         if self.verbose_mode:
             self.build_environ.set("SAT_VERBOSE", "1")
 
-
     def do_batch_script_build(self, script, nb_proc):
 
         if src.architecture.is_windows():
             make_options = "/maxcpucount:%s" % nb_proc
-        else :
+        else:
             make_options = "-j%s" % nb_proc
 
         self.log_command("  " + _("Run build script %s\n") % script)
         self.complete_environment(make_options)
-        
-        res = subprocess.call(script, 
-                              shell=True,
-                              stdout=self.logger.logTxtFile,
-                              stderr=subprocess.STDOUT,
-                              cwd=str(self.build_dir),
-                              env=self.build_environ.environ.environ)
-
-        res_check=self.check_install()
-        if res_check > 0 :
-            self.log_command("Error in sat check install - some files are not installed!")
+
+        res = subprocess.call(
+            script,
+            shell=True,
+            stdout=self.logger.logTxtFile,
+            stderr=subprocess.STDOUT,
+            cwd=str(self.build_dir),
+            env=self.build_environ.environ.environ,
+        )
+
+        res_check = self.check_install()
+        if res_check > 0:
+            self.log_command(
+                "Error in sat check install - some files are not installed!"
+            )
 
         self.put_txt_log_in_appli_log_dir("script")
         res += res_check
@@ -504,42 +555,41 @@ CC=\\"hack_libtool\\"%g" libtool'''
             return res
         else:
             return 1
-    
+
     def do_script_build(self, script, number_of_proc=0):
         # define make options (may not be used by the script)
-        if number_of_proc==0:
-            nb_proc = src.get_cfg_param(self.product_info,"nb_proc", 0)
-            if nb_proc == 0: 
+        if number_of_proc == 0:
+            nb_proc = src.get_cfg_param(self.product_info, "nb_proc", 0)
+            if nb_proc == 0:
                 nb_proc = self.config.VARS.nb_proc
         else:
             nb_proc = min(number_of_proc, self.config.VARS.nb_proc)
-            
-        extension = script.split('.')[-1]
-        if extension in ["bat","sh"]:
+
+        extension = script.split(".")[-1]
+        if extension in ["bat", "sh"]:
             return self.do_batch_script_build(script, nb_proc)
         if extension == "py":
             return self.do_python_script_build(script, nb_proc)
-        
+
         msg = _("The script %s must have .sh, .bat or .py extension." % script)
         raise src.SatException(msg)
-    
+
     def put_txt_log_in_appli_log_dir(self, file_name):
-        '''Put the txt log (that contain the system logs, like make command
+        """Put the txt log (that contain the system logs, like make command
            output) in the directory <APPLICATION DIR>/LOGS/<product_name>/
-    
+
         :param file_name Str: the name of the file to write
-        '''
+        """
         if self.logger.logTxtFile == sys.__stdout__:
             return
-        dir_where_to_put = os.path.join(self.config.APPLICATION.workdir,
-                                        "LOGS",
-                                        self.product_info.name)
+        dir_where_to_put = os.path.join(
+            self.config.APPLICATION.workdir, "LOGS", self.product_info.name
+        )
         file_path = os.path.join(dir_where_to_put, file_name)
         src.ensure_path_exists(dir_where_to_put)
-        # write the logTxtFile copy it to the destination, and then recreate 
+        # write the logTxtFile, copy it to the destination, and then recreate
         # it as it was
         self.logger.logTxtFile.close()
         shutil.move(self.logger.txtFilePath, file_path)
-        self.logger.logTxtFile = open(str(self.logger.txtFilePath), 'w')
+        self.logger.logTxtFile = open(str(self.logger.txtFilePath), "w")
         self.logger.logTxtFile.write(open(file_path, "r").read())
-        
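
To illustrate the check_install mechanism described in the comments above, a self-contained sketch of the per-pattern glob expansion that check_install() performs; the directory and patterns below are hypothetical:

import glob
import os

install_dir = "/tmp/INSTALL/MYPRODUCT"    # hypothetical product install directory
patterns = ["lib/*.so", "bin/myproduct"]  # hypothetical check_install entries
missing = [p for p in patterns
           if not glob.glob(os.path.expandvars(os.path.join(install_dir, p)))]
if missing:
    print("check install failed for: %s" % missing)
else:
    print("all patterns found")
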
index 330fb6df37c33468237c5c49f03f4e04528161b4..8c154271518cdf305423aaeb6dad3f40eb6dafd5 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -70,7 +70,7 @@ try:
 except ImportError:
     from io import StringIO
 
-_debug = [False] #support push/pop for temporary activate debug outputs
+_debug = [False]  # support push/pop to temporarily activate debug outputs
 
 
 def isDeveloper():
@@ -84,60 +84,79 @@ def isDeveloper():
     else:
         return False
 
-def indent(text, amount=2, ch=' '):
+
+def indent(text, amount=2, ch=" "):
     """indent multi lines message"""
     padding = amount * ch
-    return ''.join(padding + line for line in text.splitlines(True))
+    return "".join(padding + line for line in text.splitlines(True))
+
 
 def isTypeConfig(var):
     """To know if var is instance from Config/pyconf"""
     typ = str(type(var))
     # print "isTypeConfig" ,type, dir(var)
-    if ".pyconf.Config" in typ: return True
-    if ".pyconf.Mapping" in typ: return True
-    if ".pyconf.Sequence" in typ: return True
+    if ".pyconf.Config" in typ:
+        return True
+    if ".pyconf.Mapping" in typ:
+        return True
+    if ".pyconf.Sequence" in typ:
+        return True
     # print "NOT isTypeConfig %s" % typ
     return False
-    
+
+
 def write(title, var="", force=None, fmt="  %s:\n%s\n####\n"):
     """write sys.stderr a message if _debug[-1]==True or optionaly force=True"""
     if _debug[-1] or force:
-      callerframerecord = inspect.stack()[1] # get info of the caller
-      frame = callerframerecord[0]
-      info = inspect.getframeinfo(frame)
-      sys.stderr.write("\n#### DEBUG - %s:%s (%s) ####\n" % (info.filename, info.lineno, info.function))
-      tvar = type(var)
-      typ = str(tvar)
-      if isTypeConfig(var):
-        sys.stderr.write(fmt % (title, indent(getStrConfigDbg(var))))
-        return
-      if 'UnittestStream' in typ:
-        sys.stderr.write(fmt % (title, indent(var.getLogs())))
-        return  
-      if tvar is not str and tvar is not unicode:
-        sys.stderr.write(fmt % (title, indent(PP.pformat(var))))
+        callerframerecord = inspect.stack()[1]  # get info of the caller
+        frame = callerframerecord[0]
+        info = inspect.getframeinfo(frame)
+        sys.stderr.write(
+            "\n#### DEBUG - %s:%s (%s) ####\n"
+            % (info.filename, info.lineno, info.function)
+        )
+        tvar = type(var)
+        typ = str(tvar)
+        if isTypeConfig(var):
+            sys.stderr.write(fmt % (title, indent(getStrConfigDbg(var))))
+            return
+        if "UnittestStream" in typ:
+            sys.stderr.write(fmt % (title, indent(var.getLogs())))
+            return
+        if tvar is not str and tvar is not unicode:
+            sys.stderr.write(fmt % (title, indent(PP.pformat(var))))
+            return
+        sys.stderr.write(fmt % (title, indent(var)))
         return
-      sys.stderr.write(fmt % (title, indent(var)))
-      return
     return
 
+
 def tofix(title, var="", force=None):
     """
     write a message to sys.stderr if _debug[-1]==True or optionally force=True
     use this only if no logger is accessible for the classic logger.warning(message)
     """
     if _debug[-1] or isDeveloper():
-        callerframerecord = inspect.stack()[1] # get info of the caller
+        callerframerecord = inspect.stack()[1]  # get info of the caller
         frame = callerframerecord[0]
         info = inspect.getframeinfo(frame)
-        fmt = "#### TOFIX - " + str(info.filename) + ":" + str(info.lineno) +\
-              " (" + str(info.function) + ") ####\n   %s:\n%s\n"
+        fmt = (
+            "#### TOFIX - "
+            + str(info.filename)
+            + ":"
+            + str(info.lineno)
+            + " ("
+            + str(info.function)
+            + ") ####\n   %s:\n%s\n"
+        )
         write(title, var, force, fmt)
 
+
 def push_debug(aBool):
     """set debug outputs activated, or not"""
     _debug.append(aBool)
 
+
 def pop_debug():
     """restore previous debug outputs status"""
     if len(_debug) > 1:
@@ -148,61 +167,67 @@ def pop_debug():
 
 
 def format_exception(msg, limit=None, trace=None):
-  """
-  Format a stack trace and the exception information.
-  as traceback.format_exception(), without color
-  with traceback only if (_debug) or (DBG.isDeveloper())
-  """
-  etype, value, tb = sys.exc_info()
-  res = msg
-  if tb:
-    res += "\nTraceback (most recent call last):\n"
-    res += "".join(traceback.format_tb(tb, limit))  # [:-1])
-  res += "\n"
-  res += "\n".join(traceback.format_exception_only(etype, value))
-  return res
-
-def format_color_exception(msg, limit=None, trace=None):
-  """
-  Format a stack trace and the exception information.
-  as traceback.format_exception(), with color
-  with traceback only if _debug or isDeveloper())
-  """
-  etype, value, tb = sys.exc_info()
-  if _debug[-1] or isDeveloper():
-    res = "<red>" + msg
+    """
+    Format a stack trace and the exception information.
+    as traceback.format_exception(), without color
+    with traceback only if (_debug) or (DBG.isDeveloper())
+    """
+    etype, value, tb = sys.exc_info()
+    res = msg
     if tb:
-      res += "<yellow>\nTraceback (most recent call last):\n"
-      res += "".join(traceback.format_tb(tb, limit))  # [:-1])
-    res += "\n<red>"
+        res += "\nTraceback (most recent call last):\n"
+        res += "".join(traceback.format_tb(tb, limit))  # [:-1])
+    res += "\n"
     res += "\n".join(traceback.format_exception_only(etype, value))
-    return res + "<reset>"
-  else:
-    res = "<red>" + msg  # + "<bright>"
-    res += "".join(traceback.format_exception_only(etype, value))
-    return res + "<reset>"
+    return res
+
+
+def format_color_exception(msg, limit=None, trace=None):
+    """
+    Format a stack trace and the exception information.
+    as traceback.format_exception(), with color
+    with traceback only if _debug or isDeveloper())
+    """
+    etype, value, tb = sys.exc_info()
+    if _debug[-1] or isDeveloper():
+        res = "<red>" + msg
+        if tb:
+            res += "<yellow>\nTraceback (most recent call last):\n"
+            res += "".join(traceback.format_tb(tb, limit))  # [:-1])
+        res += "\n<red>"
+        res += "\n".join(traceback.format_exception_only(etype, value))
+        return res + "<reset>"
+    else:
+        res = "<red>" + msg  # + "<bright>"
+        res += "".join(traceback.format_exception_only(etype, value))
+        return res + "<reset>"
 
 
 ###############################################
 # miscellaneous debug utilities
 ###############################################
 
+
 class OutStream(StringIO):
     """
     utility class for pyconf.Config output iostream
     """
+
     def close(self):
-      """
-      because Config.__save__ calls close() stream as file
-      keep value before lost as self.value
-      """
-      self.value = self.getvalue()
-      StringIO.close(self)
-    
+        """
+        because Config.__save__ calls close() on the stream as on a file,
+        keep the value before it is lost, as self.value
+        """
+        self.value = self.getvalue()
+        StringIO.close(self)
+
+
 class InStream(StringIO):
     """utility class for pyconf.Config input iostream"""
+
     pass
 
+
 def getLocalEnv():
     """get string for environment variables representation"""
     res = ""
@@ -210,11 +235,13 @@ def getLocalEnv():
         res += "%s : %s\n" % (i, os.environ[i])
     return res
 
-# save as initial Config.save() moved as Config.__save__() 
+
+# save as initial Config.save() moved as Config.__save__()
 def saveConfigStd(config, aStream):
     """returns as file .pyconf"""
-    indent =  0
-    config.__save__(aStream, indent) 
+    indent = 0
+    config.__save__(aStream, indent)
+
 
 def getStrConfigStd(config):
     """set string as saveConfigStd, as file .pyconf"""
@@ -222,44 +249,48 @@ def getStrConfigStd(config):
     saveConfigStd(config, outStream)
     return outStream.value
 
+
 def getStrConfigDbg(config):
     """
-    set string as saveConfigDbg, 
+    set string as saveConfigDbg,
     as (path expression evaluation) for debug
     """
     outStream = OutStream()
     saveConfigDbg(config, outStream)
     return outStream.value
 
+
 def saveConfigDbg(config, aStream, indent=0, path=""):
     """pyconf returns multilines (path expression evaluation) for debug"""
     _saveConfigRecursiveDbg(config, aStream, indent, path, 0)
-    aStream.close() # as config.__save__()
+    aStream.close()  # as config.__save__()
+
 
 def _saveConfigRecursiveDbg(config, aStream, indent, path, nb):
     """pyconf inspired from Mapping.__save__"""
     debug = False
-    nbp = nb + 1 # depth recursive
-    if indent <= 0: 
-      indentp = 0
+    nbp = nb + 1  # recursion depth
+    if indent <= 0:
+        indentp = 0
     else:
-      indentp = indent + 2
-      
-    if nbp > 10: # protection
-      # raise Exception("!!! ERROR: Circular reference after %s" % aStream.getvalue())
-      # raise Exception("!!! ERROR: Circular reference %s" % path)
-      aStream.write("<red>!!! ERROR: Circular reference after %s<reset>\n" % path)
-      return
-    
-    indstr = indent * ' ' # '':no indent, ' ':indent
+        indentp = indent + 2
+
+    if nbp > 10:  # protection
+        # raise Exception("!!! ERROR: Circular reference after %s" % aStream.getvalue())
+        # raise Exception("!!! ERROR: Circular reference %s" % path)
+        aStream.write("<red>!!! ERROR: Circular reference after %s<reset>\n" % path)
+        return
+
+    indstr = indent * " "  # '':no indent, ' ':indent
     strType = str(type(config))
-    if debug: print("saveDbg Type %s %s" % (path, strType))
-    
+    if debug:
+        print("saveDbg Type %s %s" % (path, strType))
+
     if "Sequence" in strType:
-      for i in range(len(config)):
-        _saveConfigRecursiveDbg(config[i], aStream, indentp, path+"[%i]" % i, nbp)
-      return
-    '''
+        for i in range(len(config)):
+            _saveConfigRecursiveDbg(config[i], aStream, indentp, path + "[%i]" % i, nbp)
+        return
+    """
     if "Reference" in strType:
       try:
         #evaluate = value.resolve(config)
@@ -267,46 +298,64 @@ def _saveConfigRecursiveDbg(config, aStream, indent, path, nb):
       except Exception as e:  
         aStream.write("<header>%s%s<reset> : <red>!!! ERROR: %s !!!<reset>\n" % (indstr, path, str(e)))     
       return
-    '''
-    
-    try: #type config, mapping
-      order = object.__getattribute__(config, 'order')
-      data = object.__getattribute__(config, 'data')
+    """
+
+    try:  # type config, mapping
+        order = object.__getattribute__(config, "order")
+        data = object.__getattribute__(config, "data")
     except:
-      aStream.write("%s%s : '%s'\n" % (indstr, path, str(config)))
-      return     
-    for key in sorted(data): #order): # data as sort alphabetical, order as initial order
-      value = data[key]
-      strType = str(type(value))
-      if debug: print('strType %s %s %s' % (path, key, strType))
-      if "Config" in strType:
-        _saveConfigRecursiveDbg(value, aStream, indentp, path+"."+key, nbp)
-        continue
-      if "Mapping" in strType:
-        _saveConfigRecursiveDbg(value, aStream, indentp, path+"."+key, nbp)
-        continue
-      if "Sequence" in strType:
-        for i in range(len(value)):
-          _saveConfigRecursiveDbg(value.data[i], aStream, indentp, path+"."+key+"[%i]" % i, nbp)
-        continue
-      if "Expression" in strType:
-        try:
-          evaluate = value.evaluate(config)
-          aStream.write("%s%s.%s : %s --> '%s'\n" % (indstr, path, key, str(value), evaluate))
-        except Exception as e:      
-          aStream.write("%s%s.%s : !!! ERROR: %s !!!\n" % (indstr, path, key, str(e)))     
-        continue
-      if "Reference" in strType:
+        aStream.write("%s%s : '%s'\n" % (indstr, path, str(config)))
+        return
+    for key in sorted(
+        data
+    ):  # order): # data as sort alphabetical, order as initial order
+        value = data[key]
+        strType = str(type(value))
+        if debug:
+            print("strType %s %s %s" % (path, key, strType))
+        if "Config" in strType:
+            _saveConfigRecursiveDbg(value, aStream, indentp, path + "." + key, nbp)
+            continue
+        if "Mapping" in strType:
+            _saveConfigRecursiveDbg(value, aStream, indentp, path + "." + key, nbp)
+            continue
+        if "Sequence" in strType:
+            for i in range(len(value)):
+                _saveConfigRecursiveDbg(
+                    value.data[i], aStream, indentp, path + "." + key + "[%i]" % i, nbp
+                )
+            continue
+        if "Expression" in strType:
+            try:
+                evaluate = value.evaluate(config)
+                aStream.write(
+                    "%s%s.%s : %s --> '%s'\n"
+                    % (indstr, path, key, str(value), evaluate)
+                )
+            except Exception as e:
+                aStream.write(
+                    "%s%s.%s : !!! ERROR: %s !!!\n" % (indstr, path, key, str(e))
+                )
+            continue
+        if "Reference" in strType:
+            try:
+                evaluate = value.resolve(config)
+                aStream.write(
+                    "%s%s.%s : %s --> '%s'\n"
+                    % (indstr, path, key, str(value), evaluate)
+                )
+            except Exception as e:
+                aStream.write(
+                    "%s%s.%s : !!! ERROR: %s !!!\n" % (indstr, path, key, str(e))
+                )
+            continue
+        if type(value) in [str, bool, int, type(None), unicode]:
+            aStream.write("%s%s.%s : '%s'\n" % (indstr, path, key, str(value)))
+            continue
         try:
-          evaluate = value.resolve(config)
-          aStream.write("%s%s.%s : %s --> '%s'\n" % (indstr, path, key, str(value), evaluate))
-        except Exception as e:  
-          aStream.write("%s%s.%s : !!! ERROR: %s !!!\n" % (indstr, path, key, str(e)))     
-        continue
-      if type(value) in [str, bool, int, type(None), unicode]:
-        aStream.write("%s%s.%s : '%s'\n" % (indstr, path, key, str(value)))
-        continue
-      try:
-        aStream.write("!!! TODO fix that %s %s%s.%s : %s\n" % (type(value), indstr, path, key, str(value)))
-      except Exception as e:      
-        aStream.write("%s%s.%s : !!! %s\n" % (indstr, path, key, str(e)))
+            aStream.write(
+                "!!! TODO fix that %s %s%s.%s : %s\n"
+                % (type(value), indstr, path, key, str(value))
+            )
+        except Exception as e:
+            aStream.write("%s%s.%s : !!! %s\n" % (indstr, path, key, str(e)))
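
The hunk above only re-indents _saveConfigRecursiveDbg, which walks a nested pyconf config and dispatches on the value's type name ("Config", "Mapping", "Sequence", "Expression", "Reference", or a plain scalar). For readers unfamiliar with that pattern, here is a minimal, self-contained sketch of the same recursive dump over plain Python containers (hypothetical helper, not part of sat):

import sys

def dump_recursive(node, stream, indent=0, path="config"):
    """Walk a nested structure and print one line per scalar leaf,
    mirroring the type-name dispatch of _saveConfigRecursiveDbg."""
    if isinstance(node, dict):
        for key in sorted(node):  # alphabetical, like sorted(data) above
            dump_recursive(node[key], stream, indent + 1, path + "." + key)
    elif isinstance(node, (list, tuple)):
        for i, item in enumerate(node):
            dump_recursive(item, stream, indent + 1, "%s[%i]" % (path, i))
    else:
        stream.write("%s%s : '%s'\n" % ("  " * indent, path, node))

cfg = {"APPLICATION": {"name": "SALOME", "products": ["KERNEL", "GUI"]}}
dump_recursive(cfg, sys.stdout)
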
index a706d5deab31149f876575522860b92198d41b46..9df274b750edd62f897a5008b6fe9c7d8b24a8c7 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -31,11 +31,12 @@ class Environ:
     """\
     Class to manage the environment context
     """
+
     def __init__(self, environ=None):
         """Initialization. If the environ argument is passed, the environment
            will be added to it, else the external environment is used.
-           
-        :param environ dict:  
+
+        :param environ dict:
         """
         if environ is not None:
             self.environ = environ
@@ -59,7 +60,7 @@ class Environ:
         else:
             delim = "$"
         if delim in value:
-            # The string.Template class is a string class 
+            # The string.Template class is a string class
             # for supporting $-substitutions
             zt = string.Template(value)
             zt.delimiter = delim
@@ -67,7 +68,7 @@ class Environ:
                 value = zt.substitute(self.environ)
             except KeyError as exc:
                 pass
-                #raise src.SatException(_("Missing definition "
+                # raise src.SatException(_("Missing definition "
                 #                         "in environment: %s") % str(exc))
         return value
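
The Environ substitution code reformatted above resolves $-style references against the stored environment with string.Template (a different delimiter, e.g. '%', is used for Windows-style values), silently ignoring missing keys. A minimal sketch of that substitution, assuming a Template subclass to set the delimiter (subclassing is the documented way to change it; the sat code assigns zt.delimiter after construction):

import string

class PercentTemplate(string.Template):
    # documented way to change the delimiter: override it at class level
    delimiter = "%"

def expand(value, environ, windows=False):
    """Resolve $VAR (or %VAR) references against 'environ'; on a missing
    key the value is returned unchanged, mirroring the 'pass' on KeyError
    above."""
    template = PercentTemplate(value) if windows else string.Template(value)
    try:
        return template.substitute(environ)
    except KeyError:
        return value

print(expand("${ROOT}/bin", {"ROOT": "/opt/salome"}))    # /opt/salome/bin
print(expand("%ROOT/bin", {"ROOT": "C:/salome"}, True))  # C:/salome/bin
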
 
@@ -81,9 +82,12 @@ class Environ:
         :param sep str: the separator string
         """
         # check that the value does not contain the system separator
-        separator=os.pathsep
+        separator = os.pathsep
         if separator in value:
-            raise Exception("Environ append key '%s' value '%s' contains forbidden character '%s'" % (key, value, separator))
+            raise Exception(
+                "Environ append key '%s' value '%s' contains forbidden character '%s'"
+                % (key, value, separator)
+            )
 
         # check if the key is already in the environment
         if key in self.environ:
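
The Environ append/prepend code above refuses values that already embed the system path separator and then adds the value with the caller's separator. A standalone sketch of that contract over a plain dict (hypothetical helper; the duplicate check is borrowed from the FileEnviron variant later in this diff):

import os

def append_value(environ, key, value, sep=os.pathsep):
    """Append 'value' to environ[key] using 'sep', refusing values that
    already embed the system path separator (same check as above)."""
    if os.pathsep in value:
        raise ValueError(
            "append key '%s': value '%s' contains forbidden character '%s'"
            % (key, value, os.pathsep)
        )
    if key in environ and environ[key]:
        # skip duplicates, as the FileEnviron variant below does
        if value not in environ[key].split(sep):
            environ[key] = environ[key] + sep + value
    else:
        environ[key] = value

env = {}
append_value(env, "PYTHONPATH", "/opt/a")
append_value(env, "PYTHONPATH", "/opt/b")
print(env["PYTHONPATH"])  # /opt/a:/opt/b on POSIX
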
@@ -121,9 +125,12 @@ class Environ:
         :param sep str: the separator string
         """
         # check that the value does not contain the system separator
-        separator=os.pathsep
+        separator = os.pathsep
         if separator in value:
-            raise Exception("Environ append key '%s' value '%s' contains forbidden character '%s'" % (key, value, separator))
+            raise Exception(
+                "Environ append key '%s' value '%s' contains forbidden character '%s'"
+                % (key, value, separator)
+            )
 
         # check if the key is already in the environment
         if key in self.environ:
@@ -145,7 +152,7 @@ class Environ:
         :param sep str: the separator string
         """
         if isinstance(value, list):
-            for v in reversed(value): # prepend list, first item at last to stay first
+            for v in reversed(value):  # prepend list, first item at last to stay first
                 self.prepend_value(key, v, sep)
         else:
             self.prepend_value(key, value, sep)
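
The prepend logic above walks a list in reverse so that the first list item ends up first in the resulting path. A tiny sketch showing why the reversal is needed (hypothetical helpers):

def prepend_value(env, key, value, sep=":"):
    env[key] = value + sep + env[key] if env.get(key) else value

def prepend(env, key, value, sep=":"):
    # a list is walked backwards so that its first item stays first
    if isinstance(value, list):
        for v in reversed(value):
            prepend_value(env, key, v, sep)
    else:
        prepend_value(env, key, value, sep)

env = {"PATH": "/usr/bin"}
prepend(env, "PATH", ["/opt/a/bin", "/opt/b/bin"])
print(env["PATH"])  # /opt/a/bin:/opt/b/bin:/usr/bin
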
@@ -188,17 +195,19 @@ class Environ:
         return self.get(key)
 
 
-
 class SalomeEnviron:
     """\
     Class to manage the environment of SALOME.
     """
-    def __init__(self,
-                 cfg,
-                 environ,
-                 forBuild=False,
-                 for_package=None,
-                 enable_simple_env_script = True):
+
+    def __init__(
+        self,
+        cfg,
+        environ,
+        forBuild=False,
+        for_package=None,
+        enable_simple_env_script=True,
+    ):
         """\
         Initialization.
 
@@ -222,31 +231,29 @@ class SalomeEnviron:
     def __repr__(self):
         """easy almost exhaustive quick resume for debug print"""
         res = {
-          "environ" : self.environ,
-          "forBuild" : self.forBuild,
-          "for_package" : self.for_package,
+            "environ": self.environ,
+            "forBuild": self.forBuild,
+            "for_package": self.for_package,
         }
         return "%s(\n%s\n)" % (self.__class__.__name__, PP.pformat(res))
 
     def __set_sorted_products_list(self):
         all_products_infos = src.product.get_products_infos(
-                                 self.cfg.APPLICATION.products,
-                                 self.cfg)
-        
-        from compile import get_dependencies_graph,depth_first_topo_graph
-        all_products_graph=get_dependencies_graph(all_products_infos, self.forBuild)
-        visited_nodes=[]
-        sorted_nodes=[]
+            self.cfg.APPLICATION.products, self.cfg
+        )
+
+        from compile import get_dependencies_graph, depth_first_topo_graph
+
+        all_products_graph = get_dependencies_graph(all_products_infos, self.forBuild)
+        visited_nodes = []
+        sorted_nodes = []
         for n in all_products_graph:
             if n not in visited_nodes:
-                visited_nodes,sorted_nodes=depth_first_topo_graph(
-                                               all_products_graph, 
-                                               n, 
-                                               visited_nodes,
-                                               sorted_nodes)
-        self.sorted_product_list=sorted_nodes
-        self.all_products_graph=all_products_graph
-
+                visited_nodes, sorted_nodes = depth_first_topo_graph(
+                    all_products_graph, n, visited_nodes, sorted_nodes
+                )
+        self.sorted_product_list = sorted_nodes
+        self.all_products_graph = all_products_graph
 
     def append(self, key, value, sep=os.pathsep):
         """\
@@ -305,9 +312,9 @@ class SalomeEnviron:
         """
         # check if value needs to be evaluated
         if value is not None and value.startswith("`") and value.endswith("`"):
-            res = subprocess.Popen("echo %s" % value,
-                                   shell=True,
-                                   stdout=subprocess.PIPE).communicate()
+            res = subprocess.Popen(
+                "echo %s" % value, shell=True, stdout=subprocess.PIPE
+            ).communicate()
             value = res[0].strip()
 
         return self.environ.set(key, value)
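
The SalomeEnviron code above treats a value wrapped in backquotes as a shell expression and evaluates it through echo in a subshell before storing it. A standalone sketch of that behaviour on a POSIX shell (decoding added here for Python 3, since communicate() returns bytes):

import subprocess

def shell_expand(value):
    """If 'value' is wrapped in backquotes, let the shell evaluate it
    (command substitution through echo); otherwise return it unchanged."""
    if value is not None and value.startswith("`") and value.endswith("`"):
        out = subprocess.Popen(
            "echo %s" % value, shell=True, stdout=subprocess.PIPE
        ).communicate()[0]
        return out.decode().strip()
    return value

print(shell_expand("`echo hello`"))  # hello
print(shell_expand("plain"))         # plain
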
@@ -331,7 +338,7 @@ class SalomeEnviron:
         
         :param nb_line int: the number of empty lines to add
         """
-        if 'add_line' in dir(self.environ):
+        if "add_line" in dir(self.environ):
             self.environ.add_line(nb_line)
 
     def add_comment(self, comment):
@@ -340,7 +347,7 @@ class SalomeEnviron:
         
         :param comment str: the commentary to add
         """
-        if 'add_comment' in dir(self.environ):
+        if "add_comment" in dir(self.environ):
             self.environ.add_comment(comment)
 
     def add_warning(self, warning):
@@ -349,7 +356,7 @@ class SalomeEnviron:
         
         :param warning str: the warning to add
         """
-        if 'add_warning' in dir(self.environ):
+        if "add_warning" in dir(self.environ):
             self.environ.add_warning(warning)
 
     def finish(self):
@@ -358,7 +365,7 @@ class SalomeEnviron:
         
         :param required bool: Do nothing if required is False
         """
-        if 'finish' in dir(self.environ):
+        if "finish" in dir(self.environ):
             self.environ.add_line(1)
             # what for ?
             # self.environ.add_comment("clean all the path")
@@ -366,11 +373,9 @@ class SalomeEnviron:
 
     def set_python_libdirs(self):
         """Set some generic variables for python library paths"""
-        ver = self.get('PYTHON_VERSION')
-        self.set('PYTHON_LIBDIR', os.path.join('lib',
-                                                'python' + ver,
-                                                'site-packages'))
-        self.python_lib = self.get('PYTHON_LIBDIR')
+        ver = self.get("PYTHON_VERSION")
+        self.set("PYTHON_LIBDIR", os.path.join("lib", "python" + ver, "site-packages"))
+        self.python_lib = self.get("PYTHON_LIBDIR")
         self.has_python = True
 
     def set_application_env(self, logger):
@@ -379,22 +384,23 @@ class SalomeEnviron:
         
         :param logger Logger: The logger instance to display messages
         """
-        
+
         if self.for_package:
-           if src.architecture.is_windows():
-              self.set("PRODUCT_ROOT_DIR", "%out_dir_Path%")
-           else:
-              self.set("PRODUCT_ROOT_DIR", "out_dir_Path")
+            if src.architecture.is_windows():
+                self.set("PRODUCT_ROOT_DIR", "%out_dir_Path%")
+            else:
+                self.set("PRODUCT_ROOT_DIR", "out_dir_Path")
 
         else:
-           self.cfg.APPLICATION.environ.PRODUCT_ROOT_DIR = src.pyconf.Reference(self.cfg, src.pyconf.DOLLAR, "workdir")
-
+            self.cfg.APPLICATION.environ.PRODUCT_ROOT_DIR = src.pyconf.Reference(
+                self.cfg, src.pyconf.DOLLAR, "workdir"
+            )
 
         # Set the variables defined in the "environ" section
-        if 'environ' in self.cfg.APPLICATION:
-            # we write the PRODUCT environment in order to conform to 
+        if "environ" in self.cfg.APPLICATION:
+            # we write the PRODUCT environment in order to conform to
             # parseConfigFile.py
-            self.add_comment("PRODUCT environment") 
+            self.add_comment("PRODUCT environment")
             self.load_cfg_environment(self.cfg.APPLICATION.environ)
             if self.forBuild and "build" in self.cfg.APPLICATION.environ:
                 self.load_cfg_environment(self.cfg.APPLICATION.environ.build)
@@ -402,7 +408,6 @@ class SalomeEnviron:
                 self.load_cfg_environment(self.cfg.APPLICATION.environ.launch)
             self.add_line(1)
 
-
     def set_salome_minimal_product_env(self, product_info, logger):
         """\
         Sets the minimal environment for a SALOME product.
@@ -415,23 +420,30 @@ class SalomeEnviron:
 
         # set root dir
         root_dir = product_info.name + "_ROOT_DIR"
-        
+
         if src.product.product_is_configuration(product_info):
             # configuration modules are not installed, root_dir points at source dir
             if not self.for_package:
                 self.set(root_dir, product_info.source_dir)
             else:
-                self.set(root_dir, os.path.join("out_dir_Path",
-                         "SOURCES",
-                         os.path.basename(product_info.source_dir)))
-        elif 'install_dir' in product_info and product_info.install_dir:
+                self.set(
+                    root_dir,
+                    os.path.join(
+                        "out_dir_Path",
+                        "SOURCES",
+                        os.path.basename(product_info.source_dir),
+                    ),
+                )
+        elif "install_dir" in product_info and product_info.install_dir:
             self.set(root_dir, product_info.install_dir)
         elif not self.silent:
-            logger.write("  " + _("No install_dir for product %s\n") %
-                          product_info.name, 5)
-    
-        source_in_package = src.get_property_in_product_cfg(product_info,
-                                                           "sources_in_package")
+            logger.write(
+                "  " + _("No install_dir for product %s\n") % product_info.name, 5
+            )
+
+        source_in_package = src.get_property_in_product_cfg(
+            product_info, "sources_in_package"
+        )
         if not self.for_package or source_in_package == "yes":
             # set source dir, unless no source dir
             if not src.product.product_is_fixed(product_info):
@@ -439,18 +451,22 @@ class SalomeEnviron:
                 if not self.for_package:
                     self.set(src_dir, product_info.source_dir)
                 else:
-                    self.set(src_dir, os.path.join("out_dir_Path",
-                             "SOURCES",
-                             os.path.basename(product_info.source_dir)))
+                    self.set(
+                        src_dir,
+                        os.path.join(
+                            "out_dir_Path",
+                            "SOURCES",
+                            os.path.basename(product_info.source_dir),
+                        ),
+                    )
 
     def expand_salome_modules(self, pi):
-        if 'component_name' in pi:
+        if "component_name" in pi:
             compo_name = pi.component_name
         else:
             compo_name = pi.name
-        self.append('SALOME_MODULES', compo_name, ',')
-        
-        
+        self.append("SALOME_MODULES", compo_name, ",")
+
     def set_salome_generic_product_env(self, pi):
         """\
         Sets the generic environment for a SALOME product.
@@ -465,48 +481,48 @@ class SalomeEnviron:
         env_root_dir = self.get(pi.name + "_ROOT_DIR")
         l_binpath_libpath = []
         # create additional ROOT_DIR for CPP components
-        if 'component_name' in pi:
+        if "component_name" in pi:
             compo_name = pi.component_name
             if compo_name + "CPP" == pi.name:
                 compo_root_dir = compo_name + "_ROOT_DIR"
                 envcompo_root_dir = os.path.join(
-                            self.cfg.TOOLS.common.install_root, compo_name )
-                self.set(compo_root_dir ,  envcompo_root_dir)
-                bin_path = os.path.join(envcompo_root_dir, 'bin', 'salome')
-                lib_path = os.path.join(envcompo_root_dir, 'lib', 'salome')
-                l_binpath_libpath.append( (bin_path, lib_path) )
-
+                    self.cfg.TOOLS.common.install_root, compo_name
+                )
+                self.set(compo_root_dir, envcompo_root_dir)
+                bin_path = os.path.join(envcompo_root_dir, "bin", "salome")
+                lib_path = os.path.join(envcompo_root_dir, "lib", "salome")
+                l_binpath_libpath.append((bin_path, lib_path))
 
         if src.get_property_in_product_cfg(pi, "fhs"):
-            lib_path = os.path.join(env_root_dir, 'lib')
-            bin_path = os.path.join(env_root_dir, 'bin')
+            lib_path = os.path.join(env_root_dir, "lib")
+            bin_path = os.path.join(env_root_dir, "bin")
             if self.has_python:
-            # if the application doesn't include python, we don't need these two lines
+                # if the application doesn't include python, we don't need these two lines
                 pylib_path = os.path.join(env_root_dir, self.python_lib)
         else:
-            lib_path = os.path.join(env_root_dir, 'lib', 'salome')
-            bin_path = os.path.join(env_root_dir, 'bin', 'salome')
+            lib_path = os.path.join(env_root_dir, "lib", "salome")
+            bin_path = os.path.join(env_root_dir, "bin", "salome")
             if self.has_python:
-            # if the application doesn't include python, we don't need these two lines
-                pylib_path = os.path.join(env_root_dir, self.python_lib, 'salome')
+                # if the application doesn't include python, we don't need these two lines
+                pylib_path = os.path.join(env_root_dir, self.python_lib, "salome")
 
-        # Construct the paths to prepend to PATH and LD_LIBRARY_PATH and 
+        # Construct the paths to prepend to PATH and LD_LIBRARY_PATH and
         # PYTHONPATH
-        l_binpath_libpath.append( (bin_path, lib_path) )
+        l_binpath_libpath.append((bin_path, lib_path))
 
         for bin_path, lib_path in l_binpath_libpath:
             if not self.forBuild:
-                self.prepend('PATH', bin_path)
+                self.prepend("PATH", bin_path)
                 if src.architecture.is_windows():
-                    self.prepend('PATH', lib_path)
-                else :
-                    self.prepend('LD_LIBRARY_PATH', lib_path)
+                    self.prepend("PATH", lib_path)
+                else:
+                    self.prepend("LD_LIBRARY_PATH", lib_path)
 
-            l = [ bin_path, lib_path ]
+            l = [bin_path, lib_path]
             if not src.product.product_is_wheel(pi):
                 if self.has_python:
                     l.append(pylib_path)
-                self.prepend('PYTHONPATH', l)
+                self.prepend("PYTHONPATH", l)
 
     def set_cpp_env(self, product_info):
         """\
@@ -518,24 +534,24 @@ class SalomeEnviron:
         env_root_dir = self.get(product_info.name + "_ROOT_DIR")
         l_binpath_libpath = []
 
-        # Construct the paths to prepend to PATH and LD_LIBRARY_PATH and 
+        # Construct the paths to prepend to PATH and LD_LIBRARY_PATH and
         # PYTHONPATH
-        bin_path = os.path.join(env_root_dir, 'bin')
-        lib_path = os.path.join(env_root_dir, 'lib')
-        l_binpath_libpath.append( (bin_path, lib_path) )
+        bin_path = os.path.join(env_root_dir, "bin")
+        lib_path = os.path.join(env_root_dir, "lib")
+        l_binpath_libpath.append((bin_path, lib_path))
 
         for bin_path, lib_path in l_binpath_libpath:
             if not self.forBuild:
-                self.prepend('PATH', bin_path)
+                self.prepend("PATH", bin_path)
                 if src.architecture.is_windows():
-                    self.prepend('PATH', lib_path)
-                else :
-                    self.prepend('LD_LIBRARY_PATH', lib_path)
+                    self.prepend("PATH", lib_path)
+                else:
+                    self.prepend("LD_LIBRARY_PATH", lib_path)
 
-            l = [ bin_path, lib_path ]
+            l = [bin_path, lib_path]
             if self.has_python:
                 l.append(os.path.join(env_root_dir, self.python_lib))
-            self.prepend('PYTHONPATH', l)
+            self.prepend("PYTHONPATH", l)
 
     def load_cfg_environment(self, cfg_env):
         """\
@@ -546,11 +562,11 @@ class SalomeEnviron:
         # Loop on cfg_env values
         for env_def in cfg_env:
             val = cfg_env[env_def]
-            
+
             # if it is env_script, do not do anything (reserved keyword)
             if env_def == "env_script":
                 continue
-            
+
             # if it is a dict, do not do anything
             if isinstance(val, src.pyconf.Mapping):
                 continue
@@ -566,14 +582,14 @@ class SalomeEnviron:
             # "_" means that the value must be prepended
             if env_def.startswith("_"):
                 # separator exception for PV_PLUGIN_PATH
-                if env_def[1:] == 'PV_PLUGIN_PATH':
-                    self.prepend(env_def[1:], val, ';')
+                if env_def[1:] == "PV_PLUGIN_PATH":
+                    self.prepend(env_def[1:], val, ";")
                 else:
                     self.prepend(env_def[1:], val)
             elif env_def.endswith("_"):
                 # separator exception for PV_PLUGIN_PATH
-                if env_def[:-1] == 'PV_PLUGIN_PATH':
-                    self.append(env_def[:-1], val, ';')
+                if env_def[:-1] == "PV_PLUGIN_PATH":
+                    self.append(env_def[:-1], val, ";")
                 else:
                     self.append(env_def[:-1], val)
             else:
@@ -589,105 +605,113 @@ class SalomeEnviron:
 
         # Get the informations corresponding to the product
         pi = src.product.get_product_config(self.cfg, product)
-        # skip compile time products at run time 
+        # skip compile time products at run time
         if not self.forBuild:
             if src.product.product_is_compile_time(pi):
                 return
         else:
-            if src.product.product_is_native(pi) :
-                self.set("SAT_%s_IS_NATIVE"%pi.name, "1")
-                
+            if src.product.product_is_native(pi):
+                self.set("SAT_%s_IS_NATIVE" % pi.name, "1")
 
-        # skip pip products when pip is activated and installation is done in python 
-        #if (src.appli_test_property(self.cfg,"pip", "yes") and 
+        # skip pip products when pip is activated and installation is done in python
+        # if (src.appli_test_property(self.cfg,"pip", "yes") and
         #    src.product.product_test_property(pi,"pip", "yes") and
         #    src.appli_test_property(self.cfg,"pip_install_dir", "python") ):
         #        return
 
-        # skip mesa products (if any) at run time, 
+        # skip mesa products (if any) at run time,
         # unless use_mesa property was activated
         if not self.forBuild:
-            if not ("APPLICATION" in self.cfg  and
-                    "properties" in self.cfg.APPLICATION  and
-                    "use_mesa" in self.cfg.APPLICATION.properties  and
-                    self.cfg.APPLICATION.properties.use_mesa == "yes") :
-                if ("properties" in pi and
-                    "is_mesa" in pi.properties  and
-                    pi.properties.is_mesa == "yes") :
+            if not (
+                "APPLICATION" in self.cfg
+                and "properties" in self.cfg.APPLICATION
+                and "use_mesa" in self.cfg.APPLICATION.properties
+                and self.cfg.APPLICATION.properties.use_mesa == "yes"
+            ):
+                if (
+                    "properties" in pi
+                    and "is_mesa" in pi.properties
+                    and pi.properties.is_mesa == "yes"
+                ):
                     logger.write(_("Skip mesa product %s\n") % pi.name, 4)
                     return
-               
-        
+
         if self.for_package:
-            prod_base_name=os.path.basename(pi.install_dir)
+            prod_base_name = os.path.basename(pi.install_dir)
             if prod_base_name.startswith("config"):
                 # case of a product installed in base. We remove "config-i"
-                prod_base_name=os.path.basename(os.path.dirname(pi.install_dir))
+                prod_base_name = os.path.basename(os.path.dirname(pi.install_dir))
             pi.install_dir = os.path.join(
-                                 "out_dir_Path",
-                                 self.for_package,
-                                 prod_base_name)
+                "out_dir_Path", self.for_package, prod_base_name
+            )
 
         if not self.silent:
             logger.write(_("Setting environment for %s\n") % product, 4)
 
         self.add_line(1)
-        self.add_comment('setting environ for ' + product)
-            
+        self.add_comment("setting environ for " + product)
+
         # Do not define environment if the product is native
         if src.product.product_is_native(pi):
             if src.product.product_has_env_script(pi):
                 self.run_env_script(pi, native=True)
             return
-               
+
         # Set an additional environment for SALOME products
         if src.product.product_is_salome(pi):
             # set environment using definition of the product
             self.set_salome_minimal_product_env(pi, logger)
             self.set_salome_generic_product_env(pi)
-           
-        
+
         # Expand SALOME_MODULES variable for products which have a salome gui
         if src.product.product_has_salome_gui(pi):
             self.expand_salome_modules(pi)
 
         # use variable LICENCE_FILE to communicate the licence file name to the environment script
-        licence_file_name = src.product.product_has_licence(pi, self.cfg.PATHS.LICENCEPATH)
+        licence_file_name = src.product.product_has_licence(
+            pi, self.cfg.PATHS.LICENCEPATH
+        )
         if licence_file_name:
-            logger.write("licence file found for product %s : %s\n" % (pi.name, licence_file_name), 5) 
+            logger.write(
+                "licence file found for product %s : %s\n"
+                % (pi.name, licence_file_name),
+                5,
+            )
             self.set("LICENCE_FILE", licence_file_name)
 
         if src.product.product_is_cpp(pi):
             # set a specific environment for cpp modules
             self.set_salome_minimal_product_env(pi, logger)
             self.set_cpp_env(pi)
-            
+
             if src.product.product_is_generated(pi):
                 if "component_name" in pi:
-                    # hack the source and install directories in order to point  
+                    # hack the source and install directories in order to point
                     # on the generated product source install directories
                     install_dir_save = pi.install_dir
                     source_dir_save = pi.source_dir
                     name_save = pi.name
-                    pi.install_dir = os.path.join(self.cfg.APPLICATION.workdir,
-                                                  self.cfg.INTERNAL.config.install_dir,
-                                                  pi.component_name)
+                    pi.install_dir = os.path.join(
+                        self.cfg.APPLICATION.workdir,
+                        self.cfg.INTERNAL.config.install_dir,
+                        pi.component_name,
+                    )
                     if self.for_package:
-                        pi.install_dir = os.path.join("out_dir_Path",
-                                                      self.for_package,
-                                                      pi.component_name)
-                    pi.source_dir = os.path.join(self.cfg.APPLICATION.workdir,
-                                                  "GENERATED",
-                                                  pi.component_name)
+                        pi.install_dir = os.path.join(
+                            "out_dir_Path", self.for_package, pi.component_name
+                        )
+                    pi.source_dir = os.path.join(
+                        self.cfg.APPLICATION.workdir, "GENERATED", pi.component_name
+                    )
                     pi.name = pi.component_name
                     self.set_salome_minimal_product_env(pi, logger)
                     self.set_salome_generic_product_env(pi)
-                    
+
                     # Put original values
                     pi.install_dir = install_dir_save
                     pi.source_dir = source_dir_save
                     pi.name = name_save
-        
+
         # Put the environment define in the configuration of the product
         if "environ" in pi:
             self.load_cfg_environment(pi.environ)
@@ -696,12 +720,9 @@ class SalomeEnviron:
             if not self.forBuild and "launch" in pi.environ:
                 self.load_cfg_environment(pi.environ.launch)
             # if product_info defines a env_scripts, load it
-            if 'env_script' in pi.environ:
+            if "env_script" in pi.environ:
                 self.run_env_script(pi, logger)
 
-        
-            
-
     def run_env_script(self, product_info, logger=None, native=False):
         """\
         Runs an environment script. 
@@ -713,8 +734,7 @@ class SalomeEnviron:
         env_script = product_info.environ.env_script
         # Check that the script exists
         if not os.path.exists(env_script):
-            raise src.SatException(_("Environment script not found: %s") % 
-                                   env_script)
+            raise src.SatException(_("Environment script not found: %s") % env_script)
 
         if not self.silent and logger is not None:
             logger.write("  ** load %s\n" % env_script, 4)
@@ -722,23 +742,23 @@ class SalomeEnviron:
         # import the script and run the set_env function
         try:
             import imp
-            pyproduct = imp.load_source(product_info.name + "_env_script",
-                                        env_script)
+
+            pyproduct = imp.load_source(product_info.name + "_env_script", env_script)
             if not native:
                 if self.forBuild and "set_env_build" in dir(pyproduct):
-                    pyproduct.set_env_build(self,
-                                            product_info.install_dir,
-                                            product_info.version)
+                    pyproduct.set_env_build(
+                        self, product_info.install_dir, product_info.version
+                    )
                 elif (not self.forBuild) and "set_env_launch" in dir(pyproduct):
-                    pyproduct.set_env_launch(self,
-                                            product_info.install_dir,
-                                            product_info.version)
+                    pyproduct.set_env_launch(
+                        self, product_info.install_dir, product_info.version
+                    )
                 else:
                     # at least this one is mandatory,
                     # if set_env_build and set_env_build are not defined
-                    pyproduct.set_env(self,
-                                      product_info.install_dir,
-                                      product_info.version)
+                    pyproduct.set_env(
+                        self, product_info.install_dir, product_info.version
+                    )
             else:
                 # not mandatory, if set_nativ_env not defined, we do nothing
                 if "set_nativ_env" in dir(pyproduct):
@@ -747,6 +767,7 @@ class SalomeEnviron:
             __, exceptionValue, exceptionTraceback = sys.exc_info()
             print(exceptionValue)
             import traceback
+
             traceback.print_tb(exceptionTraceback)
             traceback.print_exc()
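
run_env_script loads the product's env_script with imp.load_source and then calls set_env_build, set_env_launch or set_env depending on the context. A hedged sketch of the same loading step with importlib (imp is deprecated since Python 3.4; the path and function names below are illustrative):

import importlib.util

def load_env_script(path, module_name="product_env_script"):
    """Load a Python file as a module; importlib equivalent of the
    imp.load_source call above."""
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# usage sketch (hypothetical script exposing set_env(env, install_dir, version)):
# pyproduct = load_env_script("/path/to/product_env.py")
# if "set_env" in dir(pyproduct):
#     pyproduct.set_env(env, "/opt/product", "1.0")
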
 
@@ -758,7 +779,7 @@ class SalomeEnviron:
         :param src_root src: the application working directory
         """
         self.add_line(1)
-        self.add_comment('setting environ for all products')
+        self.add_comment("setting environ for all products")
 
         # Make sure that the python lib dirs are set after python
         if "Python" in self.sorted_product_list:
@@ -770,7 +791,7 @@ class SalomeEnviron:
             if product == "Python":
                 continue
             self.set_a_product(product, logger)
+
     def set_full_environ(self, logger, env_info):
         """\
         Sets the full environment for products, with their dependencies 
@@ -784,13 +805,14 @@ class SalomeEnviron:
         # set product environ
         self.set_application_env(logger)
 
-        # use the sorted list of all products to sort the list of products 
+        # use the sorted list of all products to sort the list of products
         # we have to set
-        visited=[]
-        from compile import depth_search_graph # to get the dependencies
+        visited = []
+        from compile import depth_search_graph  # to get the dependencies
+
         for p_name in env_info:
-            visited=depth_search_graph(self.all_products_graph, p_name, visited)
-        sorted_product_list=[]
+            visited = depth_search_graph(self.all_products_graph, p_name, visited)
+        sorted_product_list = []
         for n in self.sorted_product_list:
             if n in visited:
                 sorted_product_list.append(n)
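
The dependency handling above relies on two depth-first traversals from the compile command: depth_first_topo_graph (used by __set_sorted_products_list earlier in this diff) to obtain a dependency-first ordering, and depth_search_graph here to collect the products reachable from the requested ones. A minimal sketch of the post-order DFS that yields such an ordering, on a plain adjacency dict (hypothetical graph; the real functions' signatures may differ):

def depth_first_topo(graph, node, visited, ordered):
    """Post-order DFS: a node is appended only after all of its
    dependencies, so 'ordered' comes out dependency-first."""
    visited.append(node)
    for dep in graph.get(node, []):
        if dep not in visited:
            visited, ordered = depth_first_topo(graph, dep, visited, ordered)
    ordered.append(node)
    return visited, ordered

graph = {"GUI": ["KERNEL"], "KERNEL": ["Python"], "Python": []}
visited, ordered = [], []
for n in graph:
    if n not in visited:
        visited, ordered = depth_first_topo(graph, n, visited, ordered)
print(ordered)  # ['Python', 'KERNEL', 'GUI']
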
@@ -805,10 +827,12 @@ class SalomeEnviron:
                 continue
             self.set_a_product(product, logger)
 
+
 class FileEnvWriter:
     """\
     Class to dump the environment to a file.
     """
+
     def __init__(self, config, logger, out_dir, src_root, env_info=None):
         """\
         Initialization.
@@ -822,16 +846,13 @@ class FileEnvWriter:
         self.config = config
         self.logger = logger
         self.out_dir = out_dir
-        self.src_root= src_root
+        self.src_root = src_root
         self.silent = True
         self.env_info = env_info
 
-    def write_tcl_files(self,
-                        forBuild, 
-                        shell, 
-                        for_package = None,
-                        no_path_init=False,
-                        additional_env = {}):
+    def write_tcl_files(
+        self, forBuild, shell, for_package=None, no_path_init=False, additional_env={}
+    ):
         """\
         Create tcl environment files for environment module.
         
@@ -846,16 +867,16 @@ class FileEnvWriter:
         """
 
         # get the products informations
-        all_products=self.config.APPLICATION.products
-        products_infos = src.product.get_products_infos(all_products, self.config) 
+        all_products = self.config.APPLICATION.products
+        products_infos = src.product.get_products_infos(all_products, self.config)
 
         # set a global environment (we need it to resolve variable references
         # between dependent products
-        global_environ = src.environment.SalomeEnviron(self.config,
-                                  src.environment.Environ(additional_env),
-                                  False)
+        global_environ = src.environment.SalomeEnviron(
+            self.config, src.environment.Environ(additional_env), False
+        )
         global_environ.set_products(self.logger)
-        
+
         # The loop on the products
         for product in all_products:
             # create one file per product
@@ -863,61 +884,72 @@ class FileEnvWriter:
             if "base" not in pi:  # we write tcl files only for products in base
                 continue
 
-            # get the global environment, and complete it with sat_ prefixed 
-            # variables which are used to transfer info to 
-            # the TclFileEnviron class  
+            # get the global environment, and complete it with sat_ prefixed
+            # variables which are used to transfer info to
+            # the TclFileEnviron class
             product_env = copy.deepcopy(global_environ.environ)
             product_env.environ["sat_product_name"] = pi.name
             product_env.environ["sat_product_version"] = pi.version
-            product_env.environ["sat_product_base_path"] = src.get_base_path(self.config)
+            product_env.environ["sat_product_base_path"] = src.get_base_path(
+                self.config
+            )
             product_env.environ["sat_product_base_name"] = pi.base
-   
+
             # store infos in sat_product_load_depend to set dependencies in tcl file
-            sat_product_load_depend=""
-            for p_name,p_info in products_infos:
+            sat_product_load_depend = ""
+            for p_name, p_info in products_infos:
                 if p_name in pi.depend:
-                    sat_product_load_depend+="module load %s/%s/%s;" % (pi.base, 
-                                                                        p_info.name, 
-                                                                        p_info.version)
-            if len(sat_product_load_depend)>0:
+                    sat_product_load_depend += "module load %s/%s/%s;" % (
+                        pi.base,
+                        p_info.name,
+                        p_info.version,
+                    )
+            if len(sat_product_load_depend) > 0:
                 # if there are dependencies, store the module to load (get rid of trailing ;)
-                product_env.environ["sat_product_load_depend"]=sat_product_load_depend[0:-1]
-
-
-            env_file_name = os.path.join(product_env.environ["sat_product_base_path"], 
-                                         "modulefiles", 
-                                         product_env.environ["sat_product_base_name"],
-                                         product_env.environ["sat_product_name"], 
-                                         product_env.environ["sat_product_version"])
-            prod_dir_name=os.path.dirname(env_file_name)
+                product_env.environ[
+                    "sat_product_load_depend"
+                ] = sat_product_load_depend[0:-1]
+
+            env_file_name = os.path.join(
+                product_env.environ["sat_product_base_path"],
+                "modulefiles",
+                product_env.environ["sat_product_base_name"],
+                product_env.environ["sat_product_name"],
+                product_env.environ["sat_product_version"],
+            )
+            prod_dir_name = os.path.dirname(env_file_name)
             if not os.path.isdir(prod_dir_name):
                 os.makedirs(prod_dir_name)
 
             env_file = open(env_file_name, "w")
-            file_environ = src.fileEnviron.get_file_environ(env_file,
-                                           "tcl", product_env)
-            env = SalomeEnviron(self.config, 
-                                file_environ, 
-                                False, 
-                                for_package=for_package)
+            file_environ = src.fileEnviron.get_file_environ(
+                env_file, "tcl", product_env
+            )
+            env = SalomeEnviron(
+                self.config, file_environ, False, for_package=for_package
+            )
             if "Python" in pi.depend:
                 # short cut, env.python_lib is required by set_a_product for salome modules
-                env.has_python="True"
-                env.python_lib=global_environ.get("PYTHON_LIBDIR")
+                env.has_python = "True"
+                env.python_lib = global_environ.get("PYTHON_LIBDIR")
             env.set_a_product(product, self.logger)
             env_file.close()
             if not self.silent:
-                self.logger.write(_("    Create tcl module environment file %s\n") % 
-                                  src.printcolors.printcLabel(env_file_name), 3)
-
-
-    def write_env_file(self,
-                       filename,
-                       forBuild, 
-                       shell, 
-                       for_package = None,
-                       no_path_init=False,
-                       additional_env = {}):
+                self.logger.write(
+                    _("    Create tcl module environment file %s\n")
+                    % src.printcolors.printcLabel(env_file_name),
+                    3,
+                )
+
+    def write_env_file(
+        self,
+        filename,
+        forBuild,
+        shell,
+        for_package=None,
+        no_path_init=False,
+        additional_env={},
+    ):
         """\
         Create an environment file.
         
@@ -931,28 +963,30 @@ class FileEnvWriter:
         :return: The path to the generated file
         :rtype: str
         """
-        additional_env["sat_dist"]=self.config.VARS.dist
+        additional_env["sat_dist"] = self.config.VARS.dist
         if not self.silent:
-            self.logger.write(_("Create environment file %s\n") % 
-                              src.printcolors.printcLabel(filename), 3)
+            self.logger.write(
+                _("Create environment file %s\n")
+                % src.printcolors.printcLabel(filename),
+                3,
+            )
         # create the env object
         env_file = open(os.path.join(self.out_dir, filename), "w")
 
         # we duplicate additional_env, and transmit it to fileEnviron, which will use its sat_ prefixed variables.
         # the other variables of additional_env are added to the environment file at the end of this function.
         salome_env = copy.deepcopy(additional_env)
-        file_environ = src.fileEnviron.get_file_environ(env_file,
-                                               shell,
-                                               src.environment.Environ(salome_env))
+        file_environ = src.fileEnviron.get_file_environ(
+            env_file, shell, src.environment.Environ(salome_env)
+        )
         if no_path_init:
             # specify we don't want to reinitialise paths
             # path will keep the inherited value, which will be appended with new values.
             file_environ.set_no_init_path()
 
-        env = SalomeEnviron(self.config, 
-                            file_environ, 
-                            forBuild, 
-                            for_package=for_package)
+        env = SalomeEnviron(
+            self.config, file_environ, forBuild, for_package=for_package
+        )
 
         env.silent = self.silent
 
@@ -963,15 +997,14 @@ class FileEnvWriter:
             # set env from the APPLICATION
             env.set_application_env(self.logger)
             # set the products
-            env.set_products(self.logger,
-                            src_root=self.src_root)
+            env.set_products(self.logger, src_root=self.src_root)
         # Add the additional environment if it is not empty
         if len(additional_env) != 0:
             env.add_line(1)
             env.add_comment("[APPLI variables]")
             for variable in additional_env:
                 if not variable.startswith("sat_"):
-                    # by convention variables starting with sat_ are used to transfer information, 
+                    # by convention variables starting with sat_ are used to transfer information,
                     # not to be written in env
                     env.set(variable, additional_env[variable])
 
@@ -980,12 +1013,13 @@ class FileEnvWriter:
         env_file.close()
 
         return env_file.name
-   
+
 
 class Shell:
     """\
     Definition of a Shell.
     """
+
     def __init__(self, name, extension):
         """\
         Initialization.
@@ -996,6 +1030,7 @@ class Shell:
         self.name = name
         self.extension = extension
 
+
 def load_environment(config, build, logger):
     """\
     Loads the environment (used to run the tests, for example).
index dc8dc8070f5b340ad3162cd143e8d72bd1353b4c..43e71a3f6a63cd74cf6b14996ee72cab0e846fdb 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -22,15 +22,16 @@ import src.debug as DBG
 import src.architecture
 import src.environment
 
+
 def get_file_environ(output, shell, environ=None):
     """Instantiate correct FileEnvironment sub-class.
-    
+
     :param output file: the output file stream.
     :param shell str: the type of shell syntax to use.
     :param environ dict: a potential additional environment.
     """
     if environ == None:
-        environ=src.environment.Environ({})
+        environ = src.environment.Environ({})
     if shell == "bash":
         return BashFileEnviron(output, environ)
     if shell == "tcl":
@@ -43,10 +44,12 @@ def get_file_environ(output, shell, environ=None):
         return ContextFileEnviron(output, environ)
     raise Exception("FileEnviron: Unknown shell = %s" % shell)
 
+
 class FileEnviron(object):
     """\
     Base class for shell environment
     """
+
     def __init__(self, output, environ=None):
         """\
         Initialization
@@ -60,11 +63,10 @@ class FileEnviron(object):
         """\
         easy non exhaustive quick resume for debug print"""
         res = {
-          "output" : self.output,
-          "environ" : self.environ,
+            "output": self.output,
+            "environ": self.environ,
         }
         return "%s(\n%s\n)" % (self.__class__.__name__, PP.pformat(res))
-        
 
     def _do_init(self, output, environ=None):
         """\
@@ -74,7 +76,7 @@ class FileEnviron(object):
         :param environ dict: a potential additional environment.
         """
         self.output = output
-        self.init_path=True # by default we initialise all paths, except PATH
+        self.init_path = True  # by default we initialise all paths, except PATH
         if environ is not None:
             self.environ = environ
         else:
@@ -122,17 +124,22 @@ class FileEnviron(object):
         :param sep str: the separator string
         """
         # check that the value does not contain the system separator
-        separator=os.pathsep
+        separator = os.pathsep
         if separator in value:
-            raise Exception("FileEnviron append key '%s' value '%s' contains forbidden character '%s'" % (key, value, separator))
-        do_append=True
+            raise Exception(
+                "FileEnviron append key '%s' value '%s' contains forbidden character '%s'"
+                % (key, value, separator)
+            )
+        do_append = True
         if self.environ.is_defined(key):
             value_list = self.environ.get(key).split(sep)
             if self.environ._expandvars(value) in value_list:
-                do_append=False  # value is already in key path : we don't append it again
-            
+                do_append = (
+                    False  # value is already in key path : we don't append it again
+                )
+
         if do_append:
-            self.environ.append_value(key, value,sep)
+            self.environ.append_value(key, value, sep)
             self.set(key, self.get(key) + sep + value)
 
     def append(self, key, value, sep=os.pathsep):
@@ -159,18 +166,21 @@ class FileEnviron(object):
         :param sep str: the separator string
         """
         # check that the value does not contain the system separator
-        separator=os.pathsep
+        separator = os.pathsep
         if separator in value:
-            raise Exception("FileEnviron append key '%s' value '%s' contains forbidden character '%s'" % (key, value, separator))
+            raise Exception(
+                "FileEnviron append key '%s' value '%s' contains forbidden character '%s'"
+                % (key, value, separator)
+            )
 
-        do_not_prepend=False
+        do_not_prepend = False
         if self.environ.is_defined(key):
             value_list = self.environ.get(key).split(sep)
-            exp_val=self.environ._expandvars(value)
+            exp_val = self.environ._expandvars(value)
             if exp_val in value_list:
-                do_not_prepend=True
+                do_not_prepend = True
         if not do_not_prepend:
-            self.environ.prepend_value(key, value,sep)
+            self.environ.prepend_value(key, value, sep)
             self.set(key, value + sep + self.get(key))
 
     def prepend(self, key, value, sep=os.pathsep):
@@ -182,7 +192,7 @@ class FileEnviron(object):
         :param sep str: the separator string
         """
         if isinstance(value, list):
-            for v in reversed(value): # prepend list, first item at last to stay first
+            for v in reversed(value):  # prepend list, first item at last to stay first
                 self.prepend_value(key, v, sep)
         else:
             self.prepend_value(key, value, sep)
@@ -211,9 +221,9 @@ class FileEnviron(object):
         :param key str: the environment variable
         """
         if src.architecture.is_windows():
-            return '%' + key + '%'
+            return "%" + key + "%"
         else:
-            return '${%s}' % key
+            return "${%s}" % key
 
     def get_value(self, key):
         """Get the real value of the environment variable "key"
@@ -224,22 +234,22 @@ class FileEnviron(object):
 
     def finish(self):
         """Add a final instruction in the out file (in case of file generation)
-        
+
         :param required bool: Do nothing if required is False
         """
         return
 
     def set_no_init_path(self):
         """Set the no initialisation mode for all paths.
-           By default only PATH is not reinitialised. All other paths are
-           (LD_LIBRARY_PATH, PYTHONPATH, ...)
-           After the call to this function ALL PATHS ARE NOT REINITIALISED.
-           Their initial value is inherited from the environment
+        By default only PATH is not reinitialised. All other paths are
+        (LD_LIBRARY_PATH, PYTHONPATH, ...)
+        After the call to this function ALL PATHS ARE NOT REINITIALISED.
+        Their initial value is inherited from the environment
         """
-        self.init_path=False
+        self.init_path = False
 
     def value_filter(self, value):
-        res=value
+        res = value
         return res
 
 
@@ -247,39 +257,43 @@ class TclFileEnviron(FileEnviron):
     """\
     Class for tcl shell.
     """
+
     def __init__(self, output, environ=None):
         """Initialization
-        
+
         :param output file: the output file stream.
         :param environ dict: a potential additional environment.
         """
         self._do_init(output, environ)
-        self.output.write(tcl_header.replace("<module_name>",
-                                             self.environ.get("sat_product_name")))
-        self.output.write("\nset software %s\n" % self.environ.get("sat_product_name") )
-        self.output.write("set version %s\n" % self.environ.get("sat_product_version") )
-        root=os.path.join(self.environ.get("sat_product_base_path"),  
-                                  "apps", 
-                                  self.environ.get("sat_product_base_name"), 
-                                  "$software", 
-                                  "$version")
-        self.output.write("set root %s\n" % root) 
-        modules_to_load=self.environ.get("sat_product_load_depend")
-        if len(modules_to_load)>0:
+        self.output.write(
+            tcl_header.replace("<module_name>", self.environ.get("sat_product_name"))
+        )
+        self.output.write("\nset software %s\n" % self.environ.get("sat_product_name"))
+        self.output.write("set version %s\n" % self.environ.get("sat_product_version"))
+        root = os.path.join(
+            self.environ.get("sat_product_base_path"),
+            "apps",
+            self.environ.get("sat_product_base_name"),
+            "$software",
+            "$version",
+        )
+        self.output.write("set root %s\n" % root)
+        modules_to_load = self.environ.get("sat_product_load_depend")
+        if len(modules_to_load) > 0:
             # write module load commands for product dependencies
             self.output.write("\n")
             for module_to_load in modules_to_load.split(";"):
-                self.output.write(module_to_load+"\n")
+                self.output.write(module_to_load + "\n")
 
     def set(self, key, value):
         """Set the environment variable "key" to value "value"
-        
+
         :param key str: the environment variable to set
         :param value str: the value
         """
         self.output.write('setenv  %s "%s"\n' % (key, value))
         self.environ.set(key, value)
-        
+
     def get(self, key):
         """\
         Get the value of the environment variable "key"
@@ -290,36 +304,37 @@ class TclFileEnviron(FileEnviron):
 
     def append_value(self, key, value, sep=os.pathsep):
         """append value to key using sep
-        
+
         :param key str: the environment variable to append
         :param value str: the value to append to key
         :param sep str: the separator string
         """
-        if sep==os.pathsep:
-            self.output.write('append-path  %s   %s\n' % (key, value))
+        if sep == os.pathsep:
+            self.output.write("append-path  %s   %s\n" % (key, value))
         else:
-            self.output.write('append-path --delim=\%c %s   %s\n' % (sep, key, value))
+            self.output.write("append-path --delim=\%c %s   %s\n" % (sep, key, value))
 
     def prepend_value(self, key, value, sep=os.pathsep):
         """prepend value to key using sep
-        
+
         :param key str: the environment variable to prepend
         :param value str: the value to prepend to key
         :param sep str: the separator string
         """
-        if sep==os.pathsep:
-            self.output.write('prepend-path  %s   %s\n' % (key, value))
+        if sep == os.pathsep:
+            self.output.write("prepend-path  %s   %s\n" % (key, value))
         else:
-            self.output.write('prepend-path --delim=\%c %s   %s\n' % (sep, key, value))
+            self.output.write("prepend-path --delim=\%c %s   %s\n" % (sep, key, value))
+
 
-        
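
TclFileEnviron, reformatted above, emits environment-modules Tcl syntax: setenv for plain variables and append-path/prepend-path for path-like ones, adding --delim when the separator is not os.pathsep. A short sketch that writes the same commands to any stream (simplified from the class above; the variable names are illustrative):

import os
import sys

def tcl_setenv(out, key, value):
    out.write('setenv  %s "%s"\n' % (key, value))

def tcl_prepend_path(out, key, value, sep=os.pathsep):
    if sep == os.pathsep:
        out.write("prepend-path  %s   %s\n" % (key, value))
    else:
        # the backslash escapes the custom delimiter for Tcl, as above
        out.write("prepend-path --delim=\\%c %s   %s\n" % (sep, key, value))

tcl_setenv(sys.stdout, "KERNEL_ROOT_DIR", "/opt/salome/KERNEL")
tcl_prepend_path(sys.stdout, "PATH", "/opt/salome/KERNEL/bin/salome")
tcl_prepend_path(sys.stdout, "PV_PLUGIN_PATH", "/opt/pv/plugins", ";")
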
 class BashFileEnviron(FileEnviron):
     """\
     Class for bash shell.
     """
+
     def __init__(self, output, environ=None):
         """Initialization
-        
+
         :param output file: the output file stream.
         :param environ dict: a potential additional environment.
         """
@@ -328,22 +343,22 @@ class BashFileEnviron(FileEnviron):
 
     def set(self, key, value):
         """Set the environment variable "key" to value "value"
-        
+
         :param key str: the environment variable to set
         :param value str: the value
         """
         self.output.write('export %s="%s"\n' % (key, value))
         self.environ.set(key, value)
-        
 
-        
+
 class BatFileEnviron(FileEnviron):
     """\
     for Windows batch shell.
     """
+
     def __init__(self, output, environ=None):
         """Initialization
-        
+
         :param output file: the output file stream.
         :param environ dict: a potential additional environment.
         """
@@ -352,34 +367,34 @@ class BatFileEnviron(FileEnviron):
 
     def add_comment(self, comment):
         """Add a comment in the shell file
-        
+
         :param comment str: the comment to add
         """
         self.output.write("rem %s\n" % comment)
-    
+
     def get(self, key):
         """Get the value of the environment variable "key"
-        
+
         :param key str: the environment variable
         """
-        return '%%%s%%' % key
-    
+        return "%%%s%%" % key
+
     def set(self, key, value):
         """Set the environment variable "key" to value "value"
-        
+
         :param key str: the environment variable to set
         :param value str: the value
         """
-        self.output.write('set %s=%s\n' % (key, self.value_filter(value)))
+        self.output.write("set %s=%s\n" % (key, self.value_filter(value)))
         self.environ.set(key, value)
 
 
 class ContextFileEnviron(FileEnviron):
-    """Class for a salome context configuration file.
-    """
+    """Class for a salome context configuration file."""
+
     def __init__(self, output, environ=None):
         """Initialization
-        
+
         :param output file: the output file stream.
         :param environ dict: a potential additional environment.
         """
@@ -388,7 +403,7 @@ class ContextFileEnviron(FileEnviron):
 
     def set(self, key, value):
         """Set the environment variable "key" to value "value"
-        
+
         :param key str: the environment variable to set
         :param value str: the value
         """
@@ -397,46 +412,48 @@ class ContextFileEnviron(FileEnviron):
 
     def get(self, key):
         """Get the value of the environment variable "key"
-        
+
         :param key str: the environment variable
         """
-        return '%({0})s'.format(key)
+        return "%({0})s".format(key)
 
     def add_echo(self, text):
         """Add a comment
-        
+
         :param text str: the comment to add
         """
         self.add_comment(text)
 
     def add_warning(self, warning):
         """Add a warning
-        
+
         :param text str: the warning to add
         """
-        self.add_comment("WARNING %s"  % warning)
+        self.add_comment("WARNING %s" % warning)
 
     def prepend_value(self, key, value, sep=os.pathsep):
         """prepend value to key using sep
-        
+
         :param key str: the environment variable to prepend
         :param value str: the value to prepend to key
         :param sep str: the separator string
         """
-        do_append=True
+        do_append = True
         if self.environ.is_defined(key):
             value_list = self.environ.get(key).split(sep)
-            #value cannot be expanded (unlike bash/bat case) - but it doesn't matter.
+            # value cannot be expanded (unlike bash/bat case) - but it doesn't matter.
             if value in value_list:
-                do_append=False  # value is already in key path : we don't append it again
-            
+                do_append = (
+                    False  # value is already in key path : we don't append it again
+                )
+
         if do_append:
-            self.environ.append_value(key, value,sep)
-            self.output.write('ADD_TO_%s: %s\n' % (key, value))
+            self.environ.append_value(key, value, sep)
+            self.output.write("ADD_TO_%s: %s\n" % (key, value))
 
     def append_value(self, key, value, sep=os.pathsep):
         """append value to key using sep
-        
+
         :param key str: the environment variable to append
         :param value str: the value to append to key
         :param sep str: the separator string
@@ -449,56 +466,60 @@ class LauncherFileEnviron(FileEnviron):
     Class to generate a launcher file script
     (in Python syntax) using the SalomeContext API
     """
+
     def __init__(self, output, environ=None):
         """Initialization
-        
+
         :param output file: the output file stream.
         :param environ dict: a potential additional environment.
         """
         self._do_init(output, environ)
-        self.python_version=self.environ.get("sat_python_version")
-        self.bin_kernel_root_dir=self.environ.get("sat_bin_kernel_install_dir")
+        self.python_version = self.environ.get("sat_python_version")
+        self.bin_kernel_root_dir = self.environ.get("sat_bin_kernel_install_dir")
 
         # four whitespaces for first indentation in a python script
-        self.indent="    "
-        self.prefix="context."
-        self.setVarEnv="setVariable"
-        self.begin=self.indent+self.prefix
+        self.indent = "    "
+        self.prefix = "context."
+        self.setVarEnv = "setVariable"
+        self.begin = self.indent + self.prefix
 
         # write the beginning of the launcher file.
-        # choose the template version corresponding to python version 
+        # choose the template version corresponding to python version
         # and substitute BIN_KERNEL_INSTALL_DIR (the path to salomeContext.py)
         if self.python_version == 2:
-            launcher_header=launcher_header2
+            launcher_header = launcher_header2
         else:
-            launcher_header=launcher_header3
+            launcher_header = launcher_header3
         # in case of Windows OS, Python scripts are not executable.  PyExe ?
         if src.architecture.is_windows():
-            launcher_header = launcher_header.replace("#! /usr/bin/env python3",'')
-        self.output.write(launcher_header\
-                          .replace("BIN_KERNEL_INSTALL_DIR", self.bin_kernel_root_dir))
+            launcher_header = launcher_header.replace("#! /usr/bin/env python3", "")
+        self.output.write(
+            launcher_header.replace("BIN_KERNEL_INSTALL_DIR", self.bin_kernel_root_dir)
+        )
 
         # for these paths, we use specialized functions in the salomeContext api
-        self.specialKeys={"PATH": "Path",
-                          "LD_LIBRARY_PATH": "LdLibraryPath",
-                          "PYTHONPATH": "PythonPath"}
+        self.specialKeys = {
+            "PATH": "Path",
+            "LD_LIBRARY_PATH": "LdLibraryPath",
+            "PYTHONPATH": "PythonPath",
+        }
 
         # we do not want to reinitialise PATH.
         # for that we make sure PATH is in self.environ
         # and therefore we will not use setVariable for PATH
         if not self.environ.is_defined("PATH"):
-            self.environ.set("PATH","")
+            self.environ.set("PATH", "")
 
     def add_echo(self, text):
         """Add a comment
-        
+
         :param text str: the comment to add
         """
         self.output.write('# %s"\n' % text)
 
     def add_warning(self, warning):
         """Add a warning
-        
+
         :param text str: the warning to add
         """
         self.output.write('# "WARNING %s"\n' % warning)
@@ -506,51 +527,63 @@ class LauncherFileEnviron(FileEnviron):
     def append_value(self, key, value, sep=os.pathsep):
         """append value to key using sep,
         if value contains ":" or ";" then raise error
-        
+
         :param key str: the environment variable to append
         :param value str: the value to append to key
         :param sep str: the separator string
         """
         # check that value does not contain the system separator
-        separator=os.pathsep
-        msg="LauncherFileEnviron append key '%s' value '%s' contains forbidden character '%s'"
+        separator = os.pathsep
+        msg = "LauncherFileEnviron append key '%s' value '%s' contains forbidden character '%s'"
         if separator in value:
             raise Exception(msg % (key, value, separator))
 
-        is_key_defined=self.environ.is_defined(key)
-        conditional_reinit=False
-        if (self.init_path and (not is_key_defined)):
+        is_key_defined = self.environ.is_defined(key)
+        conditional_reinit = False
+        if self.init_path and (not is_key_defined):
             # reinitialisation mode set to true (the default)
             # for the first occurrence of key, we set it.
             # therefore key will not be inherited from environment
-            self.output.write(self.indent+'if reinitialise_paths:\n'+self.indent)
+            self.output.write(self.indent + "if reinitialise_paths:\n" + self.indent)
             self.set(key, value)
-            self.output.write(self.indent+'else:\n'+self.indent)
-            conditional_reinit=True # in this case do not register value in self.environ a second time
+            self.output.write(self.indent + "else:\n" + self.indent)
+            conditional_reinit = (
+                True  # in this case do not register value in self.environ a second time
+            )
 
         # in all other cases we use append (except if value is already in the key path)
-        do_append=True
+        do_append = True
         if is_key_defined:
             value_list = self.environ.get(key).split(sep)
             # rem : value cannot be expanded (unlike bash/bat case) - but it doesn't matter.
             if value in value_list:
-                do_append=False  # value is already in key path : we don't append it again
-            
+                do_append = (
+                    False  # value is already in key path : we don't append it again
+                )
+
         if do_append:
             if not conditional_reinit:
-                self.environ.append_value(key, value,sep) # register value in self.environ
+                self.environ.append_value(
+                    key, value, sep
+                )  # register value in self.environ
             if key in self.specialKeys.keys():
-                #for these special keys we use the specific salomeContext function
-                self.output.write(self.begin+'addTo%s(r"%s")\n' % 
-                                  (self.specialKeys[key], self.value_filter(value)))
+                # for these special keys we use the specific salomeContext function
+                self.output.write(
+                    self.begin
+                    + 'addTo%s(r"%s")\n'
+                    % (self.specialKeys[key], self.value_filter(value))
+                )
             else:
                 # else we use the general salomeContext addToVariable function
-                self.output.write(self.begin+'appendVariable(r"%s", r"%s",separator="%s")\n'
-                                  % (key, self.value_filter(value), sep))
+                self.output.write(
+                    self.begin
+                    + 'appendVariable(r"%s", r"%s",separator="%s")\n'
+                    % (key, self.value_filter(value), sep)
+                )
 
     def append(self, key, value, sep=":"):
         """Same as append_value but the value argument can be a list
-        
+
         :param key str: the environment variable to append
         :param value str or list: the value(s) to append to key
         :param sep str: the separator string
@@ -564,52 +597,63 @@ class LauncherFileEnviron(FileEnviron):
     def prepend_value(self, key, value, sep=os.pathsep):
         """prepend value to key using sep,
         if value contains ":" or ";" then raise error
-        
+
         :param key str: the environment variable to prepend
         :param value str: the value to prepend to key
         :param sep str: the separator string
         """
         # check that value does not contain the system separator
-        separator=os.pathsep
-        msg="LauncherFileEnviron append key '%s' value '%s' contains forbidden character '%s'"
+        separator = os.pathsep
+        msg = "LauncherFileEnviron append key '%s' value '%s' contains forbidden character '%s'"
         if separator in value:
             raise Exception(msg % (key, value, separator))
 
-        is_key_defined=self.environ.is_defined(key)
-        conditional_reinit=False
-        if (self.init_path and (not is_key_defined)):
+        is_key_defined = self.environ.is_defined(key)
+        conditional_reinit = False
+        if self.init_path and (not is_key_defined):
             # reinitialisation mode set to true (the default)
             # for the first occurrence of key, we set it.
             # therefore key will not be inherited from environment
-            self.output.write(self.indent+'if reinitialise_paths:\n'+self.indent)
+            self.output.write(self.indent + "if reinitialise_paths:\n" + self.indent)
             self.set(key, value)
-            self.output.write(self.indent+'else:\n'+self.indent)
-            conditional_reinit=True # in this case do not register value in self.environ a second time
+            self.output.write(self.indent + "else:\n" + self.indent)
+            conditional_reinit = (
+                True  # in this case do not register value in self.environ a second time
+            )
 
         # in all other cases we use append (except if value is already in the key path)
-        do_append=True
+        do_append = True
         if is_key_defined:
             value_list = self.environ.get(key).split(sep)
             # rem : value cannot be expanded (unlike bash/bat case) - but it doesn't matter.
             if value in value_list:
-                do_append=False  # value is already in key path : we don't append it again
-            
+                do_append = (
+                    False  # value is already in key path : we don't append it again
+                )
+
         if do_append:
             if not conditional_reinit:
-                self.environ.append_value(key, value,sep) # register value in self.environ
+                self.environ.append_value(
+                    key, value, sep
+                )  # register value in self.environ
             if key in self.specialKeys.keys():
-                #for these special keys we use the specific salomeContext function
-                self.output.write(self.begin+'addTo%s(r"%s")\n' % 
-                                  (self.specialKeys[key], self.value_filter(value)))
+                # for these special keys we use the specific salomeContext function
+                self.output.write(
+                    self.begin
+                    + 'addTo%s(r"%s")\n'
+                    % (self.specialKeys[key], self.value_filter(value))
+                )
             else:
                 # else we use the general salomeContext addToVariable function
-                self.output.write(self.begin+'addToVariable(r"%s", r"%s",separator="%s")\n' 
-                                  % (key, self.value_filter(value), sep))
-            
+                self.output.write(
+                    self.begin
+                    + 'addToVariable(r"%s", r"%s",separator="%s")\n'
+                    % (key, self.value_filter(value), sep)
+                )
 
     def prepend(self, key, value, sep=":"):
         """Same as prepend_value but the value argument can be a list
-        
+
         :param key str: the environment variable to prepend
         :param value str or list: the value(s) to prepend to key
         :param sep str: the separator string
@@ -620,41 +664,45 @@ class LauncherFileEnviron(FileEnviron):
         else:
             self.prepend_value(key, value, sep)
 
-
     def set(self, key, value):
         """Set the environment variable "key" to value "value"
-        
+
         :param key str: the environment variable to set
         :param value str: the value
         """
-        self.output.write(self.begin+self.setVarEnv+
-                          '(r"%s", r"%s", overwrite=True)\n' % 
-                          (key, self.value_filter(value)))
-        self.environ.set(key,value)
-    
+        self.output.write(
+            self.begin
+            + self.setVarEnv
+            + '(r"%s", r"%s", overwrite=True)\n' % (key, self.value_filter(value))
+        )
+        self.environ.set(key, value)
 
     def add_comment(self, comment):
         # Special comment in case of the DISTENE licence
-        if comment=="DISTENE license":
-            self.output.write(self.indent+
-                              "#"+
-                              self.prefix+
-                              self.setVarEnv+
-                              '(r"%s", r"%s", overwrite=True)\n' % 
-                              ('DISTENE_LICENSE_FILE', 'Use global envvar: DLIM8VAR'))
-            self.output.write(self.indent+
-                              "#"+
-                              self.prefix+
-                              self.setVarEnv+
-                              '(r"%s", r"%s", overwrite=True)\n' % 
-                              ('DLIM8VAR', '<your licence>'))
+        if comment == "DISTENE license":
+            self.output.write(
+                self.indent
+                + "#"
+                + self.prefix
+                + self.setVarEnv
+                + '(r"%s", r"%s", overwrite=True)\n'
+                % ("DISTENE_LICENSE_FILE", "Use global envvar: DLIM8VAR")
+            )
+            self.output.write(
+                self.indent
+                + "#"
+                + self.prefix
+                + self.setVarEnv
+                + '(r"%s", r"%s", overwrite=True)\n' % ("DLIM8VAR", "<your licence>")
+            )
             return
         if "setting environ for" in comment:
-            self.output.write(self.indent+"#[%s]\n" % 
-                              comment.split("setting environ for ")[1])
+            self.output.write(
+                self.indent + "#[%s]\n" % comment.split("setting environ for ")[1]
+            )
             return
 
-        self.output.write(self.indent+"# %s\n" % comment)
+        self.output.write(self.indent + "# %s\n" % comment)
 
     def finish(self):
         """\
@@ -664,12 +712,13 @@ class LauncherFileEnviron(FileEnviron):
         :param required bool: Do nothing if required is False
         """
         if self.python_version == 2:
-            launcher_tail=launcher_tail_py2
+            launcher_tail = launcher_tail_py2
         else:
-            launcher_tail=launcher_tail_py3
+            launcher_tail = launcher_tail_py3
         self.output.write(launcher_tail)
         return
 
+
 class ScreenEnviron(FileEnviron):
     def __init__(self, output, environ=None):
         self._do_init(output, environ)
@@ -689,10 +738,17 @@ class ScreenEnviron(FileEnviron):
 
     def write(self, command, name, value, sign="="):
         import src
-        self.output.write("  %s%s %s %s %s\n" % \
-            (src.printcolors.printcLabel(command),
-             " " * (12 - len(command)),
-             src.printcolors.printcInfo(name), sign, value))
+
+        self.output.write(
+            "  %s%s %s %s %s\n"
+            % (
+                src.printcolors.printcLabel(command),
+                " " * (12 - len(command)),
+                src.printcolors.printcInfo(name),
+                sign,
+                value,
+            )
+        )
 
     def is_defined(self, name):
         return name in self.defined
@@ -723,21 +779,21 @@ class ScreenEnviron(FileEnviron):
 #
 #  Headers
 #
-bat_header="""\
+bat_header = """\
 @echo off
 
 rem The following variables are used only in case of a sat package
 set out_dir_Path=%~dp0
 """
 
-tcl_header="""\
+tcl_header = """\
 #%Module -*- tcl -*-
 #
 # <module_name> module for use with 'environment-modules' package
 #
 """
 
-bash_header="""\
+bash_header = """\
 #!/bin/bash
 if [ "$BASH" = "" ]
 then
@@ -756,11 +812,11 @@ export out_dir_Path=$(cd $(dirname ${BASH_SOURCE[0]});pwd)
 ###########################################################################
 """
 
-cfg_header="""\
+cfg_header = """\
 [SALOME Configuration]
 """
 
-launcher_header2="""\
+launcher_header2 = """\
 #! /usr/bin/env python
 
 ################################################################
@@ -848,7 +904,7 @@ def main(args):
     context.getLogger().setLevel(40)
 """
 
-launcher_header3="""\
+launcher_header3 = """\
 #! /usr/bin/env python3
 
 ################################################################
@@ -945,7 +1001,7 @@ def main(args):
     context.getLogger().setLevel(40)
 """
 
-launcher_tail_py2="""\
+launcher_tail_py2 = """\
     #[hook to integrate in launcher additional user modules]
     
     # Load all files extra.env.d/*.py and call the module's init routine]
@@ -986,7 +1042,7 @@ if __name__ == "__main__":
 #
 """
 
-launcher_tail_py3="""\
+launcher_tail_py3 = """\
     #[hook to integrate in launcher additional user modules]
     
     # Load all files extra.env.d/*.py and call the module's init routine]
@@ -1028,5 +1084,3 @@ if __name__ == "__main__":
   main(args)
 #
 """
-    
-
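
Note on the FileEnviron classes reformatted above: every subclass only changes how an assignment, a prepend/append or a comment is serialized for its target format (bash "export", batch "set", context "ADD_TO_<var>", launcher "context.setVariable"). The following is a minimal, self-contained sketch of that pattern; MiniShellEnviron and its methods are illustrative names, not part of the sat API, and unlike the real classes the sketch writes expanded values instead of referencing ${VAR}.

import io
import os


class MiniShellEnviron:
    """Illustrative writer that serializes environment settings for one shell."""

    def __init__(self, output, shell="bash"):
        self.output = output  # any file-like object with a write() method
        self.shell = shell    # "bash" or "bat"
        self.environ = {}     # mirror of what has already been written

    def set(self, key, value):
        # serialize the assignment with the syntax of the target shell
        if self.shell == "bash":
            self.output.write('export %s="%s"\n' % (key, value))
        else:
            self.output.write("set %s=%s\n" % (key, value))
        self.environ[key] = value

    def prepend_value(self, key, value, sep=os.pathsep):
        # skip the value if it is already in the path, like the classes above
        current = self.environ.get(key, "")
        if value in current.split(sep):
            return
        self.set(key, value + sep + current if current else value)

# usage sketch: generate a small bash environment file in memory
buf = io.StringIO()
env = MiniShellEnviron(buf, shell="bash")
env.set("FOO", "bar")
env.prepend_value("PATH", "/opt/tool/bin")
print(buf.getvalue())
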
index aa239de9a05581797fa8d08ea04959a210e59e7f..3ed0565268c4a32a86477bb2b86d6252d3332fac 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -25,26 +25,34 @@ import subprocess
 # OP
 import src
 
+
 def show_progress(logger, top, delai, ss=""):
     """shortcut function to display the progression
-    
+
     :param logger Logger: The logging instance
     :param top int: the number to display
     :param delai int: the maximum number (timeout)
     :param ss str: the string to display
     """
-    logger.write("\r%s\r%s timeout %s / %s stay %s s    " % ((" " * 30), ss, top, delai, (delai - top)), 4, False)
+    logger.write(
+        "\r%s\r%s timeout %s / %s stay %s s    "
+        % ((" " * 30), ss, top, delai, (delai - top)),
+        4,
+        False,
+    )
     logger.flush()
 
+
 def write_back(logger, message, level):
     """shortcut function to write at the begin of the line
-    
+
     :param logger Logger: The logging instance
     :param message str: the text to display
     :param level int: the level of verbosity
     """
     logger.write("\r%s\r%s" % ((" " * 40), message), level)
 
+
 # Launch command
 # --------------
 def launch_command(cmd, logger, cwd, args=[], log=None):
@@ -57,22 +65,23 @@ def launch_command(cmd, logger, cwd, args=[], log=None):
 
     # Add Windows case
     if src.architecture.is_windows():
-        prs = subprocess.Popen(cmd,
-                           shell=True,
-                           stdout=log,
-                           stderr=subprocess.STDOUT,
-                           cwd=cwd)
+        prs = subprocess.Popen(
+            cmd, shell=True, stdout=log, stderr=subprocess.STDOUT, cwd=cwd
+        )
 
     else:
-        prs = subprocess.Popen(cmd,
-                           shell=True,
-                           stdout=log,
-                           stderr=subprocess.STDOUT,
-                           cwd=cwd,
-                           executable='/bin/bash')
+        prs = subprocess.Popen(
+            cmd,
+            shell=True,
+            stdout=log,
+            stderr=subprocess.STDOUT,
+            cwd=cwd,
+            executable="/bin/bash",
+        )
 
     return prs
 
+
 # Launch a batch
 # --------------
 def batch(cmd, logger, cwd, args=[], log=None, delai=20, sommeil=1):
@@ -86,6 +95,7 @@ def batch(cmd, logger, cwd, args=[], log=None, delai=20, sommeil=1):
             if top == delai:
                 logger.write("batch: time out KILL\n", 3)
                 import signal
+
                 os.kill(proc.pid, signal.SIGTERM)
                 break
             else:
@@ -100,9 +110,19 @@ def batch(cmd, logger, cwd, args=[], log=None, delai=20, sommeil=1):
 
 # Launch a salome process
 # -----------------------
-def batch_salome(cmd, logger, cwd, args, getTmpDir,
-                 pendant="SALOME_Session_Server", fin="killSalome.py",
-                 log=None, delai=20, sommeil=1, delaiapp=0):
+def batch_salome(
+    cmd,
+    logger,
+    cwd,
+    args,
+    getTmpDir,
+    pendant="SALOME_Session_Server",
+    fin="killSalome.py",
+    log=None,
+    delai=20,
+    sommeil=1,
+    delaiapp=0,
+):
 
     beginTime = time.time()
     launch_command(cmd, logger, cwd, args, log)
@@ -116,7 +136,7 @@ def batch_salome(cmd, logger, cwd, args, getTmpDir,
     foundSalome = "batch salome not seen"
     tmp_dir = getTmpDir()
     # print("batch_salome %s %s / %s sommeil %s:\n%s" % (tmp_dir, delai, delaiapp, sommeil, cmd))
-    while (not found and top < delaiapp):
+    while not found and top < delaiapp:
         if os.path.exists(tmp_dir):
             listFile = os.listdir(tmp_dir)
             listFile = [f for f in listFile if f.endswith("_pidict")]
@@ -156,13 +176,19 @@ def batch_salome(cmd, logger, cwd, args, getTmpDir,
 
         time.sleep(sommeil)
         top += 1
-        show_progress(logger, top, delaiapp, "launching salome or appli found=%s:" % found)
+        show_progress(
+            logger, top, delaiapp, "launching salome or appli found=%s:" % found
+        )
 
     # continue or not
     if found:
         logger.write("\nbatch_salome: supposed started\n", 5)
     else:
-        logger.write("\nbatch_salome: seems FAILED to launch salome or appli : %s\n" % foundSalome, 3)
+        logger.write(
+            "\nbatch_salome: seems FAILED to launch salome or appli : %s\n"
+            % foundSalome,
+            3,
+        )
         return False, -1
 
     # salome launched, run the script
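
The launch_command() and batch() helpers reformatted above start a shell command with subprocess.Popen and poll it, sending SIGTERM once the "delai" timeout is reached. A minimal standalone sketch of that poll-and-kill loop (the function name and defaults below are illustrative, not the sat API):

import os
import signal
import subprocess
import time


def run_with_timeout(cmd, timeout=20, poll_interval=1):
    """Run cmd in a shell and terminate it if it exceeds timeout seconds."""
    proc = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    waited = 0
    while proc.poll() is None:  # None means the process is still running
        if waited >= timeout:
            os.kill(proc.pid, signal.SIGTERM)  # same signal as batch() above
            return None  # no return code: the command was killed
        time.sleep(poll_interval)
        waited += poll_interval
    return proc.returncode

# usage sketch:
# print(run_with_timeout("sleep 2", timeout=5))
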
index cf467dac8e81bdc3d405a68928546591285b33df..f779b67e82e45d07d26ca16c53268c980dbc9c01 100755 (executable)
@@ -1,23 +1,23 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 # %% LICENSE_SALOME_CEA_BEGIN
 # Copyright (C) 2008-2018  CEA/DEN
-# 
+#
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # License as published by the Free Software Foundation; either
 # version 2.1 of the License, or (at your option) any later version.
-# 
+#
 # This library is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 # Lesser General Public License for more details.
-# 
+#
 # You should have received a copy of the GNU Lesser General Public
 # License along with this library; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-# 
+#
 # See http://www.salome-platform.org or email : webmaster.salome@opencascade.com
 # %% LICENSE_END
 
@@ -27,29 +27,33 @@ import unittest
 
 verbose = False
 
+
 class TestCase(unittest.TestCase):
-  
-  def test_001(self):
-    # first load resources for internationalization
-    gettext.install('salomeTools', os.path.realpath(os.path.dirname(__file__)))
-  def test_005(self):
-    res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": "hello", "2": "test_005"}
-    if verbose: print(res)
-    self.assertEqual(res, "pour test_005 Hervé Ã©crit 'hello'.")
-
-  def test_010(self):
-    res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": _("hello"), "2": "test_010"}
-    if verbose: print(res)
-    self.assertEqual(res, "pour test_010 Hervé Ã©crit 'bonjour'.")
-
-  def test_020(self):
-    # keep Ooops absent from salomeTools.po so it has no translation
-    res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": _("Ooops"), "2": "test_020"}
-    if verbose: print(res)
-    self.assertEqual(res, "pour test_020 Hervé Ã©crit 'Ooops'.")
-
-if __name__ == '__main__':
-  verbose = False
-  unittest.main()
-  pass
+    def test_001(self):
+        # first load resources for internationalization
+        gettext.install("salomeTools", os.path.realpath(os.path.dirname(__file__)))
+
+    def test_005(self):
+        res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": "hello", "2": "test_005"}
+        if verbose:
+            print(res)
+        self.assertEqual(res, "pour test_005 Hervé Ã©crit 'hello'.")
+
+    def test_010(self):
+        res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": _("hello"), "2": "test_010"}
+        if verbose:
+            print(res)
+        self.assertEqual(res, "pour test_010 Hervé Ã©crit 'bonjour'.")
+
+    def test_020(self):
+        # keep Ooops absent from salomeTools.po so it has no translation
+        res = _("Harvey writes '%(1)s' for %(2)s.") % {"1": _("Ooops"), "2": "test_020"}
+        if verbose:
+            print(res)
+        self.assertEqual(res, "pour test_020 Hervé Ã©crit 'Ooops'.")
+
+
+if __name__ == "__main__":
+    verbose = False
+    unittest.main()
+    pass
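
The tests above rely on gettext.install() putting _() into builtins and on named %-style placeholders, which let the French catalog reorder the substituted values. A hedged sketch of the same mechanism; the "i18n" locale directory below is a hypothetical path, not the layout used by salomeTools:

import gettext
import os

# Install the catalog for the "salomeTools" domain; if no .mo file is found
# under localedir, _() simply returns the original (untranslated) string.
localedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "i18n")
gettext.install("salomeTools", localedir)

# Named placeholders let a translation reorder the arguments, e.g. a French
# msgstr along the lines of "pour %(2)s Hervé écrit '%(1)s'."
print(_("Harvey writes '%(1)s' for %(2)s.") % {"1": "hello", "2": "demo"})
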
index ea22f7d297ab77c9d2fff4231e1e111eef4539e8..98ac79939b7a2e52587eed15494baca895049299 100644 (file)
@@ -5,5 +5,6 @@ create fr/LC_MESSAGES/salomeTools.mo from fr/LC_MESSAGES/salomeTools.po
 """
 
 import polib
-po = polib.pofile('fr/LC_MESSAGES/salomeTools.po', encoding='utf-8')
-po.save_as_mofile('fr/LC_MESSAGES/salomeTools.mo')
+
+po = polib.pofile("fr/LC_MESSAGES/salomeTools.po", encoding="utf-8")
+po.save_as_mofile("fr/LC_MESSAGES/salomeTools.mo")
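
Once fr/LC_MESSAGES/salomeTools.mo has been generated, the catalog can be loaded explicitly to check it. A small sketch, assuming it is run from the directory containing the fr/ tree; fallback=True avoids an exception if the .mo file is missing:

import gettext

trans = gettext.translation(
    "salomeTools", localedir=".", languages=["fr"], fallback=True
)
trans.install()  # binds _() in builtins to the French catalog (or to a null one)
print(_("Harvey writes '%(1)s' for %(2)s.") % {"1": "hello", "2": "check"})
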
index 545ac0b8dfbe368f7d2333b23129455ed653dffa..646565aa8ffc69a909a1529f7d95688f99875b1c 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -37,19 +37,23 @@ import src.debug as DBG
 log_macro_command_file_expression = "^[0-9]{8}_+[0-9]{6}_+.*\.xml$"
 log_all_command_file_expression = "^.*[0-9]{8}_+[0-9]{6}_+.*\.xml$"
 
-verbose = True # cvw TODO
+verbose = True  # cvw TODO
+
 
 class Logger(object):
     """\
     Class to handle log mechanism.
     """
-    def __init__(self,
-                 config= None,
-                 silent_sysstd=False,
-                 all_in_terminal=False,
-                 micro_command = False):
+
+    def __init__(
+        self,
+        config=None,
+        silent_sysstd=False,
+        all_in_terminal=False,
+        micro_command=False,
+    ):
         """Initialization
-        
+
         :param config pyconf.Config: The global configuration.
         :param silent_sysstd boolean: if True, do not write anything
                                       in terminal.
@@ -58,128 +62,137 @@ class Logger(object):
         self.config = config
         self.default_level = 3
         self.silentSysStd = silent_sysstd
-        
+
         # Construct xml log file location for sat prints.
         prefix = ""
         if micro_command:
             prefix = "micro_"
-        hour_command_host = (config.VARS.datehour + "_" + 
-                             config.VARS.command + "_" + 
-                             config.VARS.hostname)
+        hour_command_host = (
+            config.VARS.datehour
+            + "_"
+            + config.VARS.command
+            + "_"
+            + config.VARS.hostname
+        )
         logFileName = prefix + hour_command_host + ".xml"
         log_dir = src.get_log_path(config)
         logFilePath = os.path.join(log_dir, logFileName)
-        # Construct txt file location in order to log 
+        # Construct txt file location in order to log
         # the external command calls (cmake, make, git clone, etc...)
         txtFileName = prefix + hour_command_host + ".txt"
         txtFilePath = os.path.join(log_dir, "OUT", txtFileName)
 
         aDirLog = os.path.dirname(logFilePath)
         if not os.path.exists(aDirLog):
-          print("create log dir %s" % aDirLog)
-          src.ensure_path_exists(aDirLog)
-          # sometimes other users make 'sat log' and create hat.xml file...
-          os.chmod(aDirLog,
-                   stat.S_IRUSR |
-                   stat.S_IRGRP |
-                   stat.S_IROTH |
-                   stat.S_IWUSR |
-                   stat.S_IWGRP |
-                   stat.S_IWOTH |
-                   stat.S_IXUSR |
-                   stat.S_IXGRP |
-                   stat.S_IXOTH)
+            print("create log dir %s" % aDirLog)
+            src.ensure_path_exists(aDirLog)
+            # sometimes other users make 'sat log' and create hat.xml file...
+            os.chmod(
+                aDirLog,
+                stat.S_IRUSR
+                | stat.S_IRGRP
+                | stat.S_IROTH
+                | stat.S_IWUSR
+                | stat.S_IWGRP
+                | stat.S_IWOTH
+                | stat.S_IXUSR
+                | stat.S_IXGRP
+                | stat.S_IXOTH,
+            )
         src.ensure_path_exists(os.path.dirname(txtFilePath))
-        
-        # The path of the log files (one for sat traces, and the other for 
+
+        # The path of the log files (one for sat traces, and the other for
         # the system commands traces)
         self.logFileName = logFileName
         self.logFilePath = logFilePath
         self.txtFileName = txtFileName
         self.txtFilePath = txtFilePath
-        
+
         # The list of all log files corresponding to the current command and
         # the commands called by the current command
         self.l_logFiles = [logFilePath, txtFilePath]
-        
-        # Initialize xml instance and put first fields 
-        # like beginTime, user, command, etc... 
-        self.xmlFile = xmlManager.XmlLogFile(logFilePath, "SATcommand", 
-                            attrib = {"application" : config.VARS.application})
+
+        # Initialize xml instance and put first fields
+        # like beginTime, user, command, etc...
+        self.xmlFile = xmlManager.XmlLogFile(
+            logFilePath, "SATcommand", attrib={"application": config.VARS.application}
+        )
         self.put_initial_xml_fields()
         # Initialize the txt file for reading
         try:
-            self.logTxtFile = open(str(self.txtFilePath), 'w')
+            self.logTxtFile = open(str(self.txtFilePath), "w")
         except IOError:
-            #msg1 = _("WARNING! Trying to write to a file that"
+            # msg1 = _("WARNING! Trying to write to a file that"
             #         " is not accessible:")
-            #msg2 = _("The logs won't be written.")
-            #print("%s\n%s\n%s\n" % (src.printcolors.printcWarning(msg1),
+            # msg2 = _("The logs won't be written.")
+            # print("%s\n%s\n%s\n" % (src.printcolors.printcWarning(msg1),
             #                        src.printcolors.printcLabel(str(self.txtFilePath)),
             #                        src.printcolors.printcWarning(msg2) ))
             self.logTxtFile = tempfile.TemporaryFile()
-            
+
         # If the option all_in_terminal was called, all the system commands
         # are redirected to the terminal
         if all_in_terminal:
             self.logTxtFile = sys.__stdout__
-        
+
     def put_initial_xml_fields(self):
         """\
         Called at class initialization: Put all fields 
         corresponding to the command context (user, time, ...)
         """
         # command name
-        self.xmlFile.add_simple_node("Site", attrib={"command" : 
-                                                     self.config.VARS.command})
+        self.xmlFile.add_simple_node(
+            "Site", attrib={"command": self.config.VARS.command}
+        )
         # version of salomeTools
-        self.xmlFile.append_node_attrib("Site", attrib={"satversion" : 
-                                            src.get_salometool_version(self.config)})
+        self.xmlFile.append_node_attrib(
+            "Site", attrib={"satversion": src.get_salometool_version(self.config)}
+        )
         # machine name on which the command has been launched
-        self.xmlFile.append_node_attrib("Site", attrib={"hostname" : 
-                                                    self.config.VARS.hostname})
+        self.xmlFile.append_node_attrib(
+            "Site", attrib={"hostname": self.config.VARS.hostname}
+        )
         # Distribution of the machine
-        self.xmlFile.append_node_attrib("Site", attrib={"OS" : 
-                                                        self.config.VARS.dist})
+        self.xmlFile.append_node_attrib("Site", attrib={"OS": self.config.VARS.dist})
         # The user that has launched the command
-        self.xmlFile.append_node_attrib("Site", attrib={"user" : 
-                                                        self.config.VARS.user})
+        self.xmlFile.append_node_attrib("Site", attrib={"user": self.config.VARS.user})
         # The time when command was launched
         Y, m, dd, H, M, S = date_to_datetime(self.config.VARS.datehour)
         date_hour = "%4s/%2s/%2s %2sh%2sm%2ss" % (Y, m, dd, H, M, S)
-        self.xmlFile.append_node_attrib("Site", attrib={"beginTime" : 
-                                                        date_hour})
+        self.xmlFile.append_node_attrib("Site", attrib={"beginTime": date_hour})
         # The application if any
         if "APPLICATION" in self.config:
-            self.xmlFile.append_node_attrib("Site", 
-                        attrib={"application" : self.config.VARS.application})
+            self.xmlFile.append_node_attrib(
+                "Site", attrib={"application": self.config.VARS.application}
+            )
         # The initialization of the trace node
-        self.xmlFile.add_simple_node("Log",text="")
+        self.xmlFile.add_simple_node("Log", text="")
         # The system commands logs
-        self.xmlFile.add_simple_node("OutLog",
-                                    text=os.path.join("OUT", self.txtFileName))
-        # The initialization of the node where 
+        self.xmlFile.add_simple_node(
+            "OutLog", text=os.path.join("OUT", self.txtFileName)
+        )
+        # The initialization of the node where
         # to put the links to the other sat commands that can be called by any
-        # command 
+        # command
         self.xmlFile.add_simple_node("Links")
 
-    def add_link(self,
-                 log_file_name,
-                 command_name,
-                 command_res,
-                 full_launched_command):
+    def add_link(self, log_file_name, command_name, command_res, full_launched_command):
         """Add a link to another log file.
-        
+
         :param log_file_name str: The file name of the link.
         :param command_name str: The name of the command linked.
         :param command_res str: The result of the command linked. "0" or "1"
-        :param full_launched_command str: The full launch command 
+        :param full_launched_command str: The full launch command
                                           ("sat command ...")
         """
         xmlLinks = self.xmlFile.xmlroot.find("Links")
         flc = src.xmlManager.escapeSequence(full_launched_command)
-        att = {"command" : command_name, "passed" : str(command_res), "launchedCommand" : flc}
-        src.xmlManager.add_simple_node(xmlLinks, "link", text = log_file_name, attrib = att)
+        att = {
+            "command": command_name,
+            "passed": str(command_res),
+            "launchedCommand": flc,
+        }
+        src.xmlManager.add_simple_node(xmlLinks, "link", text=log_file_name, attrib=att)
 
     def write(self, message, level=None, screenOnly=False):
         """\
@@ -199,82 +212,82 @@ class Logger(object):
 
         # do not write message starting with \r to log file
         if not message.startswith("\r") and not screenOnly:
-            self.xmlFile.append_node_text("Log", 
-                                          printcolors.cleancolor(message))
+            self.xmlFile.append_node_text("Log", printcolors.cleancolor(message))
 
         # get user or option output level
         current_output_verbose_level = self.config.USER.output_verbose_level
-        if not ('isatty' in dir(sys.stdout) and sys.stdout.isatty()):
+        if not ("isatty" in dir(sys.stdout) and sys.stdout.isatty()):
             # clean the message color if the terminal is redirected by user
             # ex: sat compile appli > log.txt
             message = printcolors.cleancolor(message)
-        
+
         # Print message regarding the output level value
         if level:
             if level <= current_output_verbose_level and not self.silentSysStd:
                 sys.stdout.write(message)
         else:
-            if self.default_level <= current_output_verbose_level and not self.silentSysStd:
+            if (
+                self.default_level <= current_output_verbose_level
+                and not self.silentSysStd
+            ):
                 sys.stdout.write(message)
         self.flush()
 
     def error(self, message, prefix="ERROR: "):
-      """Print an error.
+        """Print an error.
 
-      :param message str: The message to print.
-      """
-      # Print in the log file
-      self.xmlFile.append_node_text("traces", prefix + message)
+        :param message str: The message to print.
+        """
+        # Print in the log file
+        self.xmlFile.append_node_text("traces", prefix + message)
 
-      # Print in the terminal and clean colors if the terminal
-      # is redirected by user
-      if not ('isatty' in dir(sys.stderr) and sys.stderr.isatty()):
-        sys.stderr.write(printcolors.printcError(prefix + message + "\n"))
-      else:
-        sys.stderr.write(prefix + message + "\n")
+        # Print in the terminal and clean colors if the terminal
+        # is redirected by user
+        if not ("isatty" in dir(sys.stderr) and sys.stderr.isatty()):
+            sys.stderr.write(printcolors.printcError(prefix + message + "\n"))
+        else:
+            sys.stderr.write(prefix + message + "\n")
 
     def step(self, message):
-      """Print an step message.
+        """Print an step message.
 
-      :param message str: The message to print.
-      """
-      self.write('STEP: ' + message, level=4)
+        :param message str: The message to print.
+        """
+        self.write("STEP: " + message, level=4)
 
     def trace(self, message):
-      """Print an trace message.
+        """Print an trace message.
 
-      :param message str: The message to print.
-      """
-      self.write('TRACE: ' + message, level=5)
+        :param message str: The message to print.
+        """
+        self.write("TRACE: " + message, level=5)
 
     def debug(self, message):
-      """Print an debug message.
+        """Print an debug message.
 
-      :param message str: The message to print.
-      """
-      self.write('DEBUG: ' + message, level=6)
+        :param message str: The message to print.
+        """
+        self.write("DEBUG: " + message, level=6)
 
     def warning(self, message):
-      """Print an warning message.
+        """Print an warning message.
 
-      :param message str: The message to print.
-      """
-      self.error(message, prefix="WARNING: ")
+        :param message str: The message to print.
+        """
+        self.error(message, prefix="WARNING: ")
 
     def critical(self, message):
-      """Print an critical message.
-
-      :param message str: The message to print.
-      """
-      self.error(message, prefix="CRITICAL: ")
-
+        """Print an critical message.
 
+        :param message str: The message to print.
+        """
+        self.error(message, prefix="CRITICAL: ")
 
     def flush(self):
         """Flush terminal"""
         sys.stdout.flush()
         self.logTxtFile.flush()
-        
+
     def end_write(self, attribute):
         """\
         Called just after command end: Put all fields 
@@ -283,7 +296,7 @@ class Logger(object):
         And display the command to launch to get the log
         
         :param attribute dict: the attribute to add to the node "Site".
-        """       
+        """
         # Get current time (end of command) and format it
         dt = datetime.datetime.now()
         Y, m, dd, H, M, S = date_to_datetime(self.config.VARS.datehour)
@@ -292,23 +305,24 @@ class Logger(object):
         delta = tf - t0
         total_time = timedelta_total_seconds(delta)
         hours = int(total_time / 3600)
-        minutes = int((total_time - hours*3600) / 60)
-        seconds = total_time - hours*3600 - minutes*60
+        minutes = int((total_time - hours * 3600) / 60)
+        seconds = total_time - hours * 3600 - minutes * 60
         # Add the fields corresponding to the end time
         # and the total time of command
-        endtime = dt.strftime('%Y/%m/%d %Hh%Mm%Ss')
-        self.xmlFile.append_node_attrib("Site", attrib={"endTime" : endtime})
-        self.xmlFile.append_node_attrib("Site", 
-                attrib={"TotalTime" : "%ih%im%is" % (hours, minutes, seconds)})
-        
+        endtime = dt.strftime("%Y/%m/%d %Hh%Mm%Ss")
+        self.xmlFile.append_node_attrib("Site", attrib={"endTime": endtime})
+        self.xmlFile.append_node_attrib(
+            "Site", attrib={"TotalTime": "%ih%im%is" % (hours, minutes, seconds)}
+        )
+
         # Add the attribute passed to the method
         self.xmlFile.append_node_attrib("Site", attrib=attribute)
-        
+
         # Call the method to write the xml file on the hard drive
-        self.xmlFile.write_tree(stylesheet = "command.xsl")
+        self.xmlFile.write_tree(stylesheet="command.xsl")
 
         # so unconditionally copy stylesheet file(s)
-        xslDir = os.path.join(self.config.VARS.srcDir, 'xsl')
+        xslDir = os.path.join(self.config.VARS.srcDir, "xsl")
         xslCommand = "command.xsl"
         # xslHat = "hat.xsl" # have to be completed (one time at end)
         xsltest = "test.xsl"
@@ -321,24 +335,24 @@ class Logger(object):
         # OP We use copy instead of copy2 to update the creation date
         #    So we can clean the LOGS directories easily
         for f in files_to_copy:
-          f_init = os.path.join(xslDir, f)
-          f_target = os.path.join(logDir, f)
-          if not os.path.isfile(f_target): # do not override
-            shutil.copy(f_init, logDir)
-        
+            f_init = os.path.join(xslDir, f)
+            f_target = os.path.join(logDir, f)
+            if not os.path.isfile(f_target):  # do not override
+                shutil.copy(f_init, logDir)
+
         # Dump the config in a pyconf file in the log directory
-        dumpedPyconfFileName = (self.config.VARS.datehour
-                                + "_" 
-                                + self.config.VARS.command 
-                                + ".pyconf")
-        dumpedPyconfFilePath = os.path.join(logDir, 'OUT', dumpedPyconfFileName)
+        dumpedPyconfFileName = (
+            self.config.VARS.datehour + "_" + self.config.VARS.command + ".pyconf"
+        )
+        dumpedPyconfFilePath = os.path.join(logDir, "OUT", dumpedPyconfFileName)
         try:
-            f = open(dumpedPyconfFilePath, 'w')
+            f = open(dumpedPyconfFilePath, "w")
             self.config.__save__(f)
             f.close()
         except IOError:
             pass
 
+
 def date_to_datetime(date):
     """\
     From a string date in format YYYYMMDD_HHMMSS
@@ -356,6 +370,7 @@ def date_to_datetime(date):
     S = date[13:15]
     return Y, m, dd, H, M, S
 
+
 def timedelta_total_seconds(timedelta):
     """\
     Replace total_seconds from datetime module 
@@ -366,9 +381,12 @@ def timedelta_total_seconds(timedelta):
     :rtype: float
     """
     return (
-        timedelta.microseconds + 0.0 +
-        (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
-        
+        timedelta.microseconds
+        + 0.0
+        + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6
+    ) / 10 ** 6
+
+
 def show_command_log(logFilePath, cmd, application, notShownCommands):
     """\
     Used in updateHatXml. 
@@ -390,7 +408,7 @@ def show_command_log(logFilePath, cmd, application, notShownCommands):
     # Do not show
     if cmd in notShownCommands:
         return False, None, None
+
     # Get the application of the log file
     try:
         logFileXml = src.xmlManager.ReadXmlFile(logFilePath)
@@ -400,29 +418,30 @@ def show_command_log(logFilePath, cmd, application, notShownCommands):
         return False, None, None
 
     try:
-        if 'application' in logFileXml.xmlroot.keys():
-          appliLog = logFileXml.xmlroot.get('application')
-          launched_cmd = logFileXml.xmlroot.find('Site').attrib['launchedCommand']
-          # if it corresponds, then the log has to be shown
-          if appliLog == application:
-              return True, appliLog, launched_cmd
-          elif application != 'None':
-              return False, appliLog, launched_cmd
-
-          return True, appliLog, launched_cmd
+        if "application" in logFileXml.xmlroot.keys():
+            appliLog = logFileXml.xmlroot.get("application")
+            launched_cmd = logFileXml.xmlroot.find("Site").attrib["launchedCommand"]
+            # if it corresponds, then the log has to be shown
+            if appliLog == application:
+                return True, appliLog, launched_cmd
+            elif application != "None":
+                return False, appliLog, launched_cmd
+
+            return True, appliLog, launched_cmd
     except Exception as e:
         msg = _("WARNING: the log file %s cannot be parsed:" % logFilePath)
         sys.stdout.write(printcolors.printcWarning("%s\n%s\n" % (msg, e)))
         return False, None, None
 
-    if application == 'None':
-            return True, None, None
-        
+    if application == "None":
+        return True, None, None
+
     return False, None, None
 
+
 def list_log_file(dirPath, expression):
     """Find all files corresponding to expression in dirPath
-    
+
     :param dirPath str: the directory where to search the files
     :param expression str: the regular expression of files to find
     :return: the list of file paths and information about them
@@ -436,33 +455,42 @@ def list_log_file(dirPath, expression):
         if oExpr.search(fileName):
             file_name = fileName
             if fileName.startswith("micro_"):
-                file_name = fileName[len("micro_"):]
+                file_name = fileName[len("micro_") :]
             # get date and hour and format it
-            date_hour_cmd_host = file_name.split('_')
+            date_hour_cmd_host = file_name.split("_")
             date_not_formated = date_hour_cmd_host[0]
-            date = "%s/%s/%s" % (date_not_formated[6:8], 
-                                 date_not_formated[4:6], 
-                                 date_not_formated[0:4])
+            date = "%s/%s/%s" % (
+                date_not_formated[6:8],
+                date_not_formated[4:6],
+                date_not_formated[0:4],
+            )
             hour_not_formated = date_hour_cmd_host[1]
-            hour = "%s:%s:%s" % (hour_not_formated[0:2], 
-                                 hour_not_formated[2:4], 
-                                 hour_not_formated[4:6])
+            hour = "%s:%s:%s" % (
+                hour_not_formated[0:2],
+                hour_not_formated[2:4],
+                hour_not_formated[4:6],
+            )
             if len(date_hour_cmd_host) < 4:
-                cmd = date_hour_cmd_host[2][:-len('.xml')]
+                cmd = date_hour_cmd_host[2][: -len(".xml")]
                 host = ""
             else:
                 cmd = date_hour_cmd_host[2]
-                host = date_hour_cmd_host[3][:-len('.xml')]
-            lRes.append((os.path.join(dirPath, fileName), 
-                         date_not_formated,
-                         date,
-                         hour_not_formated,
-                         hour,
-                         cmd,
-                         host))
+                host = date_hour_cmd_host[3][: -len(".xml")]
+            lRes.append(
+                (
+                    os.path.join(dirPath, fileName),
+                    date_not_formated,
+                    date,
+                    hour_not_formated,
+                    hour,
+                    cmd,
+                    host,
+                )
+            )
     return lRes
 
-def update_hat_xml(logDir, application=None, notShownCommands = []):
+
+def update_hat_xml(logDir, application=None, notShownCommands=[]):
     """\
     Create the xml file in logDir that references all the xml files
     having a name like YYYYMMDD_HHMMSS_namecmd.xml
@@ -471,36 +499,44 @@ def update_hat_xml(logDir, application=None, notShownCommands = []):
     :param application str: the name of the application if there is any
     """
     # Create an instance of XmlLogFile class to create hat.xml file
-    xmlHatFilePath = os.path.join(logDir, 'hat.xml')
-    xmlHat = src.xmlManager.XmlLogFile(xmlHatFilePath, "LOGlist", {"application" : application})
-    # parse the log directory to find all the command logs, 
+    xmlHatFilePath = os.path.join(logDir, "hat.xml")
+    xmlHat = src.xmlManager.XmlLogFile(
+        xmlHatFilePath, "LOGlist", {"application": application}
+    )
+    # parse the log directory to find all the command logs,
     # then add it to the xml file
     lLogFile = list_log_file(logDir, log_macro_command_file_expression)
     for filePath, __, date, __, hour, cmd, __ in lLogFile:
-        showLog, cmdAppli, full_cmd = show_command_log(filePath, cmd,
-                                              application, notShownCommands)
-        #if cmd not in notShownCommands:
+        showLog, cmdAppli, full_cmd = show_command_log(
+            filePath, cmd, application, notShownCommands
+        )
+        # if cmd not in notShownCommands:
         if showLog:
             # add a node to the hat.xml file
-            xmlHat.add_simple_node("LogCommand", 
-                                   text=os.path.basename(filePath), 
-                                   attrib = {"date" : date, 
-                                             "hour" : hour, 
-                                             "cmd" : cmd, 
-                                             "application" : cmdAppli,
-                                             "full_command" : full_cmd})
-    
+            xmlHat.add_simple_node(
+                "LogCommand",
+                text=os.path.basename(filePath),
+                attrib={
+                    "date": date,
+                    "hour": hour,
+                    "cmd": cmd,
+                    "application": cmdAppli,
+                    "full_command": full_cmd,
+                },
+            )
+
     # Write the file on the hard drive
-    xmlHat.write_tree('hat.xsl')
+    xmlHat.write_tree("hat.xsl")
     # Sometimes other users will make 'sat log' and update this file
-    os.chmod(xmlHatFilePath,
-             stat.S_IRUSR |
-             stat.S_IRGRP |
-             stat.S_IROTH |
-             stat.S_IWUSR |
-             stat.S_IWGRP |
-             stat.S_IWOTH )
-
+    os.chmod(
+        xmlHatFilePath,
+        stat.S_IRUSR
+        | stat.S_IRGRP
+        | stat.S_IROTH
+        | stat.S_IWUSR
+        | stat.S_IWGRP
+        | stat.S_IWOTH,
+    )
 
 
 # TODO for future
@@ -508,50 +544,59 @@ def update_hat_xml(logDir, application=None, notShownCommands = []):
 # suppose only one logger in sat5.1
 _currentLogger = []
 
+
 def getCurrentLogger():
-  """get current logging logger, set as DefaultLogger if not set yet"""
-  if len(_currentLogger) == 0:
+    """get current logging logger, set as DefaultLogger if not set yet"""
+    if len(_currentLogger) == 0:
+        import src.loggingSimple as LOGSI
+
+        logger = LOGSI.getDefaultLogger()
+        _currentLogger.append(logger)
+        logger.warning("set by default current logger as %s" % logger.name)
+    return _currentLogger[0]
+
+
+def getDefaultLogger():
+    """get simple logging logger DefaultLogger, set it as current"""
     import src.loggingSimple as LOGSI
+
     logger = LOGSI.getDefaultLogger()
-    _currentLogger.append(logger)
-    logger.warning("set by default current logger as %s" % logger.name)
-  return _currentLogger[0]
+    setCurrentLogger(logger)  # set it as current
+    return logger
 
-def getDefaultLogger():
-  """get simple logging logger DefaultLogger, set it as current"""
-  import src.loggingSimple as LOGSI
-  logger = LOGSI.getDefaultLogger()
-  setCurrentLogger(logger) # set it as current
-  return logger
 
 def getUnittestLogger():
-  """get simple logging logger UnittestLogger, set it as current"""
-  import src.loggingSimple as LOGSI
-  logger = LOGSI.getUnittestLogger()
-  setCurrentLogger(logger) # set it as current
-  return logger
+    """get simple logging logger UnittestLogger, set it as current"""
+    import src.loggingSimple as LOGSI
+
+    logger = LOGSI.getUnittestLogger()
+    setCurrentLogger(logger)  # set it as current
+    return logger
+
 
 def setCurrentLogger(logger):
-  """temporary send all in stdout as simple logging logger"""
-  if len(_currentLogger) == 0:
-    _currentLogger.append(logger)
-    logger.debug("set current logger as %s" % logger.name)
-  else:
-    if _currentLogger[0].name != logger.name:
-      # logger.debug("quit current logger as default %s" % _currentLogger[0].name)
-      _currentLogger[0] = logger
-      logger.warning("change current logger as %s" % logger.name)
-  return _currentLogger[0]
+    """temporary send all in stdout as simple logging logger"""
+    if len(_currentLogger) == 0:
+        _currentLogger.append(logger)
+        logger.debug("set current logger as %s" % logger.name)
+    else:
+        if _currentLogger[0].name != logger.name:
+            # logger.debug("quit current logger as default %s" % _currentLogger[0].name)
+            _currentLogger[0] = logger
+            logger.warning("change current logger as %s" % logger.name)
+    return _currentLogger[0]
+
 
 def isCurrentLoggerUnittest():
     logger = getCurrentLogger()
     if "Unittest" in logger.name:
-      res = True
+        res = True
     else:
-      res = False
-    #DBG.write("isCurrentLoggerUnittest %s" % logger.name, res)
+        res = False
+    # DBG.write("isCurrentLoggerUnittest %s" % logger.name, res)
     return res
 
+
 def sendMessageToCurrentLogger(message, level):
     """
     assume relay from obsolescent
@@ -560,27 +605,27 @@ def sendMessageToCurrentLogger(message, level):
     """
     logger = getCurrentLogger()
     if level is None:
-      lev = 2
+        lev = 2
     else:
-      lev = level
+        lev = level
     if lev <= 1:
-      logger.critical(message)
-      return
+        logger.critical(message)
+        return
     if lev == 2:
-      logger.warning(message)
-      return
+        logger.warning(message)
+        return
     if lev == 3:
-      logger.info(message)
-      return
+        logger.info(message)
+        return
     if lev == 4:
-      logger.step(message)
-      return
+        logger.step(message)
+        return
     if lev == 5:
-      logger.trace(message)
-      return
+        logger.trace(message)
+        return
     if lev >= 6:
-      logger.debug(message)
-      return
+        logger.debug(message)
+        return
     msg = "What is this level: '%s' for message:\n%s" % (level, message)
     logger.warning(msg)
     return
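
sendMessageToCurrentLogger() above maps sat's numeric verbosity levels (1..6) onto critical/warning/info/step/trace/debug calls; step() and trace() are not standard logging levels and are registered just below INFO, as the loggingSimple diff that follows shows (_STEP = INFO - 1, _TRACE = INFO - 2). A hedged sketch of how such levels can be attached to the standard logging module; the helper functions are illustrative, not the sat implementation:

import logging

STEP = logging.INFO - 1   # just below INFO, mirroring _STEP in loggingSimple
TRACE = logging.INFO - 2  # just below STEP, mirroring _TRACE
logging.addLevelName(STEP, "STEP")
logging.addLevelName(TRACE, "TRACE")


def _step(self, message, *args, **kwargs):
    if self.isEnabledFor(STEP):
        self._log(STEP, message, args, **kwargs)


def _trace(self, message, *args, **kwargs):
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)

# attach the helpers to every Logger instance (illustrative monkey-patch)
logging.Logger.step = _step
logging.Logger.trace = _trace

logging.basicConfig(level=TRACE, format="%(levelname)s %(message)s")
logger = logging.getLogger("demo")
logger.step("a step message")    # printed, since the root level is TRACE
logger.trace("a trace message")
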
index 177b3426f2bf67dea058be8d2933518fc663c6dd..3667a0d199e06499f51c0831e5a22d106536bea8 100755 (executable)
@@ -27,8 +27,8 @@ import src.debug as DBG  # Easy print stderr (for DEBUG only)
 
 _verbose = False
 _name = "loggingSimple"
-_loggerDefaultName = 'SimpleDefaultLogger'
-_loggerUnittestName = 'SimpleUnittestLogger'
+_loggerDefaultName = "SimpleDefaultLogger"
+_loggerUnittestName = "SimpleUnittestLogger"
 
 _STEP = LOGI.INFO - 1  # step level is just below INFO
 _TRACE = LOGI.INFO - 2  # trace level is just below STEP
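
As a reminder (illustration only, not part of the commit): the two custom levels above sit just below INFO on the standard logging scale.

    import logging

    STEP = logging.INFO - 1   # 19
    TRACE = logging.INFO - 2  # 18
    assert logging.DEBUG < TRACE < STEP < logging.INFO  # 10 < 18 < 19 < 20
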
@@ -44,52 +44,53 @@ _knownLevelsStr = "[%s]" % "|".join(_knownLevels)
 # utilities methods
 #################################################################
 
+
 def filterLevel(aLevel):
-  """
-  filter levels logging values from firsts characters levels.
-  No case sensitive
-
-  | example:
-  | 'i' -> 'INFO'
-  | 'cRiT' -> 'CRITICAL'
-  """
-  aLev = aLevel.upper()
-  knownLevels = _knownLevels
-  maxLen = max([len(i) for i in knownLevels])
-  for i in range(maxLen):
-    for lev in knownLevels:
-      if aLev == lev[:i]:
-        # DBG.write("filterLevel", "%s -> %s" % (aLevel, lev))
-        return lev
-  msg = "Unknown level '%s', accepted are:\n%s" % (aLev, ", ".join(knownLevels))
-  return msg
-  # raise Exception(msg)
+    """
+    filter logging level values from their first characters.
+    Not case sensitive.
+
+    | example:
+    | 'i' -> 'INFO'
+    | 'cRiT' -> 'CRITICAL'
+    """
+    aLev = aLevel.upper()
+    knownLevels = _knownLevels
+    maxLen = max([len(i) for i in knownLevels])
+    for i in range(maxLen):
+        for lev in knownLevels:
+            if aLev == lev[:i]:
+                # DBG.write("filterLevel", "%s -> %s" % (aLevel, lev))
+                return lev
+    msg = "Unknown level '%s', accepted are:\n%s" % (aLev, ", ".join(knownLevels))
+    return msg
+    # raise Exception(msg)
 
 
 def indent(msg, nb, car=" "):
-  """indent nb car (spaces) multi lines message except first one"""
-  s = msg.split("\n")
-  res = ("\n" + car * nb).join(s)
-  return res
+    """indent nb car (spaces) multi lines message except first one"""
+    s = msg.split("\n")
+    res = ("\n" + car * nb).join(s)
+    return res
 
 
 def indentUnittest(msg, prefix=" | "):
-  """
-  indent multi lines message except first one with prefix.
-  prefix default is designed for less spaces for size logs files
-  and keep logs human eye readable
-  """
-  s = msg.split("\n")
-  res = ("\n" + prefix).join(s)
-  return res
+    """
+    indent multi lines message except first one with prefix.
+    the default prefix is designed to use fewer spaces (smaller log files)
+    while keeping logs readable to the human eye
+    """
+    s = msg.split("\n")
+    res = ("\n" + prefix).join(s)
+    return res
 
 
 def log(msg, force=False):
-  """elementary log when no logging.Logger yet"""
-  prefix = "---- %s.log: " % _name
-  nb = len(prefix)
-  if _verbose or force:
-    print(prefix + indent(msg, nb))
+    """elementary log when no logging.Logger yet"""
+    prefix = "---- %s.log: " % _name
+    nb = len(prefix)
+    if _verbose or force:
+        print(prefix + indent(msg, nb))
 
 
 # just for debug info where is import logging
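
Behaviour of the helpers above, restated as a small check. This is not part of the commit; it assumes the salomeTools root is on sys.path so that src.loggingSimple imports, and the expected values come from the docstrings and code shown here.

    import src.loggingSimple as LOGSI

    assert LOGSI.filterLevel("i") == "INFO"            # case-insensitive prefix match
    assert LOGSI.filterLevel("cRiT") == "CRITICAL"
    assert LOGSI.indentUnittest("a\nb") == "a\n | b"   # every line but the first gets ' | '
    assert LOGSI.indent("a\nb", 2) == "a\n  b"         # same idea, with plain spaces
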
@@ -97,55 +98,55 @@ log("import logging on %s" % LOGI.__file__)
 
 
 def getStrDirLogger(logger):
-  """
-  Returns multi line string for logger description, with dir(logger).
-  Used for debug
-  """
-  lgr = logger  # shortcut
-  msg = "%s(name=%s, dateLogger=%s):\n%s\n"
-  cName = lgr.__class__.__name__
-  res = msg % (cName, lgr.name, lgr.dateLogger, PP.pformat(dir(lgr)))
-  return res
+    """
+    Returns multi line string for logger description, with dir(logger).
+    Used for debug
+    """
+    lgr = logger  # shortcut
+    msg = "%s(name=%s, dateLogger=%s):\n%s\n"
+    cName = lgr.__class__.__name__
+    res = msg % (cName, lgr.name, lgr.dateLogger, PP.pformat(dir(lgr)))
+    return res
 
 
 def getStrHandler(handler):
-  """
-  Returns one line string for handler description
-  (as inexisting __repr__)
-  to avoid create inherited classe(s) handler
-  """
-  h = handler  # shortcut
-  msg = "%s(name=%s)"
-  cName = h.__class__.__name__
-  # get_name absent in logging 0.5.0.5 python 2.6
-  res = msg % (cName, h._name)
-  return res
+    """
+    Returns one line string for handler description
+    (handlers have no usable __repr__)
+    to avoid creating inherited handler class(es)
+    """
+    h = handler  # shortcut
+    msg = "%s(name=%s)"
+    cName = h.__class__.__name__
+    # get_name absent in logging 0.5.0.5 python 2.6
+    res = msg % (cName, h._name)
+    return res
 
 
 def getStrShort(msg):
-  """Returns short string for msg (as first caracters without line feed"""
-  # log("getStrShort " + str(msg), True)
-  res = msg.replace("\n", "//")[0:30]
-  return res
+    """Returns short string for msg (as first caracters without line feed"""
+    # log("getStrShort " + str(msg), True)
+    res = msg.replace("\n", "//")[0:30]
+    return res
 
 
 def getStrLogRecord(logRecord):
-  """
-  Returns one line string for simple logging LogRecord description
-  """
-  msg = "LogRecord(level='%s', msg='%s...')"
-  shortMsg = getStrShort(logRecord.msg)
-  levelName = logRecord.levelname
-  res = msg % (levelName, shortMsg)
-  return res
+    """
+    Returns one line string for simple logging LogRecord description
+    """
+    msg = "LogRecord(level='%s', msg='%s...')"
+    shortMsg = getStrShort(logRecord.msg)
+    levelName = logRecord.levelname
+    res = msg % (levelName, shortMsg)
+    return res
 
 
 def getListOfStrLogRecord(listOfLogRecord):
-  """
-  Returns one line string for logging LogRecord description
-  """
-  res = [getStrLogRecord(l) for l in listOfLogRecord]
-  return res
+    """
+    Returns one line string for logging LogRecord description
+    """
+    res = [getStrLogRecord(l) for l in listOfLogRecord]
+    return res
 
 
 #################################################################
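
The debug helpers above only build strings; for instance (illustration, not part of the commit, same sys.path assumption as before):

    import src.loggingSimple as LOGSI

    print(LOGSI.getStrShort("first line\nsecond line"))  # 'first line//second line', capped at 30 chars
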
@@ -153,233 +154,242 @@ def getListOfStrLogRecord(listOfLogRecord):
 #################################################################
 
 try:
-  unicode
-  _unicode = True
+    unicode
+    _unicode = True
 except NameError:
-  _unicode = False
+    _unicode = False
 
 
 def getMessage(self):
-  """
-  modified from logging.__init__.LogRecord.getMessage,
-  better message on format error
-  Return the message for this LogRecord.
-
-  Return the message for this LogRecord after merging any user-supplied
-  arguments with the message.
-  """
-  if not _unicode:  # if no unicode support...
-    msg = str(self.msg)
-  else:
-    msg = self.msg
-    if not isinstance(msg, basestring):
-      try:
+    """
+    modified from logging.__init__.LogRecord.getMessage,
+    better message on format error
+    Return the message for this LogRecord.
+
+    Return the message for this LogRecord after merging any user-supplied
+    arguments with the message.
+    """
+    if not _unicode:  # if no unicode support...
         msg = str(self.msg)
-      except UnicodeError:
-        msg = self.msg  # Defer encoding till later
-  if self.args:
-    try:  # better message on format error
-      msg = msg % self.args
-    except Exception as e:
-      msg = "ERROR: %s with args %s" % (msg, PP.pformat(self.args))
-      log(msg, True)
-  return msg
+    else:
+        msg = self.msg
+        if not isinstance(msg, basestring):
+            try:
+                msg = str(self.msg)
+            except UnicodeError:
+                msg = self.msg  # Defer encoding till later
+    if self.args:
+        try:  # better message on format error
+            msg = msg % self.args
+        except Exception as e:
+            msg = "ERROR: %s with args %s" % (msg, PP.pformat(self.args))
+            log(msg, True)
+    return msg
 
 
 LOGI.LogRecord.getMessage = getMessage  # better message if error
 
 
 #################################################################
-class LoggerSimple(LOGI.Logger, object): # object force new-style classes in logging 0.5.0.5 python 2.6
-  """
-  Inherited class logging.Logger for logger salomeTools
-
-  | add a level STEP as log.step(msg)
-  | add a level TRACE as log.trace(msg)
-  | below log.info(msg)
-  | above log.debug(msg)
-  | to assume message step inside files xml 'command's internal traces'
-  | to assume store long log asci in files txt outside files xml
-  |
-  | see: /usr/lib64/python2.7/logging/__init__.py etc.
-  """
-
-  def __init__(self, name, level=LOGI.INFO):
-    """
-    Initialize the logger with a name and an optional level.
-    """
-    super(LoggerSimple, self).__init__(name, level)
-    LOGI.addLevelName(_STEP, "STEP")
-    LOGI.addLevelName(_TRACE, "TRACE")
-    self.dateLogger = "NoDateLogger"
-    self.dateHour = None  # datehour of main command
-    self.isClosed = False
-    self.STEP = _STEP
-    self.TRACE = _TRACE
-
-  def close(self):
+class LoggerSimple(
+    LOGI.Logger, object
+):  # object force new-style classes in logging 0.5.0.5 python 2.6
     """
-    final stuff for logger, done at end salomeTools
-    flushed and closed xml files have to be not overriden/appended
+    Inherited class logging.Logger for logger salomeTools
+
+    | add a level STEP as log.step(msg)
+    | add a level TRACE as log.trace(msg)
+    | below log.info(msg)
+    | above log.debug(msg)
+    | step messages are meant for the xml files ('command's internal traces')
+    | long ascii logs are stored as txt files, outside the xml files
+    |
+    | see: /usr/lib64/python2.7/logging/__init__.py etc.
     """
-    if self.isClosed:
-      raise Exception("logger closed yet: %s" % self)
-    log("close stuff logger %s" % self)  # getStrDirLogger(self)
-    for handl in list(self.handlers):  # get original list
-      log("close stuff handler %s" % getStrHandler(handl))
-      handl.close()  # Tidy up any resources used by the handler.
-      self.removeHandler(handl)
-    # todo etc
-    self.isClosed = True  # done at end of execution
-    return
 
-  def __repr__(self):
-    """one line string representation"""
-    msg = "%s(name=%s, dateLogger=%s, handlers=%s)"
-    cName = self.__class__.__name__
-    h = [getStrHandler(h) for h in self.handlers]
-    h = "[" + ", ".join(h) + "]"
-    res = msg % (cName, self.name, self.dateLogger, h)
-    return res
+    def __init__(self, name, level=LOGI.INFO):
+        """
+        Initialize the logger with a name and an optional level.
+        """
+        super(LoggerSimple, self).__init__(name, level)
+        LOGI.addLevelName(_STEP, "STEP")
+        LOGI.addLevelName(_TRACE, "TRACE")
+        self.dateLogger = "NoDateLogger"
+        self.dateHour = None  # datehour of main command
+        self.isClosed = False
+        self.STEP = _STEP
+        self.TRACE = _TRACE
+
+    def close(self):
+        """
+        final stuff for logger, done at end salomeTools
+        flushed and closed xml files have to be not overriden/appended
+        """
+        if self.isClosed:
+            raise Exception("logger closed yet: %s" % self)
+        log("close stuff logger %s" % self)  # getStrDirLogger(self)
+        for handl in list(self.handlers):  # get original list
+            log("close stuff handler %s" % getStrHandler(handl))
+            handl.close()  # Tidy up any resources used by the handler.
+            self.removeHandler(handl)
+        # todo etc
+        self.isClosed = True  # done at end of execution
+        return
+
+    def __repr__(self):
+        """one line string representation"""
+        msg = "%s(name=%s, dateLogger=%s, handlers=%s)"
+        cName = self.__class__.__name__
+        h = [getStrHandler(h) for h in self.handlers]
+        h = "[" + ", ".join(h) + "]"
+        res = msg % (cName, self.name, self.dateLogger, h)
+        return res
+
+    def trace(self, msg, *args, **kwargs):
+        """
+        Log 'msg % args' with severity '_TRACE'.
+        """
+        log("trace stuff logger '%s' msg '%s...'" % (self.name, getStrShort(msg)))
+        if self.isEnabledFor(_TRACE):
+            self._log(_TRACE, msg, args, **kwargs)
+
+    def step(self, msg, *args, **kwargs):
+        """
+        Log 'msg % args' with severity '_STEP'.
+        """
+        log("step stuff logger '%s' msg '%s...'" % (self.name, getStrShort(msg)))
+        if self.isEnabledFor(_STEP):
+            self._log(_STEP, msg, args, **kwargs)
+
+    def setLevelMainHandler(self, level):
+        handl = self.handlers[0]  # get main handler
+        log("setLevelMainHandler %s" % level)
+        handl.setLevel(level)
 
-  def trace(self, msg, *args, **kwargs):
-    """
-    Log 'msg % args' with severity '_TRACE'.
-    """
-    log("trace stuff logger '%s' msg '%s...'" % (self.name, getStrShort(msg)))
-    if self.isEnabledFor(_TRACE):
-      self._log(_TRACE, msg, args, **kwargs)
 
-  def step(self, msg, *args, **kwargs):
+#################################################################
+class UnittestFormatter(
+    LOGI.Formatter, object
+):  # object force new-style classes in logging 0.5.0.5 python 2.6
     """
-    Log 'msg % args' with severity '_STEP'.
+    this formatter prefixes the level name and indents all messages
     """
-    log("step stuff logger '%s' msg '%s...'" % (self.name, getStrShort(msg)))
-    if self.isEnabledFor(_STEP):
-      self._log(_STEP, msg, args, **kwargs)
 
-  def setLevelMainHandler(self, level):
-    handl =  self.handlers[0]  # get main handler
-    log("setLevelMainHandler %s" % level)
-    handl.setLevel(level)
+    def format(self, record):
+        # print "", record.levelname #type(record), dir(record)
+        # nb = len("2018-03-17 12:15:41 :: INFO     :: ")
+        res = super(UnittestFormatter, self).format(record)
+        res = indentUnittest(res)
+        return res
 
 
 #################################################################
-class UnittestFormatter(LOGI.Formatter, object): # object force new-style classes in logging 0.5.0.5 python 2.6
-  """
-  this formatter prefixes level name and indents all messages
-  """
-  def format(self, record):
-    # print "", record.levelname #type(record), dir(record)
-    # nb = len("2018-03-17 12:15:41 :: INFO     :: ")
-    res = super(UnittestFormatter, self).format(record)
-    res = indentUnittest(res)
-    return res
+class DefaultFormatter(
+    LOGI.Formatter, object
+):  # object force new-style classes in logging 0.5.0.5 python 2.6
+    """
+    this formatter prefixes the level name and indents all messages, but INFO stays "as is"
+    """
 
-#################################################################
-class DefaultFormatter(LOGI.Formatter, object): # object force new-style classes in logging 0.5.0.5 python 2.6
-  """
-  this formatter prefixes level name and indents all messages but INFO stay "as it"
-  """
-  def format(self, record):
-    # print "", record.levelname #type(record), dir(record)
-    # nb = len("2018-03-17 12:15:41 :: INFO     :: ")
-    if record.levelname == "INFO":
-      res = record.getMessage()
-    else:
-      res = super(DefaultFormatter, self).format(record)
-      res = indentUnittest(res)
-    return res
+    def format(self, record):
+        # print "", record.levelname #type(record), dir(record)
+        # nb = len("2018-03-17 12:15:41 :: INFO     :: ")
+        if record.levelname == "INFO":
+            res = record.getMessage()
+        else:
+            res = super(DefaultFormatter, self).format(record)
+            res = indentUnittest(res)
+        return res
 
 
 #################################################################
 class UnittestStream(object):
-  """
-  write my stream class
-  only write and flush are used for the streaming
+    """
+    write my stream class
+    only write and flush are used for the streaming
 
-  | https://docs.python.org/2/library/logging.handlers.html
-  | https://stackoverflow.com/questions/31999627/storing-logger-messages-in-a-string
-  """
+    | https://docs.python.org/2/library/logging.handlers.html
+    | https://stackoverflow.com/questions/31999627/storing-logger-messages-in-a-string
+    """
 
-  def __init__(self):
-    self._logs = ''
+    def __init__(self):
+        self._logs = ""
 
-  def getLogs(self):
-    return self._logs
+    def getLogs(self):
+        return self._logs
 
-  def getLogsAndClear(self):
-    res = self._logs
-    self._logs = ''
-    return res
+    def getLogsAndClear(self):
+        res = self._logs
+        self._logs = ""
+        return res
 
-  def write(self, astr):
-    """final method called when message is logged"""
-    # log("UnittestStream.write('%s')" % astr, True) # for debug ...
-    self._logs += astr
+    def write(self, astr):
+        """final method called when message is logged"""
+        # log("UnittestStream.write('%s')" % astr, True) # for debug ...
+        self._logs += astr
 
-  def flush(self):
-    pass
+    def flush(self):
+        pass
 
-  def __str__(self):
-    return self._logs
+    def __str__(self):
+        return self._logs
 
 
 #################################################################
-class StreamHandlerSimple(LOGI.StreamHandler, object): # object force new-style classes in logging 0.5.0.5 python 2.6
-  """
-  A handler class which writes logging records, appropriately formatted,
-  to a stream. Note that this class does not close the stream, as
-  sys.stdout or sys.stderr may be used.
-
-  from logging.StreamHandler class,
-  modified for 'no return' mode line if '...' at end of record message
-  """
-
-  def emit(self, record):
+class StreamHandlerSimple(
+    LOGI.StreamHandler, object
+):  # object force new-style classes in logging 0.5.0.5 python 2.6
     """
-    Emit a record.
-
-    If a formatter is specified, it is used to format the record.
-    The record is then written to the stream with a trailing newline.  If
-    exception information is present, it is formatted using
-    traceback.print_exception and appended to the stream.  If the stream
-    has an 'encoding' attribute, it is used to determine how to do the
-    output to the stream.
+    A handler class which writes logging records, appropriately formatted,
+    to a stream. Note that this class does not close the stream, as
+    sys.stdout or sys.stderr may be used.
+
+    from logging.StreamHandler class,
+    modified for 'no return' mode line if '...' at end of record message
     """
-    # log("StreamHandlerSimple.emit('%s')" % record, True) # for debug ...
-    try:
-      msg = self.format(record)
-      stream = self.stream
-      fs = '%s\n'
-      ufs = u'%s\n'
-      if not _unicode:  # if no unicode support...
-        stream.write(fs % msg)
-      else:
-        try:
-          if (isinstance(msg, unicode) and
-            getattr(stream, 'encoding', None)):
-            # ufs = u'%s\n'
-            try:
-              stream.write(ufs % msg)
-            except UnicodeEncodeError:
-              # Printing to terminals sometimes fails. For example,
-              # with an encoding of 'cp1251', the above write will
-              # work if written to a stream opened or wrapped by
-              # the codecs module, but fail when writing to a
-              # terminal even when the codepage is set to cp1251.
-              # An extra encoding step seems to be needed.
-              stream.write((ufs % msg).encode(stream.encoding))
-          else:
-            stream.write(fs % msg)
-        except UnicodeError:
-          stream.write(fs % msg.encode("UTF-8"))
-      self.flush()
-    except (KeyboardInterrupt, SystemExit):
-      raise
-    except:
-      self.handleError(record)
 
+    def emit(self, record):
+        """
+        Emit a record.
+
+        If a formatter is specified, it is used to format the record.
+        The record is then written to the stream with a trailing newline.  If
+        exception information is present, it is formatted using
+        traceback.print_exception and appended to the stream.  If the stream
+        has an 'encoding' attribute, it is used to determine how to do the
+        output to the stream.
+        """
+        # log("StreamHandlerSimple.emit('%s')" % record, True) # for debug ...
+        try:
+            msg = self.format(record)
+            stream = self.stream
+            fs = "%s\n"
+            ufs = u"%s\n"
+            if not _unicode:  # if no unicode support...
+                stream.write(fs % msg)
+            else:
+                try:
+                    if isinstance(msg, unicode) and getattr(stream, "encoding", None):
+                        # ufs = u'%s\n'
+                        try:
+                            stream.write(ufs % msg)
+                        except UnicodeEncodeError:
+                            # Printing to terminals sometimes fails. For example,
+                            # with an encoding of 'cp1251', the above write will
+                            # work if written to a stream opened or wrapped by
+                            # the codecs module, but fail when writing to a
+                            # terminal even when the codepage is set to cp1251.
+                            # An extra encoding step seems to be needed.
+                            stream.write((ufs % msg).encode(stream.encoding))
+                    else:
+                        stream.write(fs % msg)
+                except UnicodeError:
+                    stream.write(fs % msg.encode("UTF-8"))
+            self.flush()
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            self.handleError(record)
 
 
 #################################################################
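
Putting the pieces above together, a minimal sketch (not part of the commit) of the in-memory logger: LoggerSimple supplies step()/trace(), UnittestStream keeps the formatted records in a string. It assumes the salomeTools root is on sys.path and relies on the module-level initialisation performed at import time (see the bottom of this file, later in the diff).

    import src.loggingSimple as LOGSI

    logger = LOGSI.getUnittestLogger()        # LoggerSimple singleton, already initialised on import
    logger.getLogsAndClear()                  # start from an empty in-memory buffer
    logger.step("a STEP message  (INFO - 1)")
    logger.trace("a TRACE message (INFO - 2)")
    print(logger.getLogs())                   # text captured by UnittestStream, ' | ' indented
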
@@ -387,126 +397,129 @@ class StreamHandlerSimple(LOGI.StreamHandler, object): # object force new-style
 # no more need
 #################################################################
 def initLoggerAsDefault(logger, fmt=None, level=None):
-  """
-  init logger as prefixed message and indented message if multi line
-  exept info() outed 'as it' without any format.
-  level could be modified during execution
-  """
-  log("initLoggerAsDefault name=%s\nfmt='%s' level='%s'" % (logger.name, fmt, level))
-  #handler = StreamHandlerSimple(sys.stdout)  # Logging vers console
-  handler = LOGI.StreamHandler(sys.stdout)  # Logging vers console
-  # set_name absent in logging 0.5.0.5 python 2.6
-  handler._name = logger.name + "_console"
-  if fmt is not None:
-    # formatter = UnittestFormatter(fmt, "%y-%m-%d %H:%M:%S")
-    formatter = DefaultFormatter(fmt, "%y-%m-%d %H:%M:%S")
-    handler.setFormatter(formatter)
-  handler.idCommandHandlers = 0
-  logger.addHandler(handler)
-  # as RootLogger is level WARNING
-  # my logger is not notset but low, handlers needs setlevel greater
-  logger.setLevel(LOGI.DEBUG)
-  # import src/debug as DBG
-  # tmp = (logger.getEffectiveLevel(), LOGI.NOTSET, logger.level, logger.parent.level)
-  # DBG.write("logger levels tmp, True)
-  if level is not None:  # level could be modified during execution
-    handler.setLevel(level)  # on screen log as user wants
-  else:
-    handler.setLevel(LOGI.INFO)  # on screen no log step, which are in xml files
-  return
+    """
+    init logger with prefixed messages, indented when multi-line,
+    except info() which is output 'as is' without any formatting.
+    level could be modified during execution
+    """
+    log("initLoggerAsDefault name=%s\nfmt='%s' level='%s'" % (logger.name, fmt, level))
+    # handler = StreamHandlerSimple(sys.stdout)  # Logging vers console
+    handler = LOGI.StreamHandler(sys.stdout)  # Logging vers console
+    # set_name absent in logging 0.5.0.5 python 2.6
+    handler._name = logger.name + "_console"
+    if fmt is not None:
+        # formatter = UnittestFormatter(fmt, "%y-%m-%d %H:%M:%S")
+        formatter = DefaultFormatter(fmt, "%y-%m-%d %H:%M:%S")
+        handler.setFormatter(formatter)
+    handler.idCommandHandlers = 0
+    logger.addHandler(handler)
+    # as RootLogger is level WARNING
+    # my logger is not notset but low, handlers needs setlevel greater
+    logger.setLevel(LOGI.DEBUG)
+    # import src/debug as DBG
+    # tmp = (logger.getEffectiveLevel(), LOGI.NOTSET, logger.level, logger.parent.level)
+    # DBG.write("logger levels tmp, True)
+    if level is not None:  # level could be modified during execution
+        handler.setLevel(level)  # on screen log as user wants
+    else:
+        handler.setLevel(LOGI.INFO)  # on screen no log step, which are in xml files
+    return
 
 
 def initLoggerAsUnittest(logger, fmt=None, level=None):
-  """
-  init logger as silent on stdout/stderr
-  used for retrieve messages in memory for post execution unittest
-  https://docs.python.org/2/library/logging.handlers.html
-  """
-  log("initLoggerAsUnittest name=%s\nfmt='%s' level='%s'" % (logger.name, fmt, level))
-  stream = UnittestStream()
-  handler = LOGI.StreamHandler(stream)  # Logging vers stream
-  # set_name absent in logging 0.5.0.5 python 2.6
-  handler._name = logger.name + "_unittest"
-  if fmt is not None:
-    # formatter = LOGI.Formatter(fmt, "%Y-%m-%d %H:%M:%S")
-    formatter = UnittestFormatter(fmt, "%Y-%m-%d %H:%M:%S")
-    handler.setFormatter(formatter)
-  handler.idCommandHandlers = 0
-  logger.addHandler(handler)
-  logger.stream = stream
-  logger.getLogs = stream.getLogs
-  logger.getLogsAndClear = stream.getLogsAndClear
-  if level is not None:
-    logger.setLevel(level)
-  else:
-    logger.setLevel(LOGI.DEBUG)
+    """
+    init logger as silent on stdout/stderr,
+    used to retrieve messages in memory for post-execution unittest
+    https://docs.python.org/2/library/logging.handlers.html
+    """
+    log("initLoggerAsUnittest name=%s\nfmt='%s' level='%s'" % (logger.name, fmt, level))
+    stream = UnittestStream()
+    handler = LOGI.StreamHandler(stream)  # Logging vers stream
+    # set_name absent in logging 0.5.0.5 python 2.6
+    handler._name = logger.name + "_unittest"
+    if fmt is not None:
+        # formatter = LOGI.Formatter(fmt, "%Y-%m-%d %H:%M:%S")
+        formatter = UnittestFormatter(fmt, "%Y-%m-%d %H:%M:%S")
+        handler.setFormatter(formatter)
+    handler.idCommandHandlers = 0
+    logger.addHandler(handler)
+    logger.stream = stream
+    logger.getLogs = stream.getLogs
+    logger.getLogsAndClear = stream.getLogsAndClear
+    if level is not None:
+        logger.setLevel(level)
+    else:
+        logger.setLevel(LOGI.DEBUG)
 
 
 def getDefaultLogger():
-  log("getDefaultLogger %s" % _loggerDefaultName)
-  # case multithread may be problem as not LOGI._acquireLock()
-  previousClass = LOGI._loggerClass
-  LOGI.setLoggerClass(LoggerSimple)  # to get LoggerSimple instance with trace etc.
-  res = LOGI.getLogger(_loggerDefaultName)
-  LOGI.setLoggerClass(previousClass)
-  return res
+    log("getDefaultLogger %s" % _loggerDefaultName)
+    # case multithread may be problem as not LOGI._acquireLock()
+    previousClass = LOGI._loggerClass
+    LOGI.setLoggerClass(LoggerSimple)  # to get LoggerSimple instance with trace etc.
+    res = LOGI.getLogger(_loggerDefaultName)
+    LOGI.setLoggerClass(previousClass)
+    return res
 
 
 def getUnittestLogger():
-  log("getUnittestLogger %s" % _loggerUnittestName)
-  # case multithread may be problem as not LOGI._acquireLock()
-  previousClass = LOGI._loggerClass
-  LOGI.setLoggerClass(LoggerSimple)  # to get LoggerSimple instance with trace etc.
-  res = LOGI.getLogger(_loggerUnittestName)
-  LOGI.setLoggerClass(previousClass)
-  return res
+    log("getUnittestLogger %s" % _loggerUnittestName)
+    # case multithread may be problem as not LOGI._acquireLock()
+    previousClass = LOGI._loggerClass
+    LOGI.setLoggerClass(LoggerSimple)  # to get LoggerSimple instance with trace etc.
+    res = LOGI.getLogger(_loggerUnittestName)
+    LOGI.setLoggerClass(previousClass)
+    return res
 
 
 #################################################################
 # small tests as demonstration, see unittest also
 #################################################################
 def testLogger_2(logger):
-  """small test"""
-  # print getStrDirLogger(logger)
-  logger.debug('test logger debug')
-  logger.trace('test logger trace')
-  logger.info('test logger info')
-  logger.warning('test logger warning')
-  logger.error('test logger error')
-  logger.critical('test logger critical')
-  logger.info('\ntest logger info:\n- second line\n- third line\n')
-  logger.warning('test logger warning:\n- second line\n- third line')
+    """small test"""
+    # print getStrDirLogger(logger)
+    logger.debug("test logger debug")
+    logger.trace("test logger trace")
+    logger.info("test logger info")
+    logger.warning("test logger warning")
+    logger.error("test logger error")
+    logger.critical("test logger critical")
+    logger.info("\ntest logger info:\n- second line\n- third line\n")
+    logger.warning("test logger warning:\n- second line\n- third line")
 
-def testMain_2():
-  print("\n**** DEFAULT logger")
-  logdef = getDefaultLogger()
-  # use of setColorLevelname <color>...<reset>, so do not use %(levelname)-8s
-  initLoggerAsDefault(logdef, '%(levelname)-8s :: %(message)s', level=LOGI.DEBUG)
-  testLogger_2(logdef)
 
-  print("\n**** UNITTEST logger")
-  loguni = getUnittestLogger()
-  initLoggerAsUnittest(loguni, '%(asctime)s :: %(levelname)-8s :: %(message)s', level=LOGI.DEBUG)
-  testLogger_2(loguni)  # is silent
-  # log("loguni.getLogs():\n%s" % loguni.getLogs())
-  print("loguni.streamUnittest:\n%s" % loguni.getLogs())
+def testMain_2():
+    print("\n**** DEFAULT logger")
+    logdef = getDefaultLogger()
+    # use of setColorLevelname <color>...<reset>, so do not use %(levelname)-8s
+    initLoggerAsDefault(logdef, "%(levelname)-8s :: %(message)s", level=LOGI.DEBUG)
+    testLogger_2(logdef)
+
+    print("\n**** UNITTEST logger")
+    loguni = getUnittestLogger()
+    initLoggerAsUnittest(
+        loguni, "%(asctime)s :: %(levelname)-8s :: %(message)s", level=LOGI.DEBUG
+    )
+    testLogger_2(loguni)  # is silent
+    # log("loguni.getLogs():\n%s" % loguni.getLogs())
+    print("loguni.streamUnittest:\n%s" % loguni.getLogs())
 
 
 #################################################################
 # in production, or not (if __main__)
 #################################################################
 if __name__ == "__main__":
-  # for example, not in production
-  # get path to salomeTools sources
-  curdir = os.path.dirname(os.path.dirname(__file__))
-  # Make the src & commands package accessible from all code
-  sys.path.insert(0, curdir)
-  testMain_2()
-  # here we have sys.exit()
+    # for example, not in production
+    # get path to salomeTools sources
+    curdir = os.path.dirname(os.path.dirname(__file__))
+    # Make the src & commands package accessible from all code
+    sys.path.insert(0, curdir)
+    testMain_2()
+    # here we have sys.exit()
 else:
-  # in production
-  # get two LoggerSat instance used in salomeTools, no more needed.
-  _loggerDefault = getDefaultLogger()
-  _loggerUnittest = getUnittestLogger()
-  initLoggerAsDefault(_loggerDefault, '%(levelname)-8s :: %(message)s')
-  initLoggerAsUnittest(_loggerUnittest, '%(asctime)s :: %(levelname)s :: %(message)s')
+    # in production
+    # get two LoggerSat instance used in salomeTools, no more needed.
+    _loggerDefault = getDefaultLogger()
+    _loggerUnittest = getUnittestLogger()
+    initLoggerAsDefault(_loggerDefault, "%(levelname)-8s :: %(message)s")
+    initLoggerAsUnittest(_loggerUnittest, "%(asctime)s :: %(levelname)s :: %(message)s")
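
End of the loggingSimple diff. As a usage note (illustrative, not part of the commit): once the module is imported, the default logger prints INFO lines verbatim while other levels get the level prefix and ' | ' indentation, per DefaultFormatter and initLoggerAsDefault above. Same sys.path assumption as before.

    import src.loggingSimple as LOGSI

    logger = LOGSI.getDefaultLogger()
    logger.info("printed as is, no prefix")
    logger.warning("prefixed with 'WARNING  :: '\ncontinuation lines are indented with ' | '")
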
index bbcf8936866588aa8290797ac2df6176f11546d5..5cad144feeca0629354d1e97ad533afe8f070e2b 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -29,29 +29,30 @@ import pprint as PP
 from . import printcolors
 
 import src
-import src.debug as DBG # Easy print stderr (for DEBUG only)
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
+
 
 class OptResult(object):
     """
     An instance of this class will be the object manipulated
     in code of all salomeTools commands
     The aim of this class is to have an elegant syntax to manipulate the options.
-    
-    | Example:        
+
+    | Example:
     | >> options, remainderArgs = command.parseArguments(args)
     | >> print(options.output_verbose_level)
     | >> 'INFO'
     """
+
     def __init__(self):
-        """Initialization
-        """
+        """Initialization"""
         self.__dict__ = dict()
 
     def __getattr__(self, name):
         """
-        Overwrite of the __getattr__ function 
+        Overwrite of the __getattr__ function
         to customize it for option usage
-        
+
         :param name: (str) The attribute to get the value.
         :return: (str int list boolean level)
           the value corresponding to the attribute.
@@ -63,9 +64,9 @@ class OptResult(object):
 
     def __setattr__(self, name, value):
         """
-        Overwrite of the __setattr__ function 
+        Overwrite of the __setattr__ function
         to customize it for option usage
-        
+
         :param name: (str) The attribute to set.
         :param value: (str) The value  corresponding to the attribute.
         :return: None
@@ -77,14 +78,15 @@ class OptResult(object):
         res = "%s(\n %s\n)" % (self.__class__.__name__, aStr[1:-1])
         return res
 
+
 class Options(object):
     """
     Class to manage all salomeTools options
     """
+
     def __init__(self):
-        """Initialization
-        """
-        # The options field stocks all options of a command 
+        """Initialization"""
+        # The options field stocks all options of a command
         # in a list that contains dicts
         self.PROPERTY_EXPRESSION = "^.+:.+$"
         self.options = []
@@ -94,78 +96,85 @@ class Options(object):
         self.default = None
         self.results = {}
 
-    def add_option(self, shortName, longName, optionType, destName, helpString="", default=None):
+    def add_option(
+        self, shortName, longName, optionType, destName, helpString="", default=None
+    ):
         """
         Add an option to a command. It gets all attributes
         of an option and append it in the options field
-        
-        :param shortName: (str) 
+
+        :param shortName: (str)
           The short name of the option (as '-l' for level option).
-        :param longName: (str) 
+        :param longName: (str)
           The long name of the option (as '--level' for level option).
         :param optionType: (str) The type of the option (ex "int").
         :param destName: (str) The name that will be used in the code.
-        :param helpString: (str) 
-          The text to display when user ask for help on a command.     
+        :param helpString: (str)
+          The text to display when user ask for help on a command.
         :return: None
         """
-        tmp = [o['shortName'] for o in self.options if o['shortName'] != '']
-        if shortName in tmp: 
-          raise Exception("option '-%s' existing yet" % shortName)
-        tmp = [o['longName'] for o in self.options if o['longName'] != '']
-        if longName in tmp: 
-          raise Exception("option '--%s' existing yet" % longName)
+        tmp = [o["shortName"] for o in self.options if o["shortName"] != ""]
+        if shortName in tmp:
+            raise Exception("option '-%s' existing yet" % shortName)
+        tmp = [o["longName"] for o in self.options if o["longName"] != ""]
+        if longName in tmp:
+            raise Exception("option '--%s' existing yet" % longName)
 
         option = dict()
-        option['shortName'] = shortName
-        option['longName'] = longName
+        option["shortName"] = shortName
+        option["longName"] = longName
 
         if optionType not in self.availableOptions:
-          raise Exception("error optionType '%s' not available." % optionType)
+            raise Exception("error optionType '%s' not available." % optionType)
+
+        option["optionType"] = optionType
+        option["destName"] = destName
+        option["helpString"] = helpString
+        option["result"] = default
 
-        option['optionType'] = optionType
-        option['destName'] = destName
-        option['helpString'] = helpString
-        option['result'] = default
-        
         self.options.append(option)
 
         # add option properties unconditionally if 'products' option added
         if [shortName, longName] == ["p", "products"]:
-          self.add_option('', 'properties', 'properties', 'properties',
-                          _('Optional: Filter the products by their properties.\n\tSyntax: '
-                          '--properties <property>:<value>'))
-
+            self.add_option(
+                "",
+                "properties",
+                "properties",
+                "properties",
+                _(
+                    "Optional: Filter the products by their properties.\n\tSyntax: "
+                    "--properties <property>:<value>"
+                ),
+            )
 
-        
     def getDetailOption(self, option):
         """
-        for convenience 
-        
+        for convenience
+
         :return: (tuple) 4-elements (shortName, longName, optionType, helpString)
         """
-        oos = option['shortName']
-        ool = option['longName']
-        oot = option['optionType']
-        ooh = option['helpString']
+        oos = option["shortName"]
+        ool = option["longName"]
+        oot = option["optionType"]
+        ooh = option["helpString"]
         return (oos, ool, oot, ooh)
 
     def get_help(self):
         """
-        Returns all options stored in self.options 
+        Returns all options stored in self.options
         as help message colored string
-        
+
         :return: (str) colored string
         """
         msg = ""
         # Do nothing if there are no options
 
-        #there is -h option, always
-        #if len(self.options) == 0:
+        # there is -h option, always
+        # if len(self.options) == 0:
         #    return _("No available options.")
 
-        # for all options, gets its values. 
-        # "shortname" is an mandatory field of the options, could be '' 
+        # for all options, get their values.
+        # "shortname" is a mandatory field of the options; it may be ''
         msg += printcolors.printcHeader(_("Available options are:"))
         for option in self.options:
             oos, ool, oot, ooh = self.getDetailOption(option)
@@ -173,156 +182,167 @@ class Options(object):
                 msg += "\n -%1s, --%s (%s)\n" % (oos, ool, oot)
             else:
                 msg += "\n --%s (%s)\n" % (ool, oot)
-                
+
             msg += "%s\n" % self.indent(ooh, 10)
         return msg
 
     def indent(self, text, amount, car=" "):
         """indent multi lines message"""
         padding = amount * car
-        return ''.join(padding + line for line in text.splitlines(True))
-               
+        return "".join(padding + line for line in text.splitlines(True))
+
     def parse_args(self, argList=None):
         """
-        Instantiates the class OptResult 
+        Instantiates the class OptResult
         that gives access to all options in the code
-        
+
         :param argList: (list) the raw list of arguments that were passed
-        :return: (OptResult, list) as (optResult, args) 
-          optResult is the option instance to manipulate in the code. 
-          args is the full raw list of passed options 
+        :return: (OptResult, list) as (optResult, args)
+          optResult is the option instance to manipulate in the code.
+          args is the full raw list of passed options
         """
         # see https://pymotw.com/2/getopt/
         if argList is None:
             argList = sys.argv[1:]
-        
+
         DBG.write("parse_args", argList)
         # DBG.write("options", self.options)
-        # format shortNameOption and longNameOption 
+        # format shortNameOption and longNameOption
         # to make right arguments to getopt.getopt function
         shortNameOption = ""
         longNameOption = []
         for option in self.options:
-            shortNameOption = shortNameOption + option['shortName']
-            if option['shortName'] != "" and option['optionType'] not in self.noArgOptions:
+            shortNameOption = shortNameOption + option["shortName"]
+            if (
+                option["shortName"] != ""
+                and option["optionType"] not in self.noArgOptions
+            ):
                 shortNameOption = shortNameOption + ":"
 
-            if option['longName'] != "":
-                if option['optionType'] not in self.noArgOptions:
-                    longNameOption.append(option['longName'] + "=")
+            if option["longName"] != "":
+                if option["optionType"] not in self.noArgOptions:
+                    longNameOption.append(option["longName"] + "=")
                 else:
-                    longNameOption.append(option['longName'])
+                    longNameOption.append(option["longName"])
 
-        # call to getopt.getopt function to get the option 
+        # call to getopt.getopt function to get the option
         # passed in the command regarding the available options
         try:
-          optlist, args = getopt.getopt(argList, shortNameOption, longNameOption)
+            optlist, args = getopt.getopt(argList, shortNameOption, longNameOption)
         except Exception as e:
-          msg = str(e) + " on '%s'\n\n" % " ".join(argList) + self.get_help()
-          raise Exception(msg)
+            msg = str(e) + " on '%s'\n\n" % " ".join(argList) + self.get_help()
+            raise Exception(msg)
 
         # instantiate and completing the optResult that will be returned
         optResult = OptResult()
         for option in self.options:
-            shortOption = "-" + option['shortName']
-            longOption = "--" + option['longName']
-            optionType = option['optionType']
+            shortOption = "-" + option["shortName"]
+            longOption = "--" + option["longName"]
+            optionType = option["optionType"]
             for opt in optlist:
                 if opt[0] in [shortOption, longOption]:
                     if optionType == "string":
-                        option['result'] = opt[1]
+                        option["result"] = opt[1]
                     elif optionType == "boolean":
-                        option['result'] = True
+                        option["result"] = True
                     elif optionType == "noboolean":
-                        option['result'] = False
+                        option["result"] = False
                     elif optionType == "int":
-                        option['result'] = int(opt[1])
+                        option["result"] = int(opt[1])
                     elif optionType == "float":
-                        option['result'] = float(opt[1])
+                        option["result"] = float(opt[1])
                     elif optionType == "long":
-                        option['result'] = long(opt[1])
+                        option["result"] = long(opt[1])
                     elif optionType == "list":
-                        if option['result'] is None:
-                            option['result'] = list()
-                        option['result'].append(opt[1])
-                    elif optionType == "level": #logger logging levels
-                        option['result'] = self.filterLevel(opt[1])
+                        if option["result"] is None:
+                            option["result"] = list()
+                        option["result"].append(opt[1])
+                    elif optionType == "level":  # logger logging levels
+                        option["result"] = self.filterLevel(opt[1])
                     elif optionType == "list2":
-                        if option['result'] is None:
-                            option['result'] = list()
-                        option['result'] = self.filterList2(opt[1])
+                        if option["result"] is None:
+                            option["result"] = list()
+                        option["result"] = self.filterList2(opt[1])
                     elif optionType == "properties":
-                        option['result'] = self.filterProperties(opt[1])
+                        option["result"] = self.filterProperties(opt[1])
 
-            optResult.__setattr__(option['destName'], option['result'])
-            # free the option in order to be able to make 
+            optResult.__setattr__(option["destName"], option["result"])
+            # free the option in order to be able to make
             # a new free call of options (API case)
-            option['result'] = None
+            option["result"] = None
 
-        self.results = {"optlist": optlist, "optResult": optResult, "args": args, "argList": argList}
+        self.results = {
+            "optlist": optlist,
+            "optResult": optResult,
+            "args": args,
+            "argList": argList,
+        }
         DBG.write("results", self.results)
         return optResult, args
-        
+
     def filterLevel(self, aLevel):
-      """filter level logging values"""
-      import src.loggingSimple as LOG
-      aLev = aLevel.upper()
-      knownLevels = LOG._knownLevels
-      maxLen = max([len(i) for i in knownLevels])
-      for i in range(maxLen):
-        for lev in knownLevels:
-          if aLev == lev[:i]:
-            DBG.write("filterLevel", "%s -> %s" % (aLevel, lev)) 
-            return lev
-      msg = "Unknown level '%s', accepted are:\n%s" % (aLev, ", ".join(knownLevels))
-      raise Exception(msg)
-      
+        """filter level logging values"""
+        import src.loggingSimple as LOG
+
+        aLev = aLevel.upper()
+        knownLevels = LOG._knownLevels
+        maxLen = max([len(i) for i in knownLevels])
+        for i in range(maxLen):
+            for lev in knownLevels:
+                if aLev == lev[:i]:
+                    DBG.write("filterLevel", "%s -> %s" % (aLevel, lev))
+                    return lev
+        msg = "Unknown level '%s', accepted are:\n%s" % (aLev, ", ".join(knownLevels))
+        raise Exception(msg)
+
     def filterList2(self, aStr):
-      """filter a list as 'KERNEL,YACS,etc.'"""
-      aList = aStr.strip().split(",")
-      # fix list leading ',' as ',KERNEL,...'
-      aList = [i for i in aList if i != ""] # split old list leadin "," as ",KERNEL,ETC..."
-      return aList
-      
+        """filter a list as 'KERNEL,YACS,etc.'"""
+        aList = aStr.strip().split(",")
+        # fix list leading ',' as ',KERNEL,...'
+        aList = [
+            i for i in aList if i != ""
+        ]  # split old list with a leading "," as ",KERNEL,ETC..."
+        return aList
+
     def filterProperties(self, aStr):
-      """
-      filter properties values
-
-      example:
-      >> sat -v 9 prepare $TRG -p KERNEL --properties is_SALOME_module:yes
-      """
-      msg = _('The "--properties" option must have the following syntax:\n--properties <property>:<value>')
-      oExpr = re.compile(self.PROPERTY_EXPRESSION)
-      if not oExpr.search(aStr):
-        raise Exception(msg)
-      res = aStr.split(":")
-      if len(res) != 2:
-        raise Exception(msg)
-      return res
-
-    def __repr__(self): 
-      """
-      repr for only self.options and self.results (if present)
-      """
-      aDict = {'options': self.options, 'results': self.results}
-      aStr = PP.pformat(aDict)
-      res = "%s(\n %s\n)" % (self.__class__.__name__, aStr[1:-1])
-      return res
-        
-    def __str__(self): 
-      """
-      str for only resume expected self.options
-      """
-      #aDict = [(k["longName"], k["shortName", k["helpString"]) for k in self.options}
-      #aList = [(k, self.options[k]) for k in sorted(self.options.keys())]
-      aDict = {}
-      for o in self.options:
-        aDict[o["longName"]] = (o["shortName"], o["helpString"])
-      aStr = PP.pformat(aDict)
-      res = "%s(\n %s)" % (self.__class__.__name__, aStr[1:-1])
-      return res
-        
-    def debug_write(self):
-      DBG.write("options and results", self, True)
+        """
+        filter properties values
+
+        example:
+        >> sat -v 9 prepare $TRG -p KERNEL --properties is_SALOME_module:yes
+        """
+        msg = _(
+            'The "--properties" option must have the following syntax:\n--properties <property>:<value>'
+        )
+        oExpr = re.compile(self.PROPERTY_EXPRESSION)
+        if not oExpr.search(aStr):
+            raise Exception(msg)
+        res = aStr.split(":")
+        if len(res) != 2:
+            raise Exception(msg)
+        return res
 
+    def __repr__(self):
+        """
+        repr for only self.options and self.results (if present)
+        """
+        aDict = {"options": self.options, "results": self.results}
+        aStr = PP.pformat(aDict)
+        res = "%s(\n %s\n)" % (self.__class__.__name__, aStr[1:-1])
+        return res
 
+    def __str__(self):
+        """
+        str for only resume expected self.options
+        """
+        # aDict = [(k["longName"], k["shortName", k["helpString"]) for k in self.options}
+        # aList = [(k, self.options[k]) for k in sorted(self.options.keys())]
+        aDict = {}
+        for o in self.options:
+            aDict[o["longName"]] = (o["shortName"], o["helpString"])
+        aStr = PP.pformat(aDict)
+        res = "%s(\n %s)" % (self.__class__.__name__, aStr[1:-1])
+        return res
+
+    def debug_write(self):
+        DBG.write("options and results", self, True)
index 02342c056a8e6087fda6b6e89ca66e008c73f73a..664c156dfac38fe6e73573b55fca5fa58458386d 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 #  You should have received a copy of the GNU Lesser General Public
 #  License along with this library; if not, write to the Free Software
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-'''In this file is stored the mechanism that manage color prints in the terminal
-'''
+"""In this file is stored the mechanism that manage color prints in the terminal
+"""
 
 
 # define constant to use in scripts
-COLOR_ERROR = 'ERROR'
-COLOR_WARNING = 'WARNING'
-COLOR_SUCCESS = 'SUCCESS'
-COLOR_LABEL = 'LABEL'
-COLOR_HEADER = 'HEADER'
-COLOR_INFO = 'INFO'
-COLOR_HIGLIGHT = 'HIGHLIGHT'
+COLOR_ERROR = "ERROR"
+COLOR_WARNING = "WARNING"
+COLOR_SUCCESS = "SUCCESS"
+COLOR_LABEL = "LABEL"
+COLOR_HEADER = "HEADER"
+COLOR_INFO = "INFO"
+COLOR_HIGLIGHT = "HIGHLIGHT"
 
 # the color map to use to print the colors
 __colormap__ = {
-    COLOR_ERROR: '\033[1m\033[31m',
-    COLOR_SUCCESS: '\033[1m\033[32m',
-    COLOR_WARNING: '\033[33m',
-    COLOR_HEADER: '\033[34m',
-    COLOR_INFO: '\033[35m',
-    COLOR_LABEL: '\033[36m',
-    COLOR_HIGLIGHT: '\033[97m\033[43m'
+    COLOR_ERROR: "\033[1m\033[31m",
+    COLOR_SUCCESS: "\033[1m\033[32m",
+    COLOR_WARNING: "\033[33m",
+    COLOR_HEADER: "\033[34m",
+    COLOR_INFO: "\033[35m",
+    COLOR_LABEL: "\033[36m",
+    COLOR_HIGLIGHT: "\033[97m\033[43m",
 }
 
 # list of available codes
-__code_range__ = ([1, 4] + list(range(30, 38)) + list(range(40, 48))
-                + list(range(90, 98)) + list(range(100, 108)))
+__code_range__ = (
+    [1, 4]
+    + list(range(30, 38))
+    + list(range(40, 48))
+    + list(range(90, 98))
+    + list(range(100, 108))
+)
+
+
+def printc(txt, code=""):
+    """print a text with colors
 
-def printc(txt, code=''):
-    '''print a text with colors
-    
     :param txt str: The text to be printed.
     :param code str: The color to use.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     # no code means 'auto mode' (works only for OK, KO, NO and ERR*)
-    if code == '':
+    if code == "":
         striptxt = txt.strip().upper()
         if striptxt == "OK":
             code = COLOR_SUCCESS
@@ -62,108 +68,117 @@ def printc(txt, code=''):
             return txt
 
     # no code => output the original text
-    if code not in __colormap__.keys() or __colormap__[code] == '':
+    if code not in __colormap__.keys() or __colormap__[code] == "":
         return txt
 
-    return __colormap__[code] + txt + '\033[0m'
+    return __colormap__[code] + txt + "\033[0m"
+
 
 def printcInfo(txt):
-    '''print a text info color
-    
+    """print a text info color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_INFO)
 
+
 def printcError(txt):
-    '''print a text error color
-    
+    """print a text error color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_ERROR)
 
+
 def printcWarning(txt):
-    '''print a text warning color
-    
+    """print a text warning color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_WARNING)
 
+
 def printcHeader(txt):
-    '''print a text header color
-    
+    """print a text header color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_HEADER)
 
+
 def printcLabel(txt):
-    '''print a text label color
-    
+    """print a text label color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_LABEL)
 
+
 def printcSuccess(txt):
-    '''print a text success color
-    
+    """print a text success color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_SUCCESS)
 
+
 def printcHighlight(txt):
-    '''print a text highlight color
-    
+    """print a text highlight color
+
     :param txt str: The text to be printed.
     :return: The colored text.
     :rtype: str
-    '''
+    """
     return printc(txt, COLOR_HIGLIGHT)
 
+
 def cleancolor(message):
-    '''remove color from a colored text.
-    
+    """remove color from a colored text.
+
     :param message str: The text to be cleaned.
     :return: The cleaned text.
     :rtype: str
-    '''
+    """
     if message == None:
         return message
-    
-    message = message.replace('\033[0m', '')
+
+    message = message.replace("\033[0m", "")
     for i in __code_range__:
-        message = message.replace('\033[%dm' % i, '')
+        message = message.replace("\033[%dm" % i, "")
     return message
 
+
 def print_value(logger, label, value, level=1, suffix=""):
-    '''shortcut method to print a label and a value with the info color
-    
+    """shortcut method to print a label and a value with the info color
+
     :param logger class logger: the logger instance.
     :param label int: the label to print.
     :param value str: the value to print.
     :param level int: the level of verboseness.
     :param suffix str: the suffix to add at the end.
-    '''
+    """
     if type(value) is list:
         skip = "\n     "
         strValue = ""
         i = 0
         for v in value:
-          strValue += "%15s, " % str(v)
-          i += 1
-          if i >= 5:
-            strValue += skip
-            i = 0
+            strValue += "%15s, " % str(v)
+            i += 1
+            if i >= 5:
+                strValue += skip
+                i = 0
         if len(value) > 5:
             strValue = skip + strValue
     else:
@@ -174,27 +189,28 @@ def print_value(logger, label, value, level=1, suffix=""):
     else:
         logger.write("  %s = %s %s\n" % (label, strValue, suffix), level)
 
+
 def print_color_range(start, end):
-    '''print possible range values for colors
-    
+    """print possible range values for colors
+
     :param start int: The smaller value.
     :param end int: The bigger value.
-    '''
-    for k in range(start, end+1):
-        print("\033[%dm%3d\033[0m" % (k, k),)
+    """
+    for k in range(start, end + 1):
+        print(
+            "\033[%dm%3d\033[0m" % (k, k),
+        )
     print
 
+
 # This method prints the color map
 def print_color_map():
-    '''This method prints the color map
-    '''
+    """This method prints the color map"""
     print("colormap:")
     print("{")
     for k in sorted(__colormap__.keys()):
-        codes = __colormap__[k].split('\033[')
+        codes = __colormap__[k].split("\033[")
         codes = filter(lambda l: len(l) > 0, codes)
         codes = map(lambda l: l[:-1], codes)
-        print(printc("  %s: '%s', " % (k, ';'.join(codes)), k))
+        print(printc("  %s: '%s', " % (k, ";".join(codes)), k))
     print("}")
-
-
index 8a456eb3d6b604b9dc034bd7d1edf92d64b26363..40f740c6948be6c1bcb0386be34dc24969a664ff 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -29,19 +29,20 @@ import src
 import src.debug as DBG
 import src.versionMinorMajorPatch as VMMP
 
-AVAILABLE_VCS = ['git', 'svn', 'cvs']
+AVAILABLE_VCS = ["git", "svn", "cvs"]
 
-CONFIG_FILENAME = "sat-config-" # trace product depends version(s)
-PRODUCT_FILENAME = "sat-product-" # trace product compile config
+CONFIG_FILENAME = "sat-config-"  # traces the product's dependency version(s)
+PRODUCT_FILENAME = "sat-product-"  # traces the product compile config
 config_expression = "^config-\d+$"
 
+
 def get_product_config(config, product_name, with_install_dir=True):
     """Get the specific configuration of a product from the global configuration
-    
+
     :param config Config: The global configuration
     :param product_name str: The name of the product
-    :param with_install_dir boolean: If false, do not provide an install 
-                                     directory (at false only for internal use 
+    :param with_install_dir boolean: If false, do not provide an install
+                                     directory (at false only for internal use
                                      of the function check_config_exists)
     :return: the specific configuration of the product
     :rtype: Config
@@ -49,171 +50,176 @@ def get_product_config(config, product_name, with_install_dir=True):
 
     # Get the version of the product from the application definition
     version = config.APPLICATION.products[product_name]
-    
+
     # Define debug and dev modes
     # Get the tag if a dictionary is given in APPLICATION.products for the
-    # current product 
-    debug = 'no'
-    dev = 'no'
-    hpc = 'no'
-    verbose = 'no'
-    base = 'maybe'
+    # current product
+    debug = "no"
+    dev = "no"
+    hpc = "no"
+    verbose = "no"
+    base = "maybe"
     section = None
 
     # if no version, then take the default one defined in the application
-    if isinstance(version, bool) or isinstance(version, str): 
+    if isinstance(version, bool) or isinstance(version, str):
         # in this case tag is mandatory, not debug, verbose, dev
-        if 'debug' in config.APPLICATION:
+        if "debug" in config.APPLICATION:
             debug = config.APPLICATION.debug
-        if 'verbose' in config.APPLICATION:
+        if "verbose" in config.APPLICATION:
             verbose = config.APPLICATION.verbose
-        if 'dev' in config.APPLICATION:
+        if "dev" in config.APPLICATION:
             dev = config.APPLICATION.dev
-        if 'hpc' in config.APPLICATION:
+        if "hpc" in config.APPLICATION:
             hpc = config.APPLICATION.hpc
-        if 'base' in config.APPLICATION:
+        if "base" in config.APPLICATION:
             base = config.APPLICATION.base
 
-    # special case for which only the product name is mentionned 
+    # special case for which only the product name is mentioned
     if isinstance(version, bool):
         version = config.APPLICATION.tag
 
     if isinstance(version, src.pyconf.Mapping):
         dic_version = version
         # Get the version/tag
-        if not 'tag' in dic_version:
+        if not "tag" in dic_version:
             version = config.APPLICATION.tag
         else:
             version = dic_version.tag
-        
+
         # Get the debug if any
-        if 'debug' in dic_version:
+        if "debug" in dic_version:
             debug = dic_version.debug
-        elif 'debug' in config.APPLICATION:
+        elif "debug" in config.APPLICATION:
             debug = config.APPLICATION.debug
-        
+
         # Get the verbose if any
-        if 'verbose' in dic_version:
+        if "verbose" in dic_version:
             verbose = dic_version.verbose
-        elif 'verbose' in config.APPLICATION:
+        elif "verbose" in config.APPLICATION:
             verbose = config.APPLICATION.verbose
-        
+
         # Get the dev if any
-        if 'dev' in dic_version:
+        if "dev" in dic_version:
             dev = dic_version.dev
-        elif 'dev' in config.APPLICATION:
+        elif "dev" in config.APPLICATION:
             dev = config.APPLICATION.dev
-        
+
         # Get the hpc if any
-        if 'hpc' in dic_version:
+        if "hpc" in dic_version:
             hpc = dic_version.hpc
-        elif 'hpc' in config.APPLICATION:
+        elif "hpc" in config.APPLICATION:
             hpc = config.APPLICATION.hpc
 
         # Get the base if any
-        if 'base' in dic_version:
+        if "base" in dic_version:
             base = dic_version.base
-        elif 'base' in config.APPLICATION:
+        elif "base" in config.APPLICATION:
             base = config.APPLICATION.base
 
         # Get the section if any
-        if 'section' in dic_version:
+        if "section" in dic_version:
             section = dic_version.section
-    
+
     # this case occurs when version is overwritten, cf sat # 8897
-    if isinstance(version, dict): 
+    if isinstance(version, dict):
         dic_version = version
         # Get the version/tag
-        if not 'tag' in dic_version:
+        if not "tag" in dic_version:
             version = config.APPLICATION.tag
         else:
             version = dic_version["tag"]
-        
+
         # Get the debug if any
-        if 'debug' in dic_version:
+        if "debug" in dic_version:
             debug = dic_version["debug"]
-        elif 'debug' in config.APPLICATION:
+        elif "debug" in config.APPLICATION:
             debug = config.APPLICATION.debug
-        
+
         # Get the verbose if any
-        if 'verbose' in dic_version:
+        if "verbose" in dic_version:
             verbose = dic_version["verbose"]
-        elif 'verbose' in config.APPLICATION:
+        elif "verbose" in config.APPLICATION:
             verbose = config.APPLICATION.verbose
-        
+
         # Get the dev if any
-        if 'dev' in dic_version:
+        if "dev" in dic_version:
             dev = dic_version["dev"]
-        elif 'dev' in config.APPLICATION:
+        elif "dev" in config.APPLICATION:
             dev = config.APPLICATION.dev
-        
+
         # Get the hpc if any
-        if 'hpc' in dic_version:
-            hpc = dic_version['hpc']
-        elif 'hpc' in config.APPLICATION:
+        if "hpc" in dic_version:
+            hpc = dic_version["hpc"]
+        elif "hpc" in config.APPLICATION:
             hpc = config.APPLICATION.hpc
 
         # Get the base if any
-        if 'base' in dic_version:
+        if "base" in dic_version:
             base = dic_version["base"]
 
         # Get the section if any
-        if 'section' in dic_version:
-            section = dic_version['section']
+        if "section" in dic_version:
+            section = dic_version["section"]
 
     vv = version
     # substitute some characters with _ in order to get the correct definition
     # in config.PRODUCTS. This is done because the pyconf tool does not handle
-    # the . and - characters 
-    for c in ".-/": vv = vv.replace(c, "_")
+    # the . and - characters
+    for c in ".-/":
+        vv = vv.replace(c, "_")
 
     prod_info = None
     if product_name in config.PRODUCTS:
         # Search for the product description in the configuration
         prod_info = get_product_section(config, product_name, vv, section)
-        
+
         # get salomeTool version
         prod_info.sat_version = src.get_salometool_version(config)
 
         # merge opt_depend in depend
-        if prod_info is not None and 'opt_depend' in prod_info:
+        if prod_info is not None and "opt_depend" in prod_info:
             for depend in prod_info.opt_depend:
-                if (depend in config.APPLICATION.products) and (depend not in prod_info.depend) :
-                    prod_info.depend.append(depend,'')
-        
+                if (depend in config.APPLICATION.products) and (
+                    depend not in prod_info.depend
+                ):
+                    prod_info.depend.append(depend, "")
 
-        # In case of a product get with a vcs, 
+        # In case of a product retrieved with a VCS,
         # put the tag (equal to the version)
         if prod_info is not None and prod_info.get_source in AVAILABLE_VCS:
-            
-            if prod_info.get_source == 'git':
+
+            if prod_info.get_source == "git":
                 prod_info.git_info.tag = version
-            
-            if prod_info.get_source == 'svn':
+
+            if prod_info.get_source == "svn":
                 prod_info.svn_info.tag = version
-            
-            if prod_info.get_source == 'cvs':
+
+            if prod_info.get_source == "cvs":
                 prod_info.cvs_info.tag = version
-        
-        # In case of a fixed product, 
+
+        # In case of a fixed product,
         # define the install_dir (equal to the version)
-        if prod_info is not None and \
-           (os.path.isdir(version) or version.startswith("/")):
-           # we consider a (fixed) path  existing paths; 
-           # or paths starting with '/' (the objective is to print a correct 
-           # message to the user in case of non existing path.)
+        if prod_info is not None and (
+            os.path.isdir(version) or version.startswith("/")
+        ):
+            # we consider as fixed paths either existing paths
+            # or paths starting with '/' (the objective is to print a correct
+            # message to the user in case of a non-existing path)
             prod_info.install_dir = version
             prod_info.get_source = "fixed"
             prod_info.install_mode = "fixed"
-        
+
         # Check if the product is defined as native in the application
         if prod_info is not None:
             if version == "native":
                 prod_info.get_source = "native"
             elif prod_info.get_source == "native":
-                msg = _("The product %(prod)s has version %(ver)s but is "
-                        "declared as native in its definition" %
-                        { 'prod': prod_info.name, 'ver': version})
+                msg = _(
+                    "The product %(prod)s has version %(ver)s but is "
+                    "declared as native in its definition"
+                    % {"prod": prod_info.name, "ver": version}
+                )
                 raise src.SatException(msg)
 
     # If there is no definition but the product is declared as native,
@@ -231,69 +237,80 @@ def get_product_config(config, product_name, with_install_dir=True):
         prod_info.get_source = "fixed"
         prod_info.addMapping("environ", src.pyconf.Mapping(prod_info), "")
 
-
     # If prod_info is still None, it means that there is no product definition
     # in the config. The user has to provide it.
     if prod_info is None:
-        prod_pyconf_path = src.find_file_in_lpath(product_name + ".pyconf",
-                                                  config.PATHS.PRODUCTPATH)
+        prod_pyconf_path = src.find_file_in_lpath(
+            product_name + ".pyconf", config.PATHS.PRODUCTPATH
+        )
         if not prod_pyconf_path:
-            msg = _("""\
+            msg = (
+                _(
+                    """\
 No definition found for the product %(1)s.
 Please create a %(1)s.pyconf file somewhere in:
-  %(2)s""") % {
-  "1": product_name,
-  "2": PP.pformat(config.PATHS.PRODUCTPATH) }
+  %(2)s"""
+                )
+                % {"1": product_name, "2": PP.pformat(config.PATHS.PRODUCTPATH)}
+            )
         else:
-            msg = _("""\
+            msg = (
+                _(
+                    """\
 No definition corresponding to the version %(1)s was found in the file:
   %(2)s.
-Please add a section in it.""") % {"1" : vv, "2" : prod_pyconf_path}
+Please add a section in it."""
+                )
+                % {"1": vv, "2": prod_pyconf_path}
+            )
         raise src.SatException(msg)
-    
+
     # Set the debug, dev and version keys
     prod_info.debug = debug
     prod_info.verbose = verbose
     prod_info.dev = dev
     prod_info.hpc = hpc
     prod_info.version = version
-    if base != 'maybe':
+    if base != "maybe":
         prod_info.base = base
 
     # Set the archive_info if the product is get in archive mode
     if prod_info.get_source == "archive":
         if not "archive_info" in prod_info:
-            prod_info.addMapping("archive_info",
-                                 src.pyconf.Mapping(prod_info),
-                                 "")
-        if "archive_name" in prod_info.archive_info: 
+            prod_info.addMapping("archive_info", src.pyconf.Mapping(prod_info), "")
+        if "archive_name" in prod_info.archive_info:
             arch_name = prod_info.archive_info.archive_name
         elif "archive_prefix" in prod_info.archive_info:
-            arch_name = prod_info.archive_info.archive_prefix + "-" + version + ".tar.gz"
+            arch_name = (
+                prod_info.archive_info.archive_prefix + "-" + version + ".tar.gz"
+            )
         else:
             # standard name
             arch_name = product_name + "-" + version + ".tar.gz"
 
-        arch_path = src.find_file_in_lpath(arch_name,
-                                           config.PATHS.ARCHIVEPATH)
+        arch_path = src.find_file_in_lpath(arch_name, config.PATHS.ARCHIVEPATH)
         if not arch_path:
             # arch_path is not found. It may generate an error in sat source,
             #                         unless the archive is found in the ftp server
-            prod_info.archive_info.archive_name = arch_name #without path
+            prod_info.archive_info.archive_name = arch_name  # without path
         else:
             prod_info.archive_info.archive_name = arch_path
 
-        
     # If the product compiles with a script, check the script existence
     # and if it is executable
     if product_has_script(prod_info):
         # Check the compil_script key existence
         if "compil_script" not in prod_info:
-            msg = _("""\
+            msg = (
+                _(
+                    """\
 No compilation script found for the product %s.
-Please provide a 'compil_script' key in its definition.""") % product_name
+Please provide a 'compil_script' key in its definition."""
+                )
+                % product_name
+            )
             raise src.SatException(msg)
-        
+
         # Get the path of the script file
         # on Windows '.bat' is expected, on Linux '.sh',
         # but the user may set any script file extension in the pyconf, there is no obligation.
@@ -301,23 +318,30 @@ Please provide a 'compil_script' key in its definition.""") % product_name
         script_name = os.path.basename(script)
         if script == script_name:
             # Only a name is given. Search in the default directory
-            script_path = src.find_file_in_lpath(script_name, config.PATHS.PRODUCTPATH, "compil_scripts")
+            script_path = src.find_file_in_lpath(
+                script_name, config.PATHS.PRODUCTPATH, "compil_scripts"
+            )
             if not script_path:
                 msg = _("Compilation script %s not found in") % script_name
-                DBG.tofix(msg, config.PATHS.PRODUCTPATH, True) # say where searched
+                DBG.tofix(msg, config.PATHS.PRODUCTPATH, True)  # say where searched
                 script_path = "%s_(Not_Found_by_Sat!!)" % script_name
             prod_info.compil_script = script_path
 
-       
         # Check that the script is executable
-        if os.path.exists(prod_info.compil_script) and not os.access(prod_info.compil_script, os.X_OK):
-            DBG.tofix("Compilation script  file is not in 'execute mode'", prod_info.compil_script, True)
-    
+        if os.path.exists(prod_info.compil_script) and not os.access(
+            prod_info.compil_script, os.X_OK
+        ):
+            DBG.tofix(
+                "Compilation script  file is not in 'execute mode'",
+                prod_info.compil_script,
+                True,
+            )
+
     # If the product has a post install script, check the script existence
     # and if it is executable
     if product_has_post_script(prod_info):
         # Check the compil_script key existence
-        
+
         # Get the path of the script file
         # on Windows '.bat' is expected, on Linux '.sh',
         # but the user may set any script file extension in the pyconf, there is no obligation.
@@ -325,113 +349,126 @@ Please provide a 'compil_script' key in its definition.""") % product_name
         script_name = os.path.basename(script)
         if script == script_name:
             # Only a name is given. Search in the default directory
-            script_path = src.find_file_in_lpath(script_name, config.PATHS.PRODUCTPATH, "post_scripts")
+            script_path = src.find_file_in_lpath(
+                script_name, config.PATHS.PRODUCTPATH, "post_scripts"
+            )
             if not script_path:
                 msg = _("Post install script %s not found in") % script_name
-                DBG.tofix(msg, config.PATHS.PRODUCTPATH, True) # say where searched
+                DBG.tofix(msg, config.PATHS.PRODUCTPATH, True)  # say where searched
                 script_path = "%s_(Not_Found_by_Sat!!)" % script_name
             prod_info.post_script = script_path
 
-       
         # Check that the script is executable
-        if os.path.exists(prod_info.post_script) and not os.access(prod_info.post_script, os.X_OK):
-            DBG.tofix("Post install script file is not in 'execute mode'", prod_info.post_script, True)
+        if os.path.exists(prod_info.post_script) and not os.access(
+            prod_info.post_script, os.X_OK
+        ):
+            DBG.tofix(
+                "Post install script file is not in 'execute mode'",
+                prod_info.post_script,
+                True,
+            )
 
     # Get the full paths of all the patches
     if product_has_patches(prod_info):
         patches = []
         try:
-          for patch in prod_info.patches:
-              patch_path = patch
-              # If only a filename, then search for the patch in the PRODUCTPATH
-              if os.path.basename(patch_path) == patch_path:
-                  # Search in the PRODUCTPATH/patches
-                  patch_path = src.find_file_in_lpath(patch,
-                                                      config.PATHS.PRODUCTPATH,
-                                                      "patches")
-                  if not patch_path:
-                      msg = _("Patch %(patch_name)s for %(prod_name)s not found:"
-                              "\n" % {"patch_name" : patch,
-                                       "prod_name" : prod_info.name}) 
-                      raise src.SatException(msg)
-              patches.append(patch_path)
+            for patch in prod_info.patches:
+                patch_path = patch
+                # If only a filename, then search for the patch in the PRODUCTPATH
+                if os.path.basename(patch_path) == patch_path:
+                    # Search in the PRODUCTPATH/patches
+                    patch_path = src.find_file_in_lpath(
+                        patch, config.PATHS.PRODUCTPATH, "patches"
+                    )
+                    if not patch_path:
+                        msg = _(
+                            "Patch %(patch_name)s for %(prod_name)s not found:"
+                            "\n" % {"patch_name": patch, "prod_name": prod_info.name}
+                        )
+                        raise src.SatException(msg)
+                patches.append(patch_path)
         except:
-          DBG.tofix("problem in prod_info.patches", prod_info)
+            DBG.tofix("problem in prod_info.patches", prod_info)
         prod_info.patches = patches
 
     # Get the full paths of the environment scripts
     if product_has_env_script(prod_info):
         env_script_path = prod_info.environ.env_script
-        # If only a filename, then search for the environment script 
+        # If only a filename, then search for the environment script
         # in the PRODUCTPATH/env_scripts
         if os.path.basename(env_script_path) == env_script_path:
             # Search in the PRODUCTPATH/env_scripts
             env_script_path = src.find_file_in_lpath(
-                                            prod_info.environ.env_script,
-                                            config.PATHS.PRODUCTPATH,
-                                            "env_scripts")
+                prod_info.environ.env_script, config.PATHS.PRODUCTPATH, "env_scripts"
+            )
             if not env_script_path:
-                msg = _("Environment script %(env_name)s for %(prod_name)s not "
-                        "found.\n" % {"env_name" : env_script_path,
-                                       "prod_name" : prod_info.name}) 
+                msg = _(
+                    "Environment script %(env_name)s for %(prod_name)s not "
+                    "found.\n"
+                    % {"env_name": env_script_path, "prod_name": prod_info.name}
+                )
                 raise src.SatException(msg)
 
         prod_info.environ.env_script = env_script_path
-    
-    if with_install_dir: 
-        # The variable with_install_dir is at false only for internal use 
+
+    if with_install_dir:
+        # The variable with_install_dir is at false only for internal use
         # of the function get_install_dir
-        
+
         # Save the install_dir key if there is any
         if "install_dir" in prod_info and not "install_dir_save" in prod_info:
             prod_info.install_dir_save = prod_info.install_dir
-        
+
         # if it is not the first time the install_dir is computed, it means
         # that install_dir_save exists and it has to be taken into account.
         if "install_dir_save" in prod_info:
             prod_info.install_dir = prod_info.install_dir_save
-        
+
         # Set the install_dir key
-        prod_info.install_dir,prod_info.install_mode = get_install_dir(config, version, prod_info)
-                
+        prod_info.install_dir, prod_info.install_mode = get_install_dir(
+            config, version, prod_info
+        )
+
     return prod_info
 
+
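The branching at the top of get_product_config accepts either a bare tag or a mapping carrying per-product overrides. Below is a simplified sketch of that resolution using plain dicts; the field names come from the code above, the default values mirror its "no"/"maybe" initialisation, and the example tags are hypothetical.

def resolve_product_flags(app, entry):
    # entry is either a tag string/bool, or a dict such as
    # {"tag": "V9_11_0", "debug": "yes", "section": "default_MPI"} (hypothetical)
    flags = {"debug": "no", "dev": "no", "hpc": "no", "verbose": "no", "base": "maybe"}
    if isinstance(entry, (bool, str)):
        # bare entry: the tag is the entry itself (or the application tag),
        # the flags fall back to the application-level values
        tag = app.get("tag") if isinstance(entry, bool) else entry
        for key in flags:
            flags[key] = app.get(key, flags[key])
    else:
        # mapping entry: per-product values win over application-level ones
        tag = entry.get("tag", app.get("tag"))
        for key in flags:
            flags[key] = entry.get(key, app.get(key, flags[key]))
    return tag, flags

tag, flags = resolve_product_flags({"tag": "master", "debug": "yes"}, "V9_11_0")
# tag == "V9_11_0", flags["debug"] == "yes"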
 def get_product_section(config, product_name, version, section=None):
     """Build the product description from the configuration
-    
+
     :param config Config: The global configuration
     :param product_name str: The product name
     :param version str: The version of the product, as 'V8_4_0' or similar.
-    :param section str: The searched section (if not None, the section is 
+    :param section str: The searched section (if not None, the section is
                         explicitly given)
     :return: The product description
     :rtype: Config
     """
 
-
-    #get product definition and determine if the incremental definition mode is activated
+    # get product definition and determine if the incremental definition mode is activated
     aProd = config.PRODUCTS[product_name]
-    if "default" in aProd and\
-       "properties" in aProd.default and\
-       "incremental" in aProd.default.properties and\
-       aProd.default.properties.incremental == "yes":
+    if (
+        "default" in aProd
+        and "properties" in aProd.default
+        and "incremental" in aProd.default.properties
+        and aProd.default.properties.incremental == "yes"
+    ):
         # in this (new) mode the definition of the product is given by the default section
         # and is incremented by others.
-        is_incr=True
+        is_incr = True
     else:
         # in this (historic) mode the definition of the product is given by a full unique section
-        is_incr=False
+        is_incr = False
 
     # decode version number
     try:
-      versionMMP = VMMP.MinorMajorPatch(version)
-    except: # example setuptools raise "minor in major_minor_patch is not integer: '0_6c11'"
-      versionMMP = None
+        versionMMP = VMMP.MinorMajorPatch(version)
+    except:  # example: setuptools raises "minor in major_minor_patch is not integer: '0_6c11'"
+        versionMMP = None
 
     # if a section is explicitly specified, we select it
     if section:
         if section not in aProd:
-            pi=None
+            pi = None
         # returns specific information for the given version
         pi = aProd[section]
         pi.section = section
@@ -451,24 +488,26 @@ def get_product_section(config, product_name, version, section=None):
         l_section_ranges = []
         tagged = []
         for name in l_section_names:
-          aRange = VMMP.getRange_majorMinorPatch(name)
-          if aRange is not None:
-            l_section_ranges.append((name, aRange))
+            aRange = VMMP.getRange_majorMinorPatch(name)
+            if aRange is not None:
+                l_section_ranges.append((name, aRange))
 
         if versionMMP is not None and len(l_section_ranges) > 0:
-          for name, (vmin, vmax) in l_section_ranges:
-            if versionMMP >= vmin and versionMMP <= vmax:
-              tagged.append((name, [vmin, vmax]))
+            for name, (vmin, vmax) in l_section_ranges:
+                if versionMMP >= vmin and versionMMP <= vmax:
+                    tagged.append((name, [vmin, vmax]))
 
         if len(tagged) > 1:
-          DBG.write("multiple version ranges tagged for '%s', fix it" % version,
-                         PP.pformat(tagged))
-          pi=None
-        elif len(tagged) == 1: # ok
-          name, (vmin, vmax) = tagged[0]
-          pi = aProd[name]
-          pi.section = name
-          pi.from_file = aProd.from_file
+            DBG.write(
+                "multiple version ranges tagged for '%s', fix it" % version,
+                PP.pformat(tagged),
+            )
+            pi = None
+        elif len(tagged) == 1:  # ok
+            name, (vmin, vmax) = tagged[0]
+            pi = aProd[name]
+            pi.section = name
+            pi.from_file = aProd.from_file
 
         # Else, get the standard information
         elif "default" in aProd:
@@ -477,200 +516,226 @@ def get_product_section(config, product_name, version, section=None):
             pi.section = "default"
             pi.from_file = aProd.from_file
         else:
-            pi=None
+            pi = None
 
     if is_incr:
         # If the definition is incremental, we take the default section
-        # and then complete it with other sections : 
+        # and then complete it with other sections:
         #   - default_win
         #   - the selected section (pi)
         #   - the selected _win section
-        prod_info=aProd["default"]
+        prod_info = aProd["default"]
         prod_info.from_file = aProd.from_file
         prod_info.section = "default"
         if src.architecture.is_windows() and "default_win" in aProd:
             for key in aProd["default_win"]:
-                prod_info[key]=aProd["default_win"][key]
-        if pi!=None and pi.section!="default":
+                prod_info[key] = aProd["default_win"][key]
+        if pi != None and pi.section != "default":
             # update prod_info with incremental definition contained in pi
             for key in pi:
-                prod_info[key]=pi[key]
-            win_section=pi.section+"_win"
+                prod_info[key] = pi[key]
+            win_section = pi.section + "_win"
             if src.architecture.is_windows() and win_section in aProd:
                 for key in aProd[win_section]:
-                    prod_info[key]=aProd[win_section][key]
+                    prod_info[key] = aProd[win_section][key]
     else:
-        prod_info=pi
+        prod_info = pi
 
-    #DBG.write("product info returned for product %s with version %s and section %s" %\
+    # DBG.write("product info returned for product %s with version %s and section %s" %\
     #          (product_name, version, section), prod_info)
     return prod_info
-    
+
+
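In incremental mode, get_product_section starts from the "default" section and overlays it, key by key, with default_win, the selected section and its _win variant. A compact dict-based sketch of that layering; the section names and values in the example are hypothetical.

def merge_incremental(sections, selected, on_windows=False):
    # layering order: default -> default_win -> selected -> selected_win
    prod = dict(sections["default"])
    layers = []
    if on_windows and "default_win" in sections:
        layers.append("default_win")
    if selected != "default":
        layers.append(selected)
        if on_windows and selected + "_win" in sections:
            layers.append(selected + "_win")
    for name in layers:
        prod.update(sections.get(name, {}))
    return prod

sections = {
    "default": {"build_source": "cmake", "tag": "master"},
    "special_section": {"tag": "V1_2_3"},
}
print(merge_incremental(sections, "special_section"))
# {'build_source': 'cmake', 'tag': 'V1_2_3'}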
 def get_install_dir(config, version, prod_info):
-    """Compute the installation directory of a given product 
-    
+    """Compute the installation directory of a given product
+
     :param config Config: The global configuration
-    :param base str: This corresponds to the value given by user in its 
+    :param base str: This corresponds to the value given by the user in the
                      application.pyconf for the specific product. If "yes", the
                      user wants the product to be in base. If "no", he wants the
                      product to be in the application workdir
     :param version str: The version of the product
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product
-    
+
     :return: The path of the product installation and the mode of the install directory (base/implicit/fixed/value)
     :rtype: str,str
     """
     install_dir = ""
     in_base = False
-    
+
     # prod_info.base : corresponds to what is specified in application pyconf (either from the global key base, or from a product dict)
     # prod_info.install_dir : corresponds to what is specified in product pyconf (usually "base" for prerequisites)
-    if ( ("install_dir" in prod_info and prod_info.install_dir == "base") # product is declared as base in its own config 
-                                      or ("base" in prod_info  and prod_info.base != "no") ):  # product is declared as base in the application
+    if (
+        "install_dir" in prod_info and prod_info.install_dir == "base"
+    ) or (  # product is declared as base in its own config
+        "base" in prod_info and prod_info.base != "no"
+    ):  # product is declared as base in the application
         # a product goes in base if install_dir is set to base, or if the product was declared as base in the application pyconf
         in_base = True
 
     # check deactivation of base at application level
-    if ( "base" in prod_info  and prod_info.base == "no"):
+    if "base" in prod_info and prod_info.base == "no":
         in_base = False
 
     if in_base:
         install_dir = get_base_install_dir(config, prod_info, version)
         install_mode = "base"
     else:
-        if ("install_mode" in prod_info and prod_info.install_mode in ["implicit", "base"]) or\
-           ("install_dir" not in prod_info or prod_info.install_dir == "base"):
-            # the check to "base" comes from the package case were base mode is changed dynamically 
+        if (
+            "install_mode" in prod_info
+            and prod_info.install_mode in ["implicit", "base"]
+        ) or ("install_dir" not in prod_info or prod_info.install_dir == "base"):
+            # the check against "base" comes from the package case where base mode is changed dynamically
             # to create a package launcher.
 
             # Set it to the default value (in application directory)
             install_mode = "implicit"
-            if ( src.appli_test_property(config,"single_install_dir", "yes") and 
-                 src.product.product_test_property(prod_info,"single_install_dir", "yes")):
+            if src.appli_test_property(
+                config, "single_install_dir", "yes"
+            ) and src.product.product_test_property(
+                prod_info, "single_install_dir", "yes"
+            ):
                 # when single_install_dir mode is activated in the application
-                # we use config.INTERNAL.config.single_install_dir for products 
+                # we use config.INTERNAL.config.single_install_dir for products
                 # having single_install_dir property
-                install_dir = os.path.join(config.APPLICATION.workdir,
-                                           config.INTERNAL.config.install_dir,
-                                           config.INTERNAL.config.single_install_dir)
-            elif ( src.appli_test_property(config,"pip", "yes") and 
-                   src.product.product_test_property(prod_info,"pip", "yes") and
-                   src.appli_test_property(config,"pip_install_dir", "python") ):
+                install_dir = os.path.join(
+                    config.APPLICATION.workdir,
+                    config.INTERNAL.config.install_dir,
+                    config.INTERNAL.config.single_install_dir,
+                )
+            elif (
+                src.appli_test_property(config, "pip", "yes")
+                and src.product.product_test_property(prod_info, "pip", "yes")
+                and src.appli_test_property(config, "pip_install_dir", "python")
+            ):
                 # when pip mode is activated in the application
-                # and product is pip, and pip_install_dir is set to python 
+                # and product is pip, and pip_install_dir is set to python
                 # we assume python is installed in install_dir/Python
-                install_dir = os.path.join(config.APPLICATION.workdir,
-                                           config.INTERNAL.config.install_dir,
-                                           "Python")   
+                install_dir = os.path.join(
+                    config.APPLICATION.workdir,
+                    config.INTERNAL.config.install_dir,
+                    "Python",
+                )
             else:
-                install_dir = os.path.join(config.APPLICATION.workdir,
-                                           config.INTERNAL.config.install_dir,
-                                           prod_info.name)
+                install_dir = os.path.join(
+                    config.APPLICATION.workdir,
+                    config.INTERNAL.config.install_dir,
+                    prod_info.name,
+                )
         else:
             install_dir = prod_info.install_dir
             install_mode = "value"
 
-    return install_dir,install_mode
+    return install_dir, install_mode
+
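For the implicit branch of get_install_dir above, the result is simply <workdir>/<install subdir>/<product> (or one shared directory when single_install_dir is active). A hedged sketch of that computation; the example paths are made up.

import os

def implicit_install_dir(workdir, install_subdir, prod_name, single_install_dir=None):
    # mirrors the "implicit" branch: either one shared directory for all
    # single_install_dir products, or one directory per product
    if single_install_dir:
        return os.path.join(workdir, install_subdir, single_install_dir)
    return os.path.join(workdir, install_subdir, prod_name)

print(implicit_install_dir("/opt/SALOME", "INSTALL", "KERNEL"))
# -> /opt/SALOME/INSTALL/KERNEL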
 
 def get_base_install_dir(config, prod_info, version):
-    """Compute the installation directory of a product in base 
-    
+    """Compute the installation directory of a product in base
+
     :param config Config: The global configuration
-    :param product_info Config: The configuration specific to 
+    :param product_info Config: The configuration specific to
                                the product
-    :param version str: The version of the product    
-    :param base str: This corresponds to the value given by user in its 
+    :param version str: The version of the product
+    :param base str: This corresponds to the value given by the user in the
                      application.pyconf for the specific product. If "yes", the
                      user wants the product to be in base. If "no", he wants the
                      product to be in the application workdir.
-                     if it contains something else, is is interpreted as the name 
+                     if it contains something else, it is interpreted as the name
                      of a base we build for module load.
     :return: The path of the product installation
     :rtype: str
-    """    
-    
+    """
+
     # get rid of / to avoid creating subdirectories, cf sat #18546
-    version_wslash=version.replace("/", "_") 
-
-    if ( src.appli_test_property(config,"pip", "yes") and 
-         src.product.product_test_property(prod_info,"pip", "yes") and
-         src.appli_test_property(config,"pip_install_dir", "python") ):
-         # when pip mode is activated in the application
-         # and product is pip, and pip_install_dir is set to python 
-        python_info=get_product_config(config, "Python")
+    version_wslash = version.replace("/", "_")
+
+    if (
+        src.appli_test_property(config, "pip", "yes")
+        and src.product.product_test_property(prod_info, "pip", "yes")
+        and src.appli_test_property(config, "pip_install_dir", "python")
+    ):
+        # when pip mode is activated in the application
+        # and product is pip, and pip_install_dir is set to python
+        python_info = get_product_config(config, "Python")
         return python_info.install_dir
 
-    base_path = src.get_base_path(config) 
+    base_path = src.get_base_path(config)
     if "base" in prod_info and prod_info.base != "no" and prod_info.base != "yes":
         # we are in the case of a named base
-        prod_dir = os.path.join(base_path, "apps", prod_info.base, prod_info.name, version_wslash)
+        prod_dir = os.path.join(
+            base_path, "apps", prod_info.base, prod_info.name, version_wslash
+        )
         return prod_dir
-    
+
     prod_dir = os.path.join(base_path, prod_info.name + "-" + version_wslash)
     if not os.path.exists(prod_dir):
         return os.path.join(prod_dir, "config-1")
-    
+
     exists, install_dir = check_config_exists(config, prod_dir, prod_info)
     if exists:
         return install_dir
-    
+
     # Find the first config-<i> directory that is available in the product
     # directory
-    found = False 
+    found = False
     label = 1
     while not found:
         install_dir = os.path.join(prod_dir, "config-%i" % label)
         if os.path.exists(install_dir):
-            label+=1
+            label += 1
         else:
             found = True
-            
+
     return install_dir
 
+
 def add_compile_config_file(p_info, config):
-    '''Execute the proper configuration command(s)
+    """Write the compile configuration file(s)
        in the product install directory.
 
     :param p_info Config: The specific config of the product
     :param config Config: The global configuration
-    '''
+    """
     # Create the compile config
     # DBG.write("add_compile_config_file", p_info, True)
     res = src.pyconf.Config()
     res.addMapping(p_info.name, src.pyconf.Mapping(res), "")
-    res[p_info.name]= p_info.version
+    res[p_info.name] = p_info.version
 
-    depprod=[]
+    depprod = []
     for d in p_info.depend:
         depprod.append(d)
     if "build_depend" in p_info:
         for d in p_info.build_depend:
             depprod.append(d)
     for prod_name in depprod:
-      if prod_name not in res:
-        res.addMapping(prod_name, src.pyconf.Mapping(res), "")
-      prod_dep_info = src.product.get_product_config(config, prod_name, False)
-      res[prod_name] = prod_dep_info.version
+        if prod_name not in res:
+            res.addMapping(prod_name, src.pyconf.Mapping(res), "")
+        prod_dep_info = src.product.get_product_config(config, prod_name, False)
+        res[prod_name] = prod_dep_info.version
     # Write it in the install directory of the product
     # This file is for automatic reading/checking
     # see check_config_exists method
     afilename = CONFIG_FILENAME + p_info.name + ".pyconf"
     aFile = os.path.join(p_info.install_dir, afilename)
-    with open(aFile, 'w') as f:
-      res.__save__(f)
+    with open(aFile, "w") as f:
+        res.__save__(f)
 
     # this file is not mandatory, it is only for human reading
     afilename = PRODUCT_FILENAME + p_info.name + ".pyconf"
     aFile = os.path.join(p_info.install_dir, afilename)
     try:
-      with open(aFile, 'w') as f:
-        p_info.__save__(f, evaluated=True) # evaluated expressions mode
+        with open(aFile, "w") as f:
+            p_info.__save__(f, evaluated=True)  # evaluated expressions mode
     except:
-      # sometime some information cannot be evaluated.
-      # for example, in the context of non VCS archives, information on git server is not available.
-      DBG.write("Warning : sat was not able to evaluate and write down some information in file %s" % aFile)
-  
+        # sometimes some information cannot be evaluated.
+        # for example, in the context of non VCS archives, information on git server is not available.
+        DBG.write(
+            "Warning : sat was not able to evaluate and write down some information in file %s"
+            % aFile
+        )
+
 
 def check_config_exists(config, prod_dir, prod_info, verbose=False):
     """\
@@ -690,14 +755,14 @@ def check_config_exists(config, prod_dir, prod_info, verbose=False):
     # check if the directories or files of the directory corresponds to the
     # directory installation of the product
     if os.path.isdir(prod_dir):
-      l_dir_and_files = os.listdir(prod_dir)
+        l_dir_and_files = os.listdir(prod_dir)
     else:
-      raise Exception("Inexisting directory '%s'" % prod_dir)
+        raise Exception("Inexisting directory '%s'" % prod_dir)
 
-    DBG.write("check_config_exists 000",  (prod_dir, l_dir_and_files), verbose)
-    DBG.write("check_config_exists 111",  prod_info, verbose)
+    DBG.write("check_config_exists 000", (prod_dir, l_dir_and_files), verbose)
+    DBG.write("check_config_exists 111", prod_info, verbose)
 
-    depend_all=[]
+    depend_all = []
     if "depend" in prod_info:
         for d in prod_info.depend:
             depend_all.append(d)
@@ -706,74 +771,73 @@ def check_config_exists(config, prod_dir, prod_info, verbose=False):
             depend_all.append(d)
     for dir_or_file in l_dir_and_files:
         oExpr = re.compile(config_expression)
-        if not(oExpr.search(dir_or_file)):
+        if not (oExpr.search(dir_or_file)):
             # in mode BASE, not config-<i>, not interesting
             # DBG.write("not interesting", dir_or_file, True)
             continue
         # check if there is the sat-config.pyconf file in the installation
-        # directory    
+        # directory
         afilename = CONFIG_FILENAME + prod_info.name + ".pyconf"
         config_file = os.path.join(prod_dir, dir_or_file, afilename)
         DBG.write("check_config_exists 222", config_file, verbose)
         if not os.path.exists(config_file):
             continue
-        
-        # check if there is the config described in the file corresponds the 
+
+        # check if the config described in the file corresponds to the
         # dependencies of the product
-        config_corresponds = True    
+        config_corresponds = True
         compile_cfg = src.pyconf.Config(config_file)
         for prod_dep in depend_all:
-            # if the dependency is not in the config, 
+            # if the dependency is not in the config,
             # the config does not correspond
             if prod_dep not in compile_cfg:
                 config_corresponds = False
                 break
             else:
                 prod_dep_info = get_product_config(config, prod_dep, False)
-                # If the version of the dependency does not correspond, 
+                # If the version of the dependency does not correspond,
                 # the config does not correspond
                 if prod_dep_info.version != compile_cfg[prod_dep]:
                     config_corresponds = False
                     break
 
         if config_corresponds:
-          for prod_name in compile_cfg:
-            # assume new compatibility with prod_name in sat-config.pyconf files
-            if prod_name == prod_info.name:
-              if prod_info.version == compile_cfg[prod_name]:
-                DBG.write("check_config_exists OK 333", compile_cfg, verbose)
-                pass
-              else: # no correspondence with newer with prod_name sat-config.pyconf files
-                config_corresponds = False
-                break
-            else:
-              # as old compatibility without prod_name sat-config.pyconf files
-              if prod_name not in depend_all:
-                # here there is an unexpected depend in an old compilation
-                config_corresponds = False
-                break
-        
-        if config_corresponds: # returns (and stops) at first correspondence found
+            for prod_name in compile_cfg:
+                # new compatibility mode: prod_name is present in sat-config.pyconf files
+                if prod_name == prod_info.name:
+                    if prod_info.version == compile_cfg[prod_name]:
+                        DBG.write("check_config_exists OK 333", compile_cfg, verbose)
+                        pass
+                    else:  # no correspondence with newer sat-config.pyconf files containing prod_name
+                        config_corresponds = False
+                        break
+                else:
+                    # old compatibility mode: sat-config.pyconf files without prod_name
+                    if prod_name not in depend_all:
+                        # here there is an unexpected depend in an old compilation
+                        config_corresponds = False
+                        break
+
+        if config_corresponds:  # returns (and stops) at first correspondence found
             DBG.write("check_config_exists OK 444", dir_or_file, verbose)
             return True, os.path.join(prod_dir, dir_or_file)
 
     # no correspondence found
     return False, None
-            
-            
-    
+
+
 def get_products_infos(lproducts, config):
     """Get the specific configuration of a list of products
-    
+
     :param lproducts List: The list of product names
     :param config Config: The global configuration
-    :return: the list of tuples 
+    :return: the list of tuples
              (product name, specific configuration of the product)
     :rtype: [(str, Config)]
     """
     products_infos = []
     # Loop on product names
-    for prod in lproducts:       
+    for prod in lproducts:
         # Get the specific configuration of the product
         prod_info = get_product_config(config, prod)
         if prod_info is not None:
@@ -816,41 +880,45 @@ def get_products_list(options, cfg, logger):
     resAll = src.product.get_products_infos(products, cfg)
 
     # if the property option was passed, filter the list
-    if options.properties: # existing properties
-      ok = []
-      ko = []
-      res =[]
-      prop, value = options.properties # for example 'is_SALOME_module', 'yes'
-      if value[0] == '!':
-          for p_name, p_info in resAll:
-            try:
-              if p_info.properties[prop] == value[1:]:
-                ko.append(p_name)
-              else:
-                res.append((p_name, p_info))
-                ok.append(p_name)
-            except:
-              res.append((p_name, p_info))
-              ok.append(p_name)
-      else:
-          for p_name, p_info in resAll:
-            try:
-              if p_info.properties[prop] == value:
-                res.append((p_name, p_info))
-                ok.append(p_name)
-              else:
-                ko.append(p_name)
-            except:
-              ko.append(p_name)
-
-      if len(ok) != len(resAll):
-        logger.trace("on properties %s\n products accepted:\n %s\n products rejected:\n %s\n" %
-                       (options.properties, PP.pformat(sorted(ok)), PP.pformat(sorted(ko))))
-      else:
-        logger.warning("properties %s\n seems useless with no products rejected" %
-                       (options.properties))
+    if options.properties:  # existing properties
+        ok = []
+        ko = []
+        res = []
+        prop, value = options.properties  # for example 'is_SALOME_module', 'yes'
+        if value[0] == "!":
+            for p_name, p_info in resAll:
+                try:
+                    if p_info.properties[prop] == value[1:]:
+                        ko.append(p_name)
+                    else:
+                        res.append((p_name, p_info))
+                        ok.append(p_name)
+                except:
+                    res.append((p_name, p_info))
+                    ok.append(p_name)
+        else:
+            for p_name, p_info in resAll:
+                try:
+                    if p_info.properties[prop] == value:
+                        res.append((p_name, p_info))
+                        ok.append(p_name)
+                    else:
+                        ko.append(p_name)
+                except:
+                    ko.append(p_name)
+
+        if len(ok) != len(resAll):
+            logger.trace(
+                "on properties %s\n products accepted:\n %s\n products rejected:\n %s\n"
+                % (options.properties, PP.pformat(sorted(ok)), PP.pformat(sorted(ko)))
+            )
+        else:
+            logger.warning(
+                "properties %s\n seems useless with no products rejected"
+                % (options.properties)
+            )
     else:
-      res = resAll # not existing properties as all accepted
+        res = resAll  # no property filter given: all products are accepted
 
     return res
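The property filter above keeps or rejects products according to a (property, value) pair, a leading '!' negating the match and a missing property counting as a non-match. A condensed sketch of that predicate over plain dicts; the sample products are illustrative.

def property_filter(products, prop, value):
    # products: list of (name, info_dict); info_dict may carry a "properties" dict
    negate = value.startswith("!")
    wanted = value[1:] if negate else value
    kept = []
    for name, info in products:
        actual = info.get("properties", {}).get(prop)
        matches = (actual == wanted)
        if negate:
            # '!' keeps everything that does NOT carry the value
            if not matches:
                kept.append((name, info))
        elif matches:
            kept.append((name, info))
    return kept

sample = [("KERNEL", {"properties": {"is_SALOME_module": "yes"}}), ("boost", {})]
print(property_filter(sample, "is_SALOME_module", "yes"))   # [("KERNEL", ...)]
print(property_filter(sample, "is_SALOME_module", "!yes"))  # [("boost", {})]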
 
@@ -867,14 +935,14 @@ def get_product_dependencies(config, product_name, product_info):
     :rtype: list
     """
     from compile import get_dependencies_graph, depth_search_graph
-    all_products_infos = get_products_infos(
-                             config.APPLICATION.products,
-                             config)
-    all_products_graph=get_dependencies_graph(all_products_infos)
-    res=[]
-    res=depth_search_graph(all_products_graph, product_name, res)
+
+    all_products_infos = get_products_infos(config.APPLICATION.products, config)
+    all_products_graph = get_dependencies_graph(all_products_infos)
+    res = []
+    res = depth_search_graph(all_products_graph, product_name, res)
     return res[1:]  # remove the product itself (in first position)
 
+
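get_product_dependencies builds the dependency graph of the whole application, walks it depth-first from the requested product, and drops the product itself from the front of the result. A rough standalone equivalent over an adjacency dict; this is not the actual compile.depth_search_graph implementation, and the graph below is invented.

def depth_first(graph, node, visited=None):
    # graph: adjacency dict, product name -> list of direct dependencies
    if visited is None:
        visited = []
    if node in visited:
        return visited
    visited.append(node)
    for dep in graph.get(node, []):
        depth_first(graph, dep, visited)
    return visited

graph = {"SHAPER": ["GEOM", "KERNEL"], "GEOM": ["KERNEL"], "KERNEL": []}
print(depth_first(graph, "SHAPER")[1:])  # dependencies only: ['GEOM', 'KERNEL']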
 def check_installation(config, product_info):
     """\
     Verify if a product is well installed. Checks install directory presence
@@ -891,15 +959,17 @@ def check_installation(config, product_info):
 
     if product_is_native(product_info):
         # check a system product
-        check_cmd=src.system.get_pkg_check_cmd(config.VARS.dist_name)
-        run_pkg,build_pkg=src.product.check_system_dep(config.VARS.dist, check_cmd, product_info)
-        build_dep_ko=[]
+        check_cmd = src.system.get_pkg_check_cmd(config.VARS.dist_name)
+        run_pkg, build_pkg = src.product.check_system_dep(
+            config.VARS.dist, check_cmd, product_info
+        )
+        build_dep_ko = []
         for pkg in build_pkg:
             if "KO" in build_pkg[pkg]:
                 build_dep_ko.append(pkg)
         if build_dep_ko:
             # the product is not installed : display message and return error status
-            msg="Please install them with %s before compiling salome" % check_cmd[0]
+            msg = "Please install them with %s before compiling salome" % check_cmd[0]
             print(build_pkg)
             print("\nmissing compile time dependencies : ")
             for md in build_dep_ko:
@@ -916,22 +986,22 @@ def check_installation(config, product_info):
             return False
     else:
         filename = CONFIG_FILENAME + product_info.name + ".pyconf"
-        if not os.path.exists(os.path.join(install_dir, filename)): 
+        if not os.path.exists(os.path.join(install_dir, filename)):
             return False
 
     # check extra files if specified in present_files.install section
-    if ("present_files" in product_info and 
-        "install" in product_info.present_files):
+    if "present_files" in product_info and "install" in product_info.present_files:
         for file_relative_path in product_info.present_files.install:
             file_path = os.path.join(install_dir, file_relative_path)
             if not os.path.exists(file_path):
                 return False
     return True
 
+
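check_installation above and check_source below finish with the same pattern: every relative path listed under present_files must exist below the install (or source) directory. A tiny sketch of that final check; the commented usage shows hypothetical paths.

import os

def required_files_present(base_dir, relative_paths):
    # returns False as soon as one expected file is missing
    for rel in relative_paths:
        if not os.path.exists(os.path.join(base_dir, rel)):
            return False
    return True

# hypothetical layout, for illustration only:
# ok = required_files_present("/opt/SALOME/INSTALL/KERNEL", ["bin/salome", "lib/libKERNEL.so"])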
 def check_source(product_info):
     """Verify if the sources of a product are present. Checks source directory presence
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if it is well installed
     :rtype: boolean
@@ -939,163 +1009,184 @@ def check_source(product_info):
     source_dir = product_info.source_dir
     if not os.path.exists(source_dir):
         return False
-    if ("present_files" in product_info and 
-        "source" in product_info.present_files):
+    if "present_files" in product_info and "source" in product_info.present_files:
         for file_relative_path in product_info.present_files.source:
             file_path = os.path.join(source_dir, file_relative_path)
             if not os.path.exists(file_path):
                 return False
     return True
 
+
 def product_is_salome(product_info):
     """Know if a product is a SALOME module
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is a SALOME module, else False
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "is_SALOME_module" in product_info.properties and
-            product_info.properties.is_SALOME_module == "yes")
+    return (
+        "properties" in product_info
+        and "is_SALOME_module" in product_info.properties
+        and product_info.properties.is_SALOME_module == "yes"
+    )
+
 
 def product_is_configuration(product_info):
     """Know if a product is a configuration module
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is a configuration module, else False
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "configure_dependency" in product_info.properties and
-            product_info.properties.configure_dependency == "yes")
+    return (
+        "properties" in product_info
+        and "configure_dependency" in product_info.properties
+        and product_info.properties.configure_dependency == "yes"
+    )
+
 
 def product_is_fixed(product_info):
     """Know if a product is fixed
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is fixed, else False
     :rtype: boolean
     """
     get_src = product_info.get_source
-    return get_src.lower() == 'fixed'
+    return get_src.lower() == "fixed"
+
 
 def product_is_native(product_info):
     """Know if a product is native
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is native, else False
     :rtype: boolean
     """
     get_src = product_info.get_source
-    return get_src.lower() == 'native'
+    return get_src.lower() == "native"
+
 
 def product_is_dev(product_info):
     """Know if a product is in dev mode
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is in dev mode, else False
     :rtype: boolean
     """
     dev = product_info.dev
-    res = (dev.lower() == 'yes')
-    DBG.write('product_is_dev %s' % product_info.name, res)
+    res = dev.lower() == "yes"
+    DBG.write("product_is_dev %s" % product_info.name, res)
     # if product_info.name == "XDATA": return True #test #10569
     return res
 
+
 def product_is_hpc(product_info):
     """Know if a product is in hpc mode
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is in hpc mode, else False
     :rtype: boolean
     """
     hpc = product_info.hpc
-    res = (hpc.lower() == 'yes')
+    res = hpc.lower() == "yes"
     return res
 
+
 def product_is_debug(product_info):
     """Know if a product is in debug mode
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is in debug mode, else False
     :rtype: boolean
     """
     debug = product_info.debug
-    return debug.lower() == 'yes'
+    return debug.lower() == "yes"
+
 
 def product_is_verbose(product_info):
     """Know if a product is in verbose mode
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is in verbose mode, else False
     :rtype: boolean
     """
     verbose = product_info.verbose
-    return verbose.lower() == 'yes'
+    return verbose.lower() == "yes"
+
 
 def product_is_autotools(product_info):
     """Know if a product is compiled using the autotools
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is autotools, else False
     :rtype: boolean
     """
     build_src = product_info.build_source
-    return build_src.lower() == 'autotools'
+    return build_src.lower() == "autotools"
+
 
 def product_is_cmake(product_info):
     """Know if a product is compiled using the cmake
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is cmake, else False
     :rtype: boolean
     """
     build_src = product_info.build_source
-    return build_src.lower() == 'cmake'
+    return build_src.lower() == "cmake"
+
 
 def product_is_vcs(product_info):
     """Know if a product is download using git, svn or cvs (not archive)
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is vcs, else False
     :rtype: boolean
     """
     return product_info.get_source in AVAILABLE_VCS
 
+
 def product_is_smesh_plugin(product_info):
     """Know if a product is a SMESH plugin
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is a SMESH plugin, else False
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "smesh_plugin" in product_info.properties and
-            product_info.properties.smesh_plugin == "yes")
+    return (
+        "properties" in product_info
+        and "smesh_plugin" in product_info.properties
+        and product_info.properties.smesh_plugin == "yes"
+    )
+
 
 def product_is_cpp(product_info):
     """Know if a product is cpp
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is a cpp, else False
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "cpp" in product_info.properties and
-            product_info.properties.cpp == "yes")
+    return (
+        "properties" in product_info
+        and "cpp" in product_info.properties
+        and product_info.properties.cpp == "yes"
+    )
+
 
 def product_compiles(product_info):
     """\
@@ -1107,14 +1198,17 @@ def product_compiles(product_info):
     :return: True if the product compiles, else False
     :rtype: boolean
     """
-    return not("properties" in product_info and
-            "compilation" in product_info.properties and
-            product_info.properties.compilation == "no")
+    return not (
+        "properties" in product_info
+        and "compilation" in product_info.properties
+        and product_info.properties.compilation == "no"
+    )
+
 
 def product_has_script(product_info):
     """Know if a product has a compilation script
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has a compilation script, else False
     :rtype: boolean
@@ -1123,146 +1217,165 @@ def product_has_script(product_info):
         # Native case
         return False
     build_src = product_info.build_source
-    return build_src.lower() == 'script'
+    return build_src.lower() == "script"
+
 
 def product_has_env_script(product_info):
     """Know if a product has an environment script
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has an environment script, else False
     :rtype: boolean
     """
     return "environ" in product_info and "env_script" in product_info.environ
 
+
 def product_has_patches(product_info):
     """Know if a product has one or more patches
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has one or more patches
     :rtype: boolean
-    """   
-    res = ( "patches" in product_info and len(product_info.patches) > 0 )
+    """
+    res = "patches" in product_info and len(product_info.patches) > 0
     return res
 
+
 def product_has_post_script(product_info):
     """Know if a product has a post install script
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has a post install script, else False
     :rtype: boolean
-    """   
-    res = ( "post_script" in product_info and len(product_info.post_script) > 0 and not src.architecture.is_windows())
+    """
+    res = (
+        "post_script" in product_info
+        and len(product_info.post_script) > 0
+        and not src.architecture.is_windows()
+    )
     return res
 
+
 def product_has_logo(product_info):
     """Know if a product has a logo (YACSGEN generate)
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: The path of the logo if the product has a logo, else False
     :rtype: Str
     """
-    if ("properties" in product_info and
-            "logo" in product_info.properties):
+    if "properties" in product_info and "logo" in product_info.properties:
         return product_info.properties.logo
     else:
         return False
 
+
 def product_has_licence(product_info, path):
     """Find out if a product has a licence
-    
+
     :param product_info Config: The configuration specific to the product
     :param path Str: The path where to search for the licence
     :return: The name of the licence file (the complete path if it is found in the path, else just the name), else False
     :rtype: Str
     """
-    if ("properties" in product_info and
-            "licence" in product_info.properties):
+    if "properties" in product_info and "licence" in product_info.properties:
         licence_name = product_info.properties.licence
         if len(path) > 0:
             # search for licence_name in path
             # a- consolidate the path into one single string licence_path
-            licence_path=path[0]
+            licence_path = path[0]
             for lpath in path[1:]:
-                licence_path=licence_path+":"+lpath
-            licence_path_list=licence_path.split(":")
+                licence_path = licence_path + ":" + lpath
+            licence_path_list = licence_path.split(":")
             licence_fullname = src.find_file_in_lpath(licence_name, licence_path_list)
             if licence_fullname:
                 return licence_fullname
 
-        # if the search of licence in path failed, we return its name (not the full path) 
+        # if the search of licence in path failed, we return its name (not the full path)
         return licence_name
 
     else:
         return False  # product has no licence
 
+
 def product_has_salome_gui(product_info):
     """Know if a product has a SALOME gui
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has a SALOME gui, else False
     :rtype: Boolean
     """
-    return ("properties" in product_info and
-            "has_salome_gui" in product_info.properties and
-            product_info.properties.has_salome_gui == "yes")
+    return (
+        "properties" in product_info
+        and "has_salome_gui" in product_info.properties
+        and product_info.properties.has_salome_gui == "yes"
+    )
+
 
 def product_is_mpi(product_info):
     """Know if a product has openmpi in its dependencies
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has openmpi in its dependencies
     :rtype: boolean
     """
     return "openmpi" in product_info.depend
 
+
 def product_is_generated(product_info):
     """Know if a product is generated (YACSGEN)
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is generated
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "generate" in product_info.properties and
-            product_info.properties.generate == "yes")
+    return (
+        "properties" in product_info
+        and "generate" in product_info.properties
+        and product_info.properties.generate == "yes"
+    )
+
 
 def product_is_compile_time(product_info):
     """Know if a product is only used at compile time
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is only used at compile time
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "compile_time" in product_info.properties and
-            product_info.properties.compile_time == "yes")
+    return (
+        "properties" in product_info
+        and "compile_time" in product_info.properties
+        and product_info.properties.compile_time == "yes"
+    )
+
 
 def product_is_compile_and_runtime(product_info):
     """Know if a product is only used at compile time
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product is used at both compile time and runtime
     :rtype: boolean
     """
-    return ("properties" in product_info and
-            "compile_and_runtime" in product_info.properties and
-            product_info.properties.compile_and_runtime == "yes")
-
+    return (
+        "properties" in product_info
+        and "compile_and_runtime" in product_info.properties
+        and product_info.properties.compile_and_runtime == "yes"
+    )
 
 
 def product_test_property(product_info, property_name, property_value):
     """Generic function to test if a product has a property set to a value
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :param property_name: The name of the property to check
     :param property_value: The value of the property to test
@@ -1270,15 +1383,18 @@ def product_test_property(product_info, property_name, property_value):
     :rtype: boolean
     """
     # first check if product has the property
-    if not ("properties" in product_info and
-            property_name in product_info.properties):
+    if not ("properties" in product_info and property_name in product_info.properties):
         return False
-  
+
     # then check that the property is set to property_value
-    eval_expression = 'product_info.properties.%s == "%s"' % (property_name,property_value)
+    eval_expression = 'product_info.properties.%s == "%s"' % (
+        property_name,
+        property_value,
+    )
     result = eval(eval_expression)
     return result
 
+
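For orientation, the eval in product_test_property above is just a dynamic attribute comparison; a minimal sketch of the same check without eval, assuming product_info.properties supports plain attribute access as the other helpers in this file already rely on (hypothetical helper name, illustrative only):

    # Sketch: mirrors eval('product_info.properties.<name> == "<value>"') above.
    def _test_property_sketch(properties, property_name, property_value):
        # the original formats property_value into a string literal, so compare as str
        return getattr(properties, property_name, None) == str(property_value)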
 def check_system_dep(distrib, check_cmd, product_info):
     """Search for system dependencies, check if installed
     :param distrib: The linux distribution (CO7, DB10...)
@@ -1286,80 +1402,83 @@ def check_system_dep(distrib, check_cmd, product_info):
     :param product_info Config: The configuration specific to the product
     :rtype: two dictionaries for runtime and compile time dependencies with text status
     """
-    runtime_dep={}
-    build_dep={}
+    runtime_dep = {}
+    build_dep = {}
 
     if "system_info" in product_info:
 
-        sysinfo=product_info.system_info
+        sysinfo = product_info.system_info
         additional_sysinfo = None
 
-        for key in sysinfo :
-            if distrib in key :
+        for key in sysinfo:
+            if distrib in key:
                 additional_sysinfo = sysinfo[key]
 
-        if check_cmd[0]=="rpm":
+        if check_cmd[0] == "rpm":
             if "rpm" in sysinfo:
                 for pkg in sysinfo.rpm:
-                    runtime_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
+                    runtime_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
             if "rpm_dev" in sysinfo:
                 for pkg in sysinfo.rpm_dev:
-                    build_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
-            if additional_sysinfo :
+                    build_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
+            if additional_sysinfo:
                 if "rpm" in additional_sysinfo:
                     for pkg in additional_sysinfo.rpm:
-                        runtime_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
+                        runtime_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
                 if "rpm_dev" in additional_sysinfo:
                     for pkg in additional_sysinfo.rpm_dev:
-                        build_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
-        #if check_cmd[0]=="apt" or check_cmd[0]=="dpkg-query":
+                        build_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
+        # if check_cmd[0]=="apt" or check_cmd[0]=="dpkg-query":
         else:
             if "apt" in sysinfo:
                 for pkg in sysinfo.apt:
-                    runtime_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
+                    runtime_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
             if "apt_dev" in sysinfo:
                 for pkg in sysinfo.apt_dev:
-                    build_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
-            if additional_sysinfo :
+                    build_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
+            if additional_sysinfo:
                 if "apt" in additional_sysinfo:
                     for pkg in additional_sysinfo.apt:
-                        runtime_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
+                        runtime_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
                 if "apt_dev" in additional_sysinfo:
                     for pkg in additional_sysinfo.apt_dev:
-                        build_dep[pkg]=src.system.check_system_pkg(check_cmd,pkg)
+                        build_dep[pkg] = src.system.check_system_pkg(check_cmd, pkg)
 
-    return runtime_dep,build_dep
+    return runtime_dep, build_dep
 
 
 def get_product_components(product_info):
     """Get the component list to generate with the product
-    
-    :param product_info Config: The configuration specific to 
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: The list of names of the components
     :rtype: List
-    
+
     """
     if not product_is_generated(product_info):
         return []
-    
+
     compo_list = []
     if "component_name" in product_info:
         compo_list = product_info.component_name
-    
+
         if isinstance(compo_list, str):
-            compo_list = [ compo_list ]
+            compo_list = [compo_list]
 
     return compo_list
+
+
 def product_is_wheel(product_info):
-    """ tells whether a product is a wheel
-    
-    :param product_info Config: The configuration specific to 
+    """tells whether a product is a wheel
+
+    :param product_info Config: The configuration specific to
                                the product
     :return: True if the product has a wheel, else False
     :rtype: Boolean
     """
-    return ("properties" in product_info and
-            "is_wheel" in product_info.properties and
-            product_info.properties.is_wheel == "yes")
-
+    return (
+        "properties" in product_info
+        and "is_wheel" in product_info.properties
+        and product_info.properties.is_wheel == "yes"
+    )
index 5957d6ddcf8a12cbe2d5d971bf3eac266ca50ada..c73558bb3b5e4688ae05b41a2ff24dc17bafac67 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 # Copyright 2004-2007 by Vinay Sajip. All Rights Reserved.
 #
@@ -33,7 +33,7 @@
 #  License along with this library; if not, write to the Free Software
 #  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 
-# CEA adds : 
+# CEA adds :
 # Possibility to overwrites value in a pyconf file
 # Python 3 porting
 
@@ -108,56 +108,57 @@ The default value of this variable is L{defaultStreamOpener}. For an example
 of how it's used, see test_config.py (search for streamOpener).
 """
 
-__author__  = "Vinay Sajip <vinay_sajip@red-dove.com>"
-__status__  = "alpha"
-__version__ = "0.3.7.1" #modified for salomeTools
-__date__    = "05 October 2007"
+__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
+__status__ = "alpha"
+__version__ = "0.3.7.1"  # modified for salomeTools
+__date__ = "05 October 2007"
 
 import codecs
 import os
 import sys
 
-WORD = 'a'
-NUMBER = '9'
+WORD = "a"
+NUMBER = "9"
 STRING = '"'
-EOF = ''
-LCURLY = '{'
-RCURLY = '}'
-LBRACK = '['
-LBRACK2 = 'a['
-RBRACK = ']'
-LPAREN = '('
-LPAREN2 = '(('
-RPAREN = ')'
-DOT = '.'
-COMMA = ','
-COLON = ':'
-AT = '@'
-PLUS = '+'
-MINUS = '-'
-STAR = '*'
-SLASH = '/'
-MOD = '%'
-BACKTICK = '`'
-DOLLAR = '$'
-TRUE = 'True'
-FALSE = 'False'
-NONE = 'None'
+EOF = ""
+LCURLY = "{"
+RCURLY = "}"
+LBRACK = "["
+LBRACK2 = "a["
+RBRACK = "]"
+LPAREN = "("
+LPAREN2 = "(("
+RPAREN = ")"
+DOT = "."
+COMMA = ","
+COLON = ":"
+AT = "@"
+PLUS = "+"
+MINUS = "-"
+STAR = "*"
+SLASH = "/"
+MOD = "%"
+BACKTICK = "`"
+DOLLAR = "$"
+TRUE = "True"
+FALSE = "False"
+NONE = "None"
 
 WORDCHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"
 
-if sys.platform == 'win32':
-    NEWLINE = '\r\n'
-elif os.name == 'mac':
-    NEWLINE = '\r'
+if sys.platform == "win32":
+    NEWLINE = "\r\n"
+elif os.name == "mac":
+    NEWLINE = "\r"
 else:
-    NEWLINE = '\n'
+    NEWLINE = "\n"
 
 try:
     has_utf32 = True
 except:
     has_utf32 = False
 
+
 class ConfigInputStream(object):
     """
     An input stream which can read either ANSI files with default encoding
@@ -166,6 +167,7 @@ class ConfigInputStream(object):
     Handles UTF-8, UTF-16LE, UTF-16BE. Could handle UTF-32 if Python had
     built-in support.
     """
+
     def __init__(self, stream):
         """
         Initialize an instance.
@@ -178,19 +180,19 @@ class ConfigInputStream(object):
         used = -1
         if has_utf32:
             if signature == codecs.BOM_UTF32_LE:
-                encoding = 'utf-32le'
+                encoding = "utf-32le"
             elif signature == codecs.BOM_UTF32_BE:
-                encoding = 'utf-32be'
+                encoding = "utf-32be"
         if encoding is None:
             if signature[:3] == codecs.BOM_UTF8:
                 used = 3
-                encoding = 'utf-8'
+                encoding = "utf-8"
             elif signature[:2] == codecs.BOM_UTF16_LE:
                 used = 2
-                encoding = 'utf-16le'
+                encoding = "utf-16le"
             elif signature[:2] == codecs.BOM_UTF16_BE:
                 used = 2
-                encoding = 'utf-16be'
+                encoding = "utf-16be"
             else:
                 used = 0
         if used >= 0:
@@ -205,7 +207,7 @@ class ConfigInputStream(object):
         if (size == 0) or (self.encoding is None):
             rv = self.stream.read(size)
         else:
-            rv = u''
+            rv = u""
             while size > 0:
                 rv += self.stream.read(1)
                 size -= 1
@@ -216,19 +218,20 @@ class ConfigInputStream(object):
 
     def readline(self):
         if self.encoding is None:
-            line = ''
+            line = ""
         else:
-            line = u''
+            line = u""
         while True:
             c = self.stream.read(1)
             if isinstance(c, bytes):
                 c = c.decode()
             if c:
                 line += c
-            if c == '\n':
+            if c == "\n":
                 break
         return line
 
+
 class ConfigOutputStream(object):
     """
     An output stream which can write either ANSI files with default encoding
@@ -275,6 +278,7 @@ class ConfigOutputStream(object):
     def close(self):
         self.stream.close()
 
+
 def defaultStreamOpener(name):
     """\
     This function returns a read-only stream, given its name. The name passed
@@ -290,32 +294,40 @@ def defaultStreamOpener(name):
     @return: A stream with the specified name.
     @rtype: A read-only stream (file-like object)
     """
-    return ConfigInputStream(open(name, 'rb'))
+    return ConfigInputStream(open(name, "rb"))
+
 
 streamOpener = None
 
 __resolveOverwrite__ = True
 
+
 class ConfigError(Exception):
     """
     This is the base class of exceptions raised by this module.
     """
+
     pass
 
+
 class ConfigFormatError(ConfigError):
     """
     This is the base class of exceptions raised due to syntax errors in
     configurations.
     """
+
     pass
 
+
 class ConfigResolutionError(ConfigError):
     """
     This is the base class of exceptions raised due to semantic errors in
     configurations.
     """
+
     pass
 
+
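A minimal usage sketch of the exception hierarchy above, combined with the Config class defined later in this file (the file name is hypothetical; ConfigError also covers resolution and merge failures):

    try:
        cfg = Config("application.pyconf")   # opened via the module streamOpener
    except ConfigFormatError as exc:         # syntax errors while parsing
        print("malformed pyconf:", exc)
    except ConfigError as exc:               # any other configuration failure
        print("configuration error:", exc)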
 def isWord(s):
     """
     See if a passed-in value is an identifier. If the value passed in is not a
@@ -337,11 +349,12 @@ def isWord(s):
     @return: True if a word, else False
     @rtype: bool
     """
-    if type(s) != type(''):
+    if type(s) != type(""):
         return False
-    s = s.replace('_', '')
+    s = s.replace("_", "")
     return s.isalnum()
 
+
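As a quick illustration of the isWord check above (underscores are stripped before the isalnum test, non-strings are rejected):

    assert isWord("config_key")      # "_" removed, remainder is alphanumeric
    assert not isWord("a.b")         # "." is neither alphanumeric nor "_"
    assert not isWord(42)            # non-string values are rejected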
 def makePath(prefix, suffix):
     """\
     Make a path from a prefix and suffix.
@@ -360,10 +373,10 @@ def makePath(prefix, suffix):
     """
     if not prefix:
         rv = suffix
-    elif suffix[0] == '[':
+    elif suffix[0] == "[":
         rv = prefix + suffix
     else:
-        rv = prefix + '.' + suffix
+        rv = prefix + "." + suffix
     return rv
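And the corresponding behaviour of makePath, derived directly from the three branches above (example paths are hypothetical):

    assert makePath("", "APPLICATION") == "APPLICATION"            # empty prefix
    assert makePath("products", "[0]") == "products[0]"            # bracketed suffix
    assert makePath("APPLICATION", "name") == "APPLICATION.name"   # dotted join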
 
 
@@ -378,6 +391,7 @@ class Container(object):
 
         a.list.of[1].or['more'].elements
     """
+
     def __init__(self, parent):
         """
         Initialize an instance.
@@ -385,7 +399,7 @@ class Container(object):
         @param parent: The parent of this instance in the hierarchy.
         @type parent: A L{Container} instance.
         """
-        object.__setattr__(self, 'parent', parent)
+        object.__setattr__(self, "parent", parent)
 
     def setPath(self, path):
         """
@@ -394,7 +408,7 @@ class Container(object):
         to this instance from the root of the hierarchy.
         @type path: str
         """
-        object.__setattr__(self, 'path', path)
+        object.__setattr__(self, "path", path)
 
     def evaluate(self, item):
         """
@@ -434,18 +448,19 @@ class Container(object):
 
     def writeValue(self, value, stream, indent, evaluated=False):
         if isinstance(self, Mapping):
-            indstr = ' '
+            indstr = " "
         else:
-            indstr = indent * '  '
+            indstr = indent * "  "
         if isinstance(value, Reference) or isinstance(value, Expression):
             if not evaluated:
-                stream.write('%s%r%s' % (indstr, value, NEWLINE))
+                stream.write("%s%r%s" % (indstr, value, NEWLINE))
             else:
-                stream.write('%s%r%s' % (indstr, self.evaluate(value), NEWLINE))
+                stream.write("%s%r%s" % (indstr, self.evaluate(value), NEWLINE))
         else:
-            if isinstance(value, str): # and not isWord(value):
+            if isinstance(value, str):  # and not isWord(value):
                 value = repr(value)
-            stream.write('%s%s%s' % (indstr, value, NEWLINE))
+            stream.write("%s%s%s" % (indstr, value, NEWLINE))
+
 
 class Mapping(Container):
     """
@@ -460,34 +475,34 @@ class Mapping(Container):
         @type parent: A L{Container} instance.
         """
         Container.__init__(self, parent)
-        object.__setattr__(self, 'path', '')
-        object.__setattr__(self, 'data', {})
-        object.__setattr__(self, 'order', [])   # to preserve ordering
-        object.__setattr__(self, 'comments', {})
+        object.__setattr__(self, "path", "")
+        object.__setattr__(self, "data", {})
+        object.__setattr__(self, "order", [])  # to preserve ordering
+        object.__setattr__(self, "comments", {})
 
     def __delitem__(self, key):
         """
         Remove an item
         """
-        data = object.__getattribute__(self, 'data')
+        data = object.__getattribute__(self, "data")
         if key not in data:
             raise AttributeError(key)
-        order = object.__getattribute__(self, 'order')
-        comments = object.__getattribute__(self, 'comments')
+        order = object.__getattribute__(self, "order")
+        comments = object.__getattribute__(self, "comments")
         del data[key]
         order.remove(key)
         del comments[key]
 
     def __getitem__(self, key):
-        data = object.__getattribute__(self, 'data')
+        data = object.__getattribute__(self, "data")
         if key not in data:
             raise AttributeError("Unknown pyconf key: '%s'" % key)
         rv = data[key]
         return self.evaluate(rv)
 
     __getattr__ = __getitem__
-    
-    '''
+
+    """
     def __getattribute__(self, name):
         if name == "__dict__":
             return {}
@@ -504,15 +519,15 @@ class Mapping(Container):
             if rv is None:
                 raise AttributeError(name)
         return rv
-    '''
+    """
 
     def iteritems(self):
         for key in self.keys():
-            yield(key, self[key])
+            yield (key, self[key])
         raise StopIteration
 
     def __contains__(self, item):
-        order = object.__getattribute__(self, 'order')
+        order = object.__getattribute__(self, "order")
         return item in order
 
     def addMapping(self, key, value, comment, setting=False):
@@ -530,9 +545,9 @@ class Mapping(Container):
         @raise ConfigFormatError: If an existing key is seen
         again and setting is False.
         """
-        data = object.__getattribute__(self, 'data')
-        order = object.__getattribute__(self, 'order')
-        comments = object.__getattribute__(self, 'comments')
+        data = object.__getattribute__(self, "data")
+        order = object.__getattribute__(self, "order")
+        comments = object.__getattribute__(self, "comments")
 
         data[key] = value
         if key not in order:
@@ -550,7 +565,7 @@ class Mapping(Container):
         """
         Return the keys in a similar way to a dictionary.
         """
-        return object.__getattribute__(self, 'order')
+        return object.__getattribute__(self, "order")
 
     def get(self, key, default=None):
         """
@@ -561,19 +576,19 @@ class Mapping(Container):
         return default
 
     def __str__(self):
-        return str(object.__getattribute__(self, 'data'))
+        return str(object.__getattribute__(self, "data"))
 
     def __repr__(self):
-        return repr(object.__getattribute__(self, 'data'))
+        return repr(object.__getattribute__(self, "data"))
 
     def __len__(self):
-        return len(object.__getattribute__(self, 'order'))
+        return len(object.__getattribute__(self, "order"))
 
     def __iter__(self):
         return self.iterkeys()
 
     def iterkeys(self):
-        order = object.__getattribute__(self, 'order')
+        order = object.__getattribute__(self, "order")
         return order.__iter__()
 
     def writeToStream(self, stream, indent, container, evaluated=False):
@@ -589,15 +604,15 @@ class Mapping(Container):
         @param container: The container of this instance
         @type container: L{Container}
         """
-        indstr = indent * '  '
+        indstr = indent * "  "
         if len(self) == 0:
-            stream.write(' { }%s' % NEWLINE)
+            stream.write(" { }%s" % NEWLINE)
         else:
             if isinstance(container, Mapping):
                 stream.write(NEWLINE)
-            stream.write('%s{%s' % (indstr, NEWLINE))
+            stream.write("%s{%s" % (indstr, NEWLINE))
             self.__save__(stream, indent + 1, evaluated=evaluated)
-            stream.write('%s}%s' % (indstr, NEWLINE))
+            stream.write("%s}%s" % (indstr, NEWLINE))
 
     def __save__(self, stream, indent=0, evaluated=False):
         """
@@ -607,10 +622,10 @@ class Mapping(Container):
         @param indent: The indentation level for the output.
         @type indent: int
         """
-        indstr = indent * '  '
-        order = object.__getattribute__(self, 'order')
-        data = object.__getattribute__(self, 'data')
-        maxlen = 0 # max(map(lambda x: len(x), order))
+        indstr = indent * "  "
+        order = object.__getattribute__(self, "order")
+        data = object.__getattribute__(self, "data")
+        maxlen = 0  # max(map(lambda x: len(x), order))
         for key in order:
             comment = self.comments[key]
             if isWord(key):
@@ -618,16 +633,17 @@ class Mapping(Container):
             else:
                 skey = repr(key)
             if comment:
-                stream.write('%s#%s' % (indstr, comment))
+                stream.write("%s#%s" % (indstr, comment))
             if skey.startswith("u'"):
                 skey = skey[1:]
-            stream.write('%s%-*s :' % (indstr, maxlen, skey))
+            stream.write("%s%-*s :" % (indstr, maxlen, skey))
             value = data[key]
             if isinstance(value, Container):
                 value.writeToStream(stream, indent, self, evaluated=evaluated)
             else:
                 self.writeValue(value, stream, indent, evaluated=evaluated)
 
+
 class Config(Mapping):
     """
     This class represents a configuration, and is the only one which clients
@@ -640,11 +656,12 @@ class Config(Mapping):
 
         An instance acts as a namespace.
         """
+
         def __init__(self):
             self.sys = sys
             self.os = os
 
-    def __init__(self, streamOrFile=None, parent=None, PWD = None):
+    def __init__(self, streamOrFile=None, parent=None, PWD=None):
         """
         Initializes an instance.
 
@@ -657,14 +674,14 @@ class Config(Mapping):
         in the configuration hierarchy.
         @type parent: a L{Container} instance.
         """
-        try: # Python 3 compatibility
+        try:  # Python 3 compatibility
             if isinstance(streamOrFile, unicode):
                 streamOrFile = streamOrFile.encode()
         except NameError:
             pass
         Mapping.__init__(self, parent)
-        object.__setattr__(self, 'reader', ConfigReader(self))
-        object.__setattr__(self, 'namespaces', [Config.Namespace()])
+        object.__setattr__(self, "reader", ConfigReader(self))
+        object.__setattr__(self, "namespaces", [Config.Namespace()])
         if streamOrFile is not None:
             if isinstance(streamOrFile, str) or isinstance(streamOrFile, bytes):
                 global streamOpener
@@ -692,7 +709,7 @@ class Config(Mapping):
         existing keys.
         @raise ConfigFormatError: if there is a syntax error in the stream.
         """
-        reader = object.__getattribute__(self, 'reader')
+        reader = object.__getattribute__(self, "reader")
         reader.load(stream)
         stream.close()
 
@@ -707,7 +724,7 @@ class Config(Mapping):
         an additional level of indirection.
         @type name: str
         """
-        namespaces = object.__getattribute__(self, 'namespaces')
+        namespaces = object.__getattribute__(self, "namespaces")
         if name is None:
             namespaces.append(ns)
         else:
@@ -721,7 +738,7 @@ class Config(Mapping):
         called.
         @type name: str
         """
-        namespaces = object.__getattribute__(self, 'namespaces')
+        namespaces = object.__getattribute__(self, "namespaces")
         if name is None:
             namespaces.remove(ns)
         else:
@@ -750,23 +767,26 @@ class Config(Mapping):
         @rtype: any
         @raise ConfigError: If the path is invalid
         """
-        s = 'self.' + path
+        s = "self." + path
         try:
             return eval(s)
         except Exception as e:
             raise ConfigError(str(e))
 
+
 class Sequence(Container):
     """
     This internal class implements a value which is a sequence of other values.
     """
+
     class SeqIter(object):
         """
         This internal class implements an iterator for a L{Sequence} instance.
         """
+
         def __init__(self, seq):
             self.seq = seq
-            self.limit = len(object.__getattribute__(seq, 'data'))
+            self.limit = len(object.__getattribute__(seq, "data"))
             self.index = 0
 
         def __iter__(self):
@@ -778,9 +798,9 @@ class Sequence(Container):
             rv = self.seq[self.index]
             self.index += 1
             return rv
-        
+
         # This method is for python3 compatibility
-        def __next__(self): 
+        def __next__(self):
             if self.index >= self.limit:
                 raise StopIteration
             rv = self.seq[self.index]
@@ -795,8 +815,8 @@ class Sequence(Container):
         @type parent: A L{Container} instance.
         """
         Container.__init__(self, parent)
-        object.__setattr__(self, 'data', [])
-        object.__setattr__(self, 'comments', [])
+        object.__setattr__(self, "data", [])
+        object.__setattr__(self, "comments", [])
 
     def append(self, item, comment):
         """
@@ -807,17 +827,20 @@ class Sequence(Container):
         @param comment: A comment for the item.
         @type comment: str
         """
-        data = object.__getattribute__(self, 'data')
-        comments = object.__getattribute__(self, 'comments')
+        data = object.__getattribute__(self, "data")
+        comments = object.__getattribute__(self, "comments")
         data.append(item)
         comments.append(comment)
 
     def __getitem__(self, index):
-        data = object.__getattribute__(self, 'data')
+        data = object.__getattribute__(self, "data")
         try:
             rv = data[index]
         except (IndexError, KeyError, TypeError):
-            raise ConfigResolutionError('Invalid pyconf index %r for %r' % (index, object.__getattribute__(self, 'path')))
+            raise ConfigResolutionError(
+                "Invalid pyconf index %r for %r"
+                % (index, object.__getattribute__(self, "path"))
+            )
         if not isinstance(rv, list):
             rv = self.evaluate(rv)
         else:
@@ -832,13 +855,13 @@ class Sequence(Container):
         return Sequence.SeqIter(self)
 
     def __repr__(self):
-        return repr(object.__getattribute__(self, 'data'))
+        return repr(object.__getattribute__(self, "data"))
 
     def __str__(self):
-        return str(self[:]) # using the slice evaluates the contents
+        return str(self[:])  # using the slice evaluates the contents
 
     def __len__(self):
-        return len(object.__getattribute__(self, 'data'))
+        return len(object.__getattribute__(self, "data"))
 
     def writeToStream(self, stream, indent, container, evaluated=False):
         """
@@ -853,15 +876,15 @@ class Sequence(Container):
         @param container: The container of this instance
         @type container: L{Container}
         """
-        indstr = indent * '  '
+        indstr = indent * "  "
         if len(self) == 0:
-            stream.write(' [ ]%s' % NEWLINE)
+            stream.write(" [ ]%s" % NEWLINE)
         else:
             if isinstance(container, Mapping):
                 stream.write(NEWLINE)
-            stream.write('%s[%s' % (indstr, NEWLINE))
+            stream.write("%s[%s" % (indstr, NEWLINE))
             self.__save__(stream, indent + 1, evaluated=evaluated)
-            stream.write('%s]%s' % (indstr, NEWLINE))
+            stream.write("%s]%s" % (indstr, NEWLINE))
 
     def __save__(self, stream, indent, evaluated=False):
         """
@@ -873,23 +896,25 @@ class Sequence(Container):
         """
         if indent == 0:
             raise ConfigError("sequence cannot be saved as a top-level item")
-        data = object.__getattribute__(self, 'data')
-        comments = object.__getattribute__(self, 'comments')
-        indstr = indent * '  '
+        data = object.__getattribute__(self, "data")
+        comments = object.__getattribute__(self, "comments")
+        indstr = indent * "  "
         for i in range(0, len(data)):
             value = data[i]
             comment = comments[i]
             if comment:
-                stream.write('%s#%s' % (indstr, comment))
+                stream.write("%s#%s" % (indstr, comment))
             if isinstance(value, Container):
                 value.writeToStream(stream, indent, self, evaluated=evaluated)
             else:
                 self.writeValue(value, stream, indent, evaluated=evaluated)
 
+
 class Reference(object):
     """
     This internal class implements a value which is a reference to another value.
     """
+
     def __init__(self, config, type, ident):
         """
         Initialize an instance.
@@ -926,7 +951,7 @@ class Reference(object):
         @rtype: L{Config}
         """
         while (container is not None) and not isinstance(container, Config):
-            container = object.__getattribute__(container, 'parent')
+            container = object.__getattribute__(container, "parent")
         return container
 
     def resolve(self, container):
@@ -940,11 +965,11 @@ class Reference(object):
         @raise ConfigResolutionError: If resolution fails.
         """
         rv = None
-        path = object.__getattribute__(container, 'path')
+        path = object.__getattribute__(container, "path")
         current = container
         while current is not None:
             if self.type == BACKTICK:
-                namespaces = object.__getattribute__(current, 'namespaces')
+                namespaces = object.__getattribute__(current, "namespaces")
                 found = False
                 for ns in namespaces:
                     try:
@@ -966,18 +991,20 @@ class Reference(object):
                 except:
                     rv = None
                     pass
-            current = object.__getattribute__(current, 'parent')
+            current = object.__getattribute__(current, "parent")
         if current is None:
-            raise ConfigResolutionError("unable to evaluate %r in the configuration %s" % (self, path))
+            raise ConfigResolutionError(
+                "unable to evaluate %r in the configuration %s" % (self, path)
+            )
         return rv
 
     def __str__(self):
         s = self.elements[0]
         for tt, tv in self.elements[1:]:
             if tt == DOT:
-                s += '.%s' % tv
+                s += ".%s" % tv
             else:
-                s += '[%r]' % tv
+                s += "[%r]" % tv
         if self.type == BACKTICK:
             return BACKTICK + s + BACKTICK
         else:
@@ -986,10 +1013,12 @@ class Reference(object):
     def __repr__(self):
         return self.__str__()
 
+
 class Expression(object):
     """
     This internal class implements a value which is obtained by evaluating an expression.
     """
+
     def __init__(self, op, lhs, rhs):
         """
         Initialize an instance.
@@ -1006,7 +1035,7 @@ class Expression(object):
         self.rhs = rhs
 
     def __str__(self):
-        return '%r %s %r' % (self.lhs, self.op, self.rhs)
+        return "%r %s %r" % (self.lhs, self.op, self.rhs)
 
     def __repr__(self):
         return self.__str__()
@@ -1047,6 +1076,7 @@ class Expression(object):
             rv = lhs % rhs
         return rv
 
+
 class ConfigReader(object):
     """
     This internal class implements a parser for configurations.
@@ -1059,12 +1089,12 @@ class ConfigReader(object):
         self.colno = 0
         self.lastc = None
         self.last_token = None
-        self.commentchars = '#'
-        self.whitespace = ' \t\r\n'
-        self.quotes = '\'"'
-        self.punct = ':-+*/%,.{}[]()@`$'
-        self.digits = '0123456789'
-        self.wordchars = '%s' % WORDCHARS # make a copy
+        self.commentchars = "#"
+        self.whitespace = " \t\r\n"
+        self.quotes = "'\""
+        self.punct = ":-+*/%,.{}[]()@`$"
+        self.digits = "0123456789"
+        self.wordchars = "%s" % WORDCHARS  # make a copy
         self.identchars = self.wordchars + self.digits
         self.pbchars = []
         self.pbtokens = []
@@ -1092,14 +1122,14 @@ class ConfigReader(object):
         """
         if self.pbchars:
             c = self.pbchars.pop()
-            if isinstance(c,bytes):
+            if isinstance(c, bytes):
                 c = c.decode()
         else:
             c = self.stream.read(1)
-            if isinstance(c,bytes):
+            if isinstance(c, bytes):
                 c = c.decode()
             self.colno += 1
-            if c == '\n':
+            if c == "\n":
                 self.lineno += 1
                 self.colno = 1
         return c
@@ -1124,16 +1154,16 @@ class ConfigReader(object):
             return self.pbtokens.pop()
         stream = self.stream
         self.comment = None
-        token = ''
+        token = ""
         tt = EOF
         while True:
             c = self.getChar()
             if not c:
                 break
-            elif c == '#':
-                if self.comment :
-                    self.comment += '#' + stream.readline()
-                else :
+            elif c == "#":
+                if self.comment:
+                    self.comment += "#" + stream.readline()
+                else:
                     self.comment = stream.readline()
                 self.lineno += 1
                 continue
@@ -1161,14 +1191,21 @@ class ConfigReader(object):
                         break
                     token += c
                     if (c == quote) and not escaped:
-                        if not multiline or (len(token) >= 6 and token.endswith(token[:3]) and token[-4] != '\\'):
+                        if not multiline or (
+                            len(token) >= 6
+                            and token.endswith(token[:3])
+                            and token[-4] != "\\"
+                        ):
                             break
-                    if c == '\\':
+                    if c == "\\":
                         escaped = not escaped
                     else:
                         escaped = False
                 if not c:
-                    raise ConfigFormatError('%s: Unterminated quoted string: %r, %r' % (self.location(), token, c))
+                    raise ConfigFormatError(
+                        "%s: Unterminated quoted string: %r, %r"
+                        % (self.location(), token, c)
+                    )
                 break
             if c in self.whitespace:
                 self.lastc = c
@@ -1176,10 +1213,10 @@ class ConfigReader(object):
             elif c in self.punct:
                 token = c
                 tt = c
-                if (self.lastc == ']') or (self.lastc in self.identchars):
-                    if c == '[':
+                if (self.lastc == "]") or (self.lastc in self.identchars):
+                    if c == "[":
                         tt = LBRACK2
-                    elif c == '(':
+                    elif c == "(":
                         tt = LPAREN2
                 break
             elif c in self.digits:
@@ -1191,7 +1228,7 @@ class ConfigReader(object):
                         break
                     if c in self.digits:
                         token += c
-                    elif (c == '.') and token.find('.') < 0:
+                    elif (c == ".") and token.find(".") < 0:
                         token += c
                     else:
                         if c and (c not in self.whitespace):
@@ -1205,7 +1242,7 @@ class ConfigReader(object):
                 while c and (c in self.identchars):
                     token += c
                     c = self.getChar()
-                if c: # and c not in self.whitespace:
+                if c:  # and c not in self.whitespace:
                     self.pbchars.append(c)
                 if token == "True":
                     tt = TRUE
@@ -1215,16 +1252,18 @@ class ConfigReader(object):
                     tt = NONE
                 break
             else:
-                raise ConfigFormatError('%s: Unexpected character: %r' % (self.location(), c))
+                raise ConfigFormatError(
+                    "%s: Unexpected character: %r" % (self.location(), c)
+                )
         if token:
             self.lastc = token[-1]
         else:
             self.lastc = None
         self.last_token = tt
-        
+
         # Python 2.x specific unicode conversion
         if sys.version_info[0] == 2 and tt == WORD and isinstance(token, unicode):
-            token = token.encode('ascii')
+            token = token.encode("ascii")
         return (tt, token)
 
     def load(self, stream, parent=None, suffix=None):
@@ -1244,13 +1283,19 @@ class ConfigReader(object):
         """
         if parent is not None:
             if suffix is None:
-                raise ConfigError("internal error: load called with parent but no suffix")
-            self.config.setPath(makePath(object.__getattribute__(parent, 'path'), suffix))
+                raise ConfigError(
+                    "internal error: load called with parent but no suffix"
+                )
+            self.config.setPath(
+                makePath(object.__getattribute__(parent, "path"), suffix)
+            )
         self.setStream(stream)
         self.token = self.getToken()
         self.parseMappingBody(self.config)
         if self.token[0] != EOF:
-            raise ConfigFormatError('%s: expecting EOF, found %r' % (self.location(), self.token[1]))
+            raise ConfigFormatError(
+                "%s: expecting EOF, found %r" % (self.location(), self.token[1])
+            )
 
     def setStream(self, stream):
         """
@@ -1260,10 +1305,10 @@ class ConfigReader(object):
         @type stream: A stream (file-like object).
         """
         self.stream = stream
-        if hasattr(stream, 'name'):
+        if hasattr(stream, "name"):
             filename = stream.name
         else:
-            filename = '?'
+            filename = "?"
         self.filename = filename
         self.lineno = 1
         self.colno = 1
@@ -1281,7 +1326,9 @@ class ConfigReader(object):
         @raise ConfigFormatError: If the token does not match what's expected.
         """
         if self.token[0] != t:
-            raise ConfigFormatError("%s: expecting %s, found %r" % (self.location(), t, self.token[1]))
+            raise ConfigFormatError(
+                "%s: expecting %s, found %r" % (self.location(), t, self.token[1])
+            )
         rv = self.token
         self.token = self.getToken()
         return rv
@@ -1312,7 +1359,7 @@ class ConfigReader(object):
             suffix = tv
         elif tt == STRING:
             key = eval(tv)
-            suffix = '[%s]' % tv
+            suffix = "[%s]" % tv
         else:
             msg = "%s: expecting word or string, found %r"
             raise ConfigFormatError(msg % (self.location(), tv))
@@ -1326,13 +1373,12 @@ class ConfigReader(object):
         try:
             parent.addMapping(key, value, comment)
         except Exception as e:
-            raise ConfigFormatError("%s: %s, %r" % (self.location(), e,
-                                    self.token[1]))
+            raise ConfigFormatError("%s: %s, %r" % (self.location(), e, self.token[1]))
         tt = self.token[0]
         if tt not in [EOF, WORD, STRING, RCURLY, COMMA]:
             msg = "%s: expecting one of EOF, WORD, STRING, \
 RCURLY, COMMA, found %r"
-            raise ConfigFormatError(msg  % (self.location(), self.token[1]))
+            raise ConfigFormatError(msg % (self.location(), self.token[1]))
         if tt == COMMA:
             self.token = self.getToken()
 
@@ -1349,16 +1395,27 @@ RCURLY, COMMA, found %r"
         @raise ConfigFormatError: if a syntax error is found.
         """
         tt = self.token[0]
-        if tt in [STRING, WORD, NUMBER, LPAREN, DOLLAR,
-                  TRUE, FALSE, NONE, BACKTICK, MINUS]:
+        if tt in [
+            STRING,
+            WORD,
+            NUMBER,
+            LPAREN,
+            DOLLAR,
+            TRUE,
+            FALSE,
+            NONE,
+            BACKTICK,
+            MINUS,
+        ]:
             rv = self.parseScalar()
         elif tt == LBRACK:
             rv = self.parseSequence(parent, suffix)
         elif tt in [LCURLY, AT]:
             rv = self.parseMapping(parent, suffix)
         else:
-            raise ConfigFormatError("%s: unexpected input: %r" %
-               (self.location(), self.token[1]))
+            raise ConfigFormatError(
+                "%s: unexpected input: %r" % (self.location(), self.token[1])
+            )
         return rv
 
     def parseSequence(self, parent, suffix):
@@ -1374,13 +1431,24 @@ RCURLY, COMMA, found %r"
         @raise ConfigFormatError: if a syntax error is found.
         """
         rv = Sequence(parent)
-        rv.setPath(makePath(object.__getattribute__(parent, 'path'), suffix))
+        rv.setPath(makePath(object.__getattribute__(parent, "path"), suffix))
         self.match(LBRACK)
         comment = self.comment
         tt = self.token[0]
-        while tt in [STRING, WORD, NUMBER, LCURLY, LBRACK, LPAREN, DOLLAR,
-                     TRUE, FALSE, NONE, BACKTICK]:
-            suffix = '[%d]' % len(rv)
+        while tt in [
+            STRING,
+            WORD,
+            NUMBER,
+            LCURLY,
+            LBRACK,
+            LPAREN,
+            DOLLAR,
+            TRUE,
+            FALSE,
+            NONE,
+            BACKTICK,
+        ]:
+            suffix = "[%d]" % len(rv)
             value = self.parseValue(parent, suffix)
             rv.append(value, comment)
             tt = self.token[0]
@@ -1408,8 +1476,7 @@ RCURLY, COMMA, found %r"
         if self.token[0] == LCURLY:
             self.match(LCURLY)
             rv = Mapping(parent)
-            rv.setPath(
-               makePath(object.__getattribute__(parent, 'path'), suffix))
+            rv.setPath(makePath(object.__getattribute__(parent, "path"), suffix))
             self.parseMappingBody(rv)
             self.match(RCURLY)
         else:
@@ -1482,8 +1549,9 @@ RCURLY, COMMA, found %r"
             self.match(MINUS)
             rv = -self.parseScalar()
         else:
-            raise ConfigFormatError("%s: unexpected input: %r" %
-               (self.location(), self.token[1]))
+            raise ConfigFormatError(
+                "%s: unexpected input: %r" % (self.location(), self.token[1])
+            )
         return rv
 
     def parseReference(self, type):
@@ -1517,12 +1585,15 @@ RCURLY, COMMA, found %r"
             self.match(LBRACK2)
             tt, tv = self.token
             if tt not in [NUMBER, STRING]:
-                raise ConfigFormatError("%s: expected number or string, found %r" % (self.location(), tv))
+                raise ConfigFormatError(
+                    "%s: expected number or string, found %r" % (self.location(), tv)
+                )
             self.token = self.getToken()
             tv = eval(tv)
             self.match(RBRACK)
             ref.addElement(LBRACK, tv)
 
+
 def defaultMergeResolve(map1, map2, key):
     """\
     A default resolver for merge conflicts. 
@@ -1553,6 +1624,7 @@ def defaultMergeResolve(map1, map2, key):
         rv = "mismatch"
     return rv
 
+
 def overwriteMergeResolve(map1, map2, key):
     """
     An overwriting resolver for merge conflicts. Calls L{defaultMergeResolve},
@@ -1570,12 +1642,14 @@ def overwriteMergeResolve(map1, map2, key):
         rv = "overwrite"
     return rv
 
+
 def deepCopyMapping(inMapping):
     res = Mapping()
     for element in inMapping:
         res[element] = inMapping[element]
     return res
 
+
 class ConfigMerger(object):
     """
     This class is used for merging two configurations. If a key exists in the
@@ -1621,9 +1695,9 @@ class ConfigMerger(object):
         @type map2: L{Mapping}.
         """
 
-        overwrite_list = object.__getattribute__(seq2, 'data')
+        overwrite_list = object.__getattribute__(seq2, "data")
         for overwrite_instruction in overwrite_list:
-            object.__setattr__(overwrite_instruction, 'parent', map1)
+            object.__setattr__(overwrite_instruction, "parent", map1)
             if "__condition__" in overwrite_instruction.keys():
                 overwrite_condition = overwrite_instruction["__condition__"]
                 if eval(overwrite_condition, globals(), map1):
@@ -1631,15 +1705,19 @@ class ConfigMerger(object):
                         if key == "__condition__":
                             continue
                         try:
-                            exec( 'map1.' + key + " = " + repr(overwrite_instruction[key]))
+                            exec(
+                                "map1." + key + " = " + repr(overwrite_instruction[key])
+                            )
                         except:
-                            exec('map1.' + key + " = " + str(overwrite_instruction[key]))
+                            exec(
+                                "map1." + key + " = " + str(overwrite_instruction[key])
+                            )
             else:
                 for key in overwrite_instruction.keys():
                     try:
-                        exec('map1.' + key + " = " + repr(overwrite_instruction[key]))
+                        exec("map1." + key + " = " + repr(overwrite_instruction[key]))
                     except:
-                        exec('map1.' + key + " = " + str(overwrite_instruction[key]))
+                        exec("map1." + key + " = " + str(overwrite_instruction[key]))
 
     def mergeMapping(self, map1, map2):
         """
@@ -1655,12 +1733,12 @@ class ConfigMerger(object):
         global __resolveOverwrite__
         for key in map2.keys():
             if __resolveOverwrite__ and key == "__overwrite__":
-                self.overwriteKeys(map1,map2[key])
+                self.overwriteKeys(map1, map2[key])
 
             elif key not in keys:
                 map1[key] = map2[key]
-                if isinstance(map1[key], Container) :
-                    object.__setattr__(map1[key], 'parent', map1)
+                if isinstance(map1[key], Container):
+                    object.__setattr__(map1[key], "parent", map1)
             else:
                 obj1 = map1[key]
                 obj2 = map2[key]
@@ -1672,7 +1750,7 @@ class ConfigMerger(object):
                 elif decision == "overwrite":
                     map1[key] = obj2
                     if isinstance(map1[key], Container):
-                        object.__setattr__(map1[key], 'parent', map1)
+                        object.__setattr__(map1[key], "parent", map1)
                 elif decision == "mismatch":
                     self.handleMismatch(obj1, obj2)
                 else:
@@ -1690,12 +1768,12 @@ class ConfigMerger(object):
         @param seq2: The sequence to merge.
         @type seq2: L{Sequence}.
         """
-        data1 = object.__getattribute__(seq1, 'data')
-        data2 = object.__getattribute__(seq2, 'data')
+        data1 = object.__getattribute__(seq1, "data")
+        data2 = object.__getattribute__(seq2, "data")
         for obj in data2:
             data1.append(obj)
-        comment1 = object.__getattribute__(seq1, 'comments')
-        comment2 = object.__getattribute__(seq2, 'comments')
+        comment1 = object.__getattribute__(seq1, "comments")
+        comment2 = object.__getattribute__(seq2, "comments")
         for obj in comment2:
             comment1.append(obj)
 
@@ -1710,6 +1788,7 @@ class ConfigMerger(object):
         """
         raise ConfigError("unable to merge %r with %r" % (obj1, obj2))
 
+
 class ConfigList(list):
     """
     This class implements an ordered list of configurations and allows you
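
A resolver, as used by ConfigMerger above, is any callable taking (map1, map2, key) and returning "merge", "overwrite" or "mismatch". The sketch below is illustrative only: it assumes this vendored module is importable as src.pyconf and that ConfigMerger keeps the upstream config-module interface (a resolver argument and a merge(cfg1, cfg2) method).

    # Illustrative sketch only; assumes the vendored module is importable as
    # src.pyconf and that ConfigMerger keeps the upstream interface:
    # ConfigMerger(resolver) and merger.merge(cfg1, cfg2).
    import src.pyconf as PYCONF

    def preferSecondResolve(map1, map2, key):
        # Same contract as defaultMergeResolve / overwriteMergeResolve:
        # return "merge", "overwrite" or "mismatch".
        rv = PYCONF.defaultMergeResolve(map1, map2, key)
        if rv == "mismatch":
            rv = "overwrite"  # let the second configuration win instead of raising
        return rv

    def merge_configs(cfg1, cfg2):
        """Merge cfg2 into cfg1 in place and return cfg1."""
        merger = PYCONF.ConfigMerger(resolver=preferSecondResolve)
        merger.merge(cfg1, cfg2)
        return cfg1
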
index 1af9116677d72a9969a3b197376751e66d925140..f5ffcf98a76ba2429da47b0b1ce84501ff90ab63 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2018-20xx  CEA/DEN
 #
@@ -29,206 +29,205 @@ import pprint as PP
 # global module variable
 _OK_STATUS = "OK"
 _KO_STATUS = "KO"
-_NA_STATUS = "NA" # not applicable
-_UNKNOWN_STATUS = "ND" # not defined
+_NA_STATUS = "NA"  # not applicable
+_UNKNOWN_STATUS = "ND"  # not defined
 _KNOWNFAILURE_STATUS = "KF"
 _TIMEOUT_STATUS = "TIMEOUT"
 
 #####################################################
 class ReturnCode(object):
-  """
-  assume simple return code for methods, with explanation as 'why'.
-  Obviously why is 'why it is not OK', 
-  but also why is 'why it is OK' (if you want). 
-  Optionaly contains a return value as self.getValue()
-  
-  | Usage:
-  | >> import returnCode as RCO
-  | 
-  | >> aValue = doSomethingToReturn()
-  | >> return RCO.ReturnCode("KO", "there is no problem here", aValue)
-  | >> return RCO.ReturnCode("KO", "there is a problem here because etc", None)
-  | >> return RCO.ReturnCode("TIMEOUT_STATUS", "too long here because etc")
-  | >> return RCO.ReturnCode("NA", "not applicable here because etc")
-  | 
-  | >> rc = doSomething()
-  | >> print("short returnCode string", str(rc))
-  | >> print("long returnCode string with value", repr(rc))
-  | 
-  | >> rc1 = RCO.ReturnCode("OK", ...)
-  | >> rc2 = RCO.ReturnCode("KO", ...)
-  | >> rcFinal = rc1 + rc2
-  | >> print("long returnCode string with value", repr(rcFinal)) # KO!
-  | 
-  | >> rc = doSomething()
-  | >> if rc.isOk(): doSomethingAsOK()
-  | >> if not rc.isOk(): doSomethingAsKO()
-  | 
-  | >> rc = doSomething().raiseIfKo() # raise Exception if KO
-  | >> doSomethingWithValue(rc.getValue()) # here i am sure that is OK
-  """
-
-  # redunctant but useful class variables
-  OK_STATUS = _OK_STATUS
-  KO_STATUS = _KO_STATUS
-  NA_STATUS = _NA_STATUS # not applicable
-  UNKNOWN_STATUS = _UNKNOWN_STATUS # not defined
-  KNOWNFAILURE_STATUS = _KNOWNFAILURE_STATUS
-  TIMEOUT_STATUS = _TIMEOUT_STATUS
-
-  # an integer for sys.exit(anInteger)
-  # OKSYS and KOSYS seems equal on linux or windows
-  OKSYS = 0  # OK 
-  KOSYS = 1  # KO
-  NASYS = 2  # KO not applicable return code
-  NDSYS = 3  # KO not defined return code
-  KFSYS = 4  # KO known failure return code
-  TOSYS = 5  # KO time out
-  
-  _TOSYS = { 
-    OK_STATUS: OKSYS,
-    KO_STATUS: KOSYS,
-    NA_STATUS: NASYS,
-    UNKNOWN_STATUS: NDSYS,
-    KNOWNFAILURE_STATUS: KFSYS,
-    TIMEOUT_STATUS: TOSYS, 
-  }
-  _DEFAULT_WHY = "No given explanation"
-  _DEFAULT_VALUE = None
-
-  def __init__(self, status=None, why=None, value=None):
-    self._why = self._DEFAULT_WHY 
-    self._value = self._DEFAULT_VALUE
-    if status is None:
-      self._status = self.UNKNOWN_STATUS
-    else:
-      self.setStatus(status, why, value)
-    
-  def __repr__(self):
-    """complete with value, 'ok, why, value' message"""
-    res = '%s: %s --value: %s' % (self._status, self._why, PP.pformat(self._value))
-    return res
-  
-  def __str__(self):
-    """without value, only simple 'ok, why' message"""
-    res = '%s: %s' % (self._status, self._why)
-    return res
-
-  def indent(self, text, amount=5, ch=' '):
-    """indent multi lines message"""
-    padding = amount * ch
-    res = ''.join(padding + line for line in text.splitlines(True))
-    return res[amount:]
-
-  def __add__(self, rc2):
-    """allows expression 'returnCode1 + returnCode2 + ...' """
-    isOk = self.isOk() and rc2.isOk()
-    newWhy = self._toList(self.getWhy()) + self._toList(rc2.getWhy())
-    newValue = self._toList(self.getValue()) + self._toList(rc2.getValue())    
-    if isOk: 
-      return ReturnCode("OK", newWhy, newValue)
-    else:
-      return ReturnCode("KO", newWhy, newValue)
-    
-  def __radd__(self, other):
-    # see http://www.marinamele.com/2014/04/modifying-add-method-of-python-class.html
-    if other == 0:
-      return self
-    else:
-      return self.__add__(other) 
-    
-  def _toList(self, strOrList):
-    """internal use"""
-    if type(strOrList) is not list: 
-      return [strOrList]
-    else:
-      return strOrList
-
-  def toSys(self):
-    """return system return code as bash or bat"""
-    try:
-      return self._TOSYS[self._status]
-    except:
-      return self._TOSYS[self.NA_STATUS]
-
-  def toXmlPassed(self):
-    """return xml  return code as '0' (passed) or '1' (not passed)"""
-    if self.isOk(): 
-      return "0"
-    else:
-      return "1"
-    
-  def getWhy(self):
-    """return why as str or list if sum or some ReturnCode"""
-    return self._why
-    
-  def setWhy(self, why):
-    self._why = why
-    
-  def getValue(self):
-    return self._value
-    
-  def setValue(self, value):
-    """choice as not deep copying if mutables value"""
-    # TODO deepcopy maybe for value, not yet
-    self._value = value
-    
-  def setStatus(self, status, why=None, value=None):
-    if why is None: 
-      aWhy = self._DEFAULT_WHY
-    else:
-      aWhy = why
-      
-    if status in self._TOSYS.keys():
-      self._status = status
-      self._why = aWhy
-    else:
-      self._status = self.NA_STATUS
-      self._why = "Error status '%s' for '%s'" % (status, aWhy)
-      
-    if value is not None:
-      # TODO deepcopy maybe for value, not yet
-      self._value = value
-    else:
-      self._value = self._DEFAULT_VALUE
-      
-  def getStatus(self):
-    return self._status
-
-  def isOk(self):
     """
-    return True if ok.
-    inexisting method isKo(), use more explicit/readability 'if not res.isOk()'
-    """
-    return (self._status == self.OK_STATUS)
-  
-  def raiseIfKo(self):
-    """
-    raise an exception with message why if not ok, else return self.
-    This trick is to write usage
-    
+    A simple return code for methods, with an explanation as 'why'.
+    Usually why is 'why it is not OK',
+    but it can also be 'why it is OK' (if you want).
+    Optionally contains a return value as self.getValue()
+
     | Usage:
+    | >> import returnCode as RCO
+    |
+    | >> aValue = doSomethingToReturn()
+    | >> return RCO.ReturnCode("KO", "there is no problem here", aValue)
+    | >> return RCO.ReturnCode("KO", "there is a problem here because etc", None)
+    | >> return RCO.ReturnCode("TIMEOUT_STATUS", "too long here because etc")
+    | >> return RCO.ReturnCode("NA", "not applicable here because etc")
+    |
+    | >> rc = doSomething()
+    | >> print("short returnCode string", str(rc))
+    | >> print("long returnCode string with value", repr(rc))
+    |
+    | >> rc1 = RCO.ReturnCode("OK", ...)
+    | >> rc2 = RCO.ReturnCode("KO", ...)
+    | >> rcFinal = rc1 + rc2
+    | >> print("long returnCode string with value", repr(rcFinal)) # KO!
+    |
+    | >> rc = doSomething()
+    | >> if rc.isOk(): doSomethingAsOK()
+    | >> if not rc.isOk(): doSomethingAsKO()
+    |
     | >> rc = doSomething().raiseIfKo() # raise Exception if KO
     | >> doSomethingWithValue(rc.getValue()) # here i am sure that is OK
     """
-    if self.isOk(): 
-      return self
-    else:
-      raise Exception(self.getWhy())
+
+    # redundant but useful class variables
+    OK_STATUS = _OK_STATUS
+    KO_STATUS = _KO_STATUS
+    NA_STATUS = _NA_STATUS  # not applicable
+    UNKNOWN_STATUS = _UNKNOWN_STATUS  # not defined
+    KNOWNFAILURE_STATUS = _KNOWNFAILURE_STATUS
+    TIMEOUT_STATUS = _TIMEOUT_STATUS
+
+    # an integer for sys.exit(anInteger)
+    # OKSYS and KOSYS seem equal on Linux and Windows
+    OKSYS = 0  # OK
+    KOSYS = 1  # KO
+    NASYS = 2  # KO not applicable return code
+    NDSYS = 3  # KO not defined return code
+    KFSYS = 4  # KO known failure return code
+    TOSYS = 5  # KO time out
+
+    _TOSYS = {
+        OK_STATUS: OKSYS,
+        KO_STATUS: KOSYS,
+        NA_STATUS: NASYS,
+        UNKNOWN_STATUS: NDSYS,
+        KNOWNFAILURE_STATUS: KFSYS,
+        TIMEOUT_STATUS: TOSYS,
+    }
+    _DEFAULT_WHY = "No given explanation"
+    _DEFAULT_VALUE = None
+
+    def __init__(self, status=None, why=None, value=None):
+        self._why = self._DEFAULT_WHY
+        self._value = self._DEFAULT_VALUE
+        if status is None:
+            self._status = self.UNKNOWN_STATUS
+        else:
+            self.setStatus(status, why, value)
+
+    def __repr__(self):
+        """complete with value, 'ok, why, value' message"""
+        res = "%s: %s --value: %s" % (self._status, self._why, PP.pformat(self._value))
+        return res
+
+    def __str__(self):
+        """without value, only simple 'ok, why' message"""
+        res = "%s: %s" % (self._status, self._why)
+        return res
+
+    def indent(self, text, amount=5, ch=" "):
+        """indent multi lines message"""
+        padding = amount * ch
+        res = "".join(padding + line for line in text.splitlines(True))
+        return res[amount:]
+
+    def __add__(self, rc2):
+        """allows expression 'returnCode1 + returnCode2 + ...'"""
+        isOk = self.isOk() and rc2.isOk()
+        newWhy = self._toList(self.getWhy()) + self._toList(rc2.getWhy())
+        newValue = self._toList(self.getValue()) + self._toList(rc2.getValue())
+        if isOk:
+            return ReturnCode("OK", newWhy, newValue)
+        else:
+            return ReturnCode("KO", newWhy, newValue)
+
+    def __radd__(self, other):
+        # see http://www.marinamele.com/2014/04/modifying-add-method-of-python-class.html
+        if other == 0:
+            return self
+        else:
+            return self.__add__(other)
+
+    def _toList(self, strOrList):
+        """internal use"""
+        if type(strOrList) is not list:
+            return [strOrList]
+        else:
+            return strOrList
+
+    def toSys(self):
+        """return system return code as bash or bat"""
+        try:
+            return self._TOSYS[self._status]
+        except:
+            return self._TOSYS[self.NA_STATUS]
+
+    def toXmlPassed(self):
+        """return xml  return code as '0' (passed) or '1' (not passed)"""
+        if self.isOk():
+            return "0"
+        else:
+            return "1"
+
+    def getWhy(self):
+        """return why as str or list if sum or some ReturnCode"""
+        return self._why
+
+    def setWhy(self, why):
+        self._why = why
+
+    def getValue(self):
+        return self._value
+
+    def setValue(self, value):
+        """choice as not deep copying if mutables value"""
+        # TODO deepcopy maybe for value, not yet
+        self._value = value
+
+    def setStatus(self, status, why=None, value=None):
+        if why is None:
+            aWhy = self._DEFAULT_WHY
+        else:
+            aWhy = why
+
+        if status in self._TOSYS.keys():
+            self._status = status
+            self._why = aWhy
+        else:
+            self._status = self.NA_STATUS
+            self._why = "Error status '%s' for '%s'" % (status, aWhy)
+
+        if value is not None:
+            # TODO deepcopy maybe for value, not yet
+            self._value = value
+        else:
+            self._value = self._DEFAULT_VALUE
+
+    def getStatus(self):
+        return self._status
+
+    def isOk(self):
+        """
+        return True if ok.
+        there is deliberately no isKo() method; prefer the more explicit 'if not res.isOk()'
+        """
+        return self._status == self.OK_STATUS
+
+    def raiseIfKo(self):
+        """
+        raise an exception with message why if not ok, else return self.
+        This trick allows the usage below.
+
+        | Usage:
+        | >> rc = doSomething().raiseIfKo() # raise Exception if KO
+        | >> doSomethingWithValue(rc.getValue()) # here i am sure that is OK
+        """
+        if self.isOk():
+            return self
+        else:
+            raise Exception(self.getWhy())
+
 
 def ReturnCodeFromList(aListOfReturnCodes):
-  """
-  Create ReturnCode from list of ReturnCode
-  
-  convenience over "+" operand
-  """
-  res = "OK"
-  whyes = []
-  for rc in aListOfReturnCodes:
-    if not rc.isOk():
-      res = "KO"
-    whyes.append(str(rc))
-  reswhy = "\n  ".join(whyes)
-  return ReturnCode(res, "\n  " + reswhy)
-    
-    
\ No newline at end of file
+    """
+    Create ReturnCode from list of ReturnCode
+
+    convenience wrapper over the "+" operator
+    """
+    res = "OK"
+    whyes = []
+    for rc in aListOfReturnCodes:
+        if not rc.isOk():
+            res = "KO"
+        whyes.append(str(rc))
+    reswhy = "\n  ".join(whyes)
+    return ReturnCode(res, "\n  " + reswhy)
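
A minimal usage sketch of the ReturnCode API above, relying only on the methods shown; the "import src.returnCode as RCO" path is the one used elsewhere in this changeset.

    # Minimal usage sketch for ReturnCode, relying only on the methods shown above.
    import sys

    import src.returnCode as RCO

    rc1 = RCO.ReturnCode("OK", "first step done", 42)
    rc2 = RCO.ReturnCode("KO", "second step failed")

    total = rc1 + rc2            # "KO": whys and values are concatenated as lists
    print(str(total))            # short form: 'status: why'
    print(repr(total))           # long form, including the value

    summary = RCO.ReturnCodeFromList([rc1, rc2])
    if not summary.isOk():
        print(summary.getWhy())  # one 'status: why' line per aggregated code

    rc1.raiseIfKo()              # returns rc1 since it is OK; raises on a KO code
    sys.exit(total.toSys())      # integer exit code, here 1 (KOSYS)
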
index a49524b24c465199efc051015dc06bed564dd2f7..e166b89ee11ba02d4ecd79857c2030a6e512b313 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2012  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
@@ -51,14 +51,14 @@ import gettext
 import traceback
 
 import src
-import src.debug as DBG # Easy print stderr (for DEBUG only)
-import src.returnCode as RCO # Easy (ok/ko, why) return methods code
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
+import src.returnCode as RCO  # Easy (ok/ko, why) return methods code
 import src.utilsSat as UTS
 
 # get path to salomeTools sources
-satdir  = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-srcdir = os.path.join(satdir, 'src')
-cmdsdir = os.path.join(satdir, 'commands')
+satdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+srcdir = os.path.join(satdir, "src")
+cmdsdir = os.path.join(satdir, "commands")
 
 import commands.config as CONFIG
 
@@ -66,11 +66,11 @@ import commands.config as CONFIG
 gettext.install("salomeTools", os.path.join(srcdir, "i18n"))
 
 try:
-  _LANG = os.environ["LANG"] # original locale
+    _LANG = os.environ["LANG"]  # original locale
 except:
-  _LANG = "en_US.utf8" #default
+    _LANG = "en_US.utf8"  # default
 
-# The possible hooks : 
+# The possible hooks :
 # pre is for hooks to be executed before commands
 # post is for hooks to be executed after commands
 C_PRE_HOOK = "pre"
@@ -78,100 +78,128 @@ C_POST_HOOK = "post"
 
 # Define all possible option for salomeTools command :  sat <option> <args>
 parser = src.options.Options()
-parser.add_option('h', 'help', 'boolean', 'help', 
-                  _("shows global help or help on a specific command."))
-parser.add_option('o', 'overwrite', 'list', "overwrite", 
-                  _("overwrites a configuration parameters."))
-parser.add_option('g', 'debug', 'boolean', 'debug_mode', 
-                  _("run salomeTools in debug mode."))
-parser.add_option('v', 'verbose', 'int', "output_verbose_level", 
-                  _("change output verbose level (default is 3)."))
-parser.add_option('b', 'batch', 'boolean', "batch", 
-                  _("batch mode (no question)."))
-parser.add_option('t', 'all_in_terminal', 'boolean', "all_in_terminal", 
-                  _("all traces in the terminal (for example compilation logs)."))
-parser.add_option('l', 'logs_paths_in_file', 'string', "logs_paths_in_file", 
-                  _("put the command results and paths to log files."))
+parser.add_option(
+    "h",
+    "help",
+    "boolean",
+    "help",
+    _("shows global help or help on a specific command."),
+)
+parser.add_option(
+    "o", "overwrite", "list", "overwrite", _("overwrites a configuration parameters.")
+)
+parser.add_option(
+    "g", "debug", "boolean", "debug_mode", _("run salomeTools in debug mode.")
+)
+parser.add_option(
+    "v",
+    "verbose",
+    "int",
+    "output_verbose_level",
+    _("change output verbose level (default is 3)."),
+)
+parser.add_option("b", "batch", "boolean", "batch", _("batch mode (no question)."))
+parser.add_option(
+    "t",
+    "all_in_terminal",
+    "boolean",
+    "all_in_terminal",
+    _("all traces in the terminal (for example compilation logs)."),
+)
+parser.add_option(
+    "l",
+    "logs_paths_in_file",
+    "string",
+    "logs_paths_in_file",
+    _("put the command results and paths to log files."),
+)
 
 
 ########################################################################
 # utility methods
 ########################################################################
 def find_command_list(dirPath):
-    ''' Parse files in dirPath that end with .py : it gives commands list
-    
+    """Parse files in dirPath that end with .py : it gives commands list
+
     :param dirPath str: The directory path where to search the commands
-    :return: cmd_list : the list containing the commands name 
+    :return: cmd_list : the list containing the command names
     :rtype: list
-    '''
+    """
     cmd_list = []
     for item in os.listdir(dirPath):
-        if "__init__" in item: continue # skip __init__.py
-        if item.endswith('.py'):
-            cmd_list.append(item[:-len('.py')])
+        if "__init__" in item:
+            continue  # skip __init__.py
+        if item.endswith(".py"):
+            cmd_list.append(item[: -len(".py")])
     return cmd_list
 
 
 # The list of valid salomeTools commands from cmdsdir
 # ['config', 'compile', 'prepare', ...]
 _COMMANDS_NAMES = find_command_list(cmdsdir)
-lCommand = find_command_list(cmdsdir) # obsolete
+lCommand = find_command_list(cmdsdir)  # obsolete
+
 
 def getCommandsList():
-    """Gives commands list (as basename of files .py in directory commands""" 
+    """Gives commands list (as basename of files .py in directory commands"""
     return _COMMANDS_NAMES
 
+
 def launchSat(command, logger=None):
     """
     launch sat as subprocess.Popen
     command as string ('sat --help' for example)
     used for unittest, or else...
-    
+
     :return: RCO.ReturnCode with getValue as subprocess.Popen output
     """
     if "sat" not in command.split()[0]:
-      raise Exception(_("Not a valid command for launchSat: '%s'") % command)
-    env = dict(os.environ) # copy
+        raise Exception(_("Not a valid command for launchSat: '%s'") % command)
+    env = dict(os.environ)  # copy
     # theoretically useless, in user environ $PATH,
     # but you never know
     # https://docs.python.org/2/library/os.html
-    # On some platforms, including FreeBSD and Mac OS X, 
+    # On some platforms, including FreeBSD and Mac OS X,
     # setting environ may cause memory leaks.
     # see test/initializeTest.py
     if satdir not in env["PATH"].split(":"):
-      env["PATH"] = satdir + ":" + env["PATH"]
+        env["PATH"] = satdir + ":" + env["PATH"]
     # TODO setLocale not 'fr' on subprocesses, why not?
     # env["LANG"] == ''
-    res = UTS.Popen(command, env=env, logger=logger) # logger or not.
+    res = UTS.Popen(command, env=env, logger=logger)  # logger or not.
     return res
 
+
 def setNotLocale():
     """force english at any moment"""
-    os.environ["LANG"] = ''
+    os.environ["LANG"] = ""
     gettext.install("salomeTools", os.path.join(srcdir, "i18n"))
     DBG.write("setNotLocale", os.environ["LANG"])
-    
+
+
 def setLocale():
     """
-    reset initial locale at any moment 
+    reset initial locale at any moment
     'fr' or else (TODO) from initial environment var '$LANG'
     'i18n' as 'internationalization'
     """
     os.environ["LANG"] = _LANG
     gettext.install("salomeTools", os.path.join(srcdir, "i18n"))
     DBG.write("setLocale", os.environ["LANG"])
-    
+
+
 def getVersion():
     """get version number as string"""
     return src.__version__
+
+
 def assumeAsList(strOrList):
     """return a list as sys.argv if string"""
     if type(strOrList) is list:
-      return list(strOrList) # copy
+        return list(strOrList)  # copy
     else:
-      res = strOrList.split(" ")
-      return [r for r in res if r != ""] # supposed string to split for convenience
+        res = strOrList.split(" ")
+        return [r for r in res if r != ""]  # supposed string to split for convenience
 
 
 ########################################################################
@@ -181,6 +209,7 @@ class Sat(object):
     """
     The main class that stores all the commands of salomeTools
     """
+
     def __init__(self, logger=None):
         """
         Initialization
@@ -200,13 +229,13 @@ class Sat(object):
         self.options = None  # the options passed to salomeTools
         self.datadir = None  # default value will be <salomeTools root>/data
 
-    def obsolete__init__(self, opt='', datadir=None):
-        '''Initialization
+    def obsolete__init__(self, opt="", datadir=None):
+        """Initialization
 
         :param opt str: The sat options
         :param: datadir str : the directory that contain all the external
                               data (like software pyconf and software scripts)
-        '''
+        """
         # Read the salomeTools options (the list of possible options is
         # at the beginning of this file)
         argList = self.assumeAsList(opt)
@@ -237,9 +266,9 @@ class Sat(object):
         if options.debug_mode:
             DBG.push_debug(True)
         self.arguments = opt
-        self.options = options # the generic options passed to salomeTools
+        self.options = options  # the generic options passed to salomeTools
         self.remaindersArgs = remaindersArgs  # the command and their options
-        self.datadir = datadir # default value will be <salomeTools root>/data
+        self.datadir = datadir  # default value will be <salomeTools root>/data
         self._setCommands(cmdsdir)
         DBG.write("Sat.options", self.options, self.options.debug_mode)
 
@@ -254,30 +283,30 @@ class Sat(object):
         argList = self.assumeAsList(args)
         # no arguments : print general help
         if len(argList) == 0:
-          self.mainLogger.info(get_help())
-          return RCO.ReturnCode("OK", "no args as sat --help")
+            self.mainLogger.info(get_help())
+            return RCO.ReturnCode("OK", "no args as sat --help")
 
         self.setInternals(opt=argList, datadir=None)
 
         # print general help on -h
         if self.options.help and len(self.remaindersArgs) == 0:
-          self.mainLogger.info(get_help())
-          return RCO.ReturnCode("OK", "help done")
+            self.mainLogger.info(get_help())
+            return RCO.ReturnCode("OK", "help done")
 
         DBG.write("options", self.options)
         DBG.write("remaindersArgs", self.remaindersArgs)
 
         if len(self.remaindersArgs) == 0:
-          return RCO.ReturnCode("KO", "Nothing to do")
+            return RCO.ReturnCode("KO", "Nothing to do")
 
         # print command help on -h --help after name command
         if "-h" in self.remaindersArgs or "--help" in self.remaindersArgs:
-          self.mainLogger.info(self.get_help(self.remaindersArgs))
-          return RCO.ReturnCode("OK", "sat --help command")
+            self.mainLogger.info(self.get_help(self.remaindersArgs))
+            return RCO.ReturnCode("OK", "sat --help command")
 
         # print command help on -h and continue if something do do more
         if self.options.help and len(self.remaindersArgs) >= 1:
-          self.mainLogger.info(self.get_help(self.remaindersArgs))
+            self.mainLogger.info(self.get_help(self.remaindersArgs))
 
         command = self.remaindersArgs[0]
         # get dynamically the command function to call
@@ -285,16 +314,19 @@ class Sat(object):
         # Run the command using the arguments
         code = fun_command(self.remaindersArgs[1:])
 
-        if code is None: code = 0 # what?! do not know why so respect history
+        if code is None:
+            code = 0  # what?! do not know why so respect history
 
         # return salomeTools command with the right message
         # code (0 if no errors, else 1)
         if code == _KOSYS:
-          return RCO.ReturnCode("KO", "problem on execute_cli 'sat %s'" % " ".join(argList))
+            return RCO.ReturnCode(
+                "KO", "problem on execute_cli 'sat %s'" % " ".join(argList)
+            )
         else:
-          return RCO.ReturnCode("OK", "execute_cli 'sat %s' done" % " ".join(argList))
+            return RCO.ReturnCode("OK", "execute_cli 'sat %s' done" % " ".join(argList))
 
-    '''
+    """
     # OBSOLETE... see file ../sat
     # ###############################
     # MAIN : terminal command usage #
@@ -322,15 +354,15 @@ class Sat(object):
         if code is None: code = 0
         sys.exit(code)
 
-    '''
+    """
 
     def __getattr__(self, name):
-        '''
+        """
         overwrite of __getattr__ function in order to display
         a customized message in case of a wrong call
-        
-        :param name str: The name of the attribute 
-        '''
+
+        :param name str: The name of the attribute
+        """
         if name in self.__dict__:
             return self.__dict__[name]
         else:
@@ -339,13 +371,13 @@ class Sat(object):
     def assumeAsList(self, strOrList):
         # DBG.write("Sat assumeAsList", strOrList, True)
         return assumeAsList(strOrList)
-    
+
     def _setCommands(self, dirPath):
-        '''set class attributes corresponding to all commands that are 
+        """set class attributes corresponding to all commands that are
            in the dirPath directory
-        
-        :param dirPath str: The directory path containing the commands 
-        '''
+
+        :param dirPath str: The directory path containing the commands
+        """
         # loop on the commands name
         for nameCmd in lCommand:
             # Exception for the jobs command that requires the paramiko module
@@ -355,6 +387,7 @@ class Sat(object):
                     ff = tempfile.TemporaryFile()
                     sys.stderr = ff
                     import paramiko
+
                     sys.stderr = saveout
                 except:
                     sys.stderr = saveout
@@ -363,32 +396,31 @@ class Sat(object):
             # load the module that has name nameCmd in dirPath
             (file_, pathname, description) = imp.find_module(nameCmd, [dirPath])
             module = imp.load_module(nameCmd, file_, pathname, description)
-            
-            def run_command(args='',
-                            options=None,
-                            batch = False,
-                            verbose = -1,
-                            logger_add_link = None):
-                '''
+
+            def run_command(
+                args="", options=None, batch=False, verbose=-1, logger_add_link=None
+            ):
+                """
                 The function that will load the configuration (all pyconf)
                 and return the function run of the command corresponding to module
-                
-                :param args str: The arguments of the command 
-                '''
+
+                :param args str: The arguments of the command
+                """
                 # Make sure the internationalization is available
-                gettext.install('salomeTools', os.path.join(satdir, 'src', 'i18n'))
-                
+                gettext.install("salomeTools", os.path.join(satdir, "src", "i18n"))
+
                 # Get the arguments in a list and remove the empty elements
-                if type(args) == type(''):
+                if type(args) == type(""):
                     # split by spaces without considering spaces in quotes
                     argv_0 = re.findall(r'(?:"[^"]*"|[^\s"])+', args)
                 else:
                     argv_0 = args
-                
-                if argv_0 != ['']:
-                    while "" in argv_0: argv_0.remove("")
-                
-                # Format the argv list in order to prevent strings 
+
+                if argv_0 != [""]:
+                    while "" in argv_0:
+                        argv_0.remove("")
+
+                # Format the argv list in order to prevent strings
                 # that contain a blank to be separated
                 argv = []
                 elem_old = ""
@@ -398,29 +430,31 @@ class Sat(object):
                     else:
                         argv[-1] += " " + elem
                     elem_old = elem
-                           
+
                 # if it is provided by the command line, get the application
                 appliToLoad = None
-                if argv not in [[''], []] and argv[0][0] != "-":
-                    appliToLoad = argv[0].rstrip('*')
+                if argv not in [[""], []] and argv[0][0] != "-":
+                    appliToLoad = argv[0].rstrip("*")
                     argv = argv[1:]
-                
+
                 # Check if the global options of salomeTools have to be changed
                 if options:
                     options_save = self.options
-                    self.options = options  
+                    self.options = options
 
-                # read the configuration from all the pyconf files    
+                # read the configuration from all the pyconf files
                 cfgManager = CONFIG.ConfigManager()
-                self.cfg = cfgManager.get_config(datadir=self.datadir, 
-                                                 application=appliToLoad, 
-                                                 options=self.options, 
-                                                 command=__nameCmd__)
-                               
+                self.cfg = cfgManager.get_config(
+                    datadir=self.datadir,
+                    application=appliToLoad,
+                    options=self.options,
+                    command=__nameCmd__,
+                )
+
                 # Set the verbose mode if called
                 if verbose > -1:
                     verbose_save = self.options.output_verbose_level
-                    self.options.__setattr__("output_verbose_level", verbose)    
+                    self.options.__setattr__("output_verbose_level", verbose)
 
                 # Set batch mode if called
                 if batch:
@@ -429,26 +463,31 @@ class Sat(object):
 
                 # set output level
                 if self.options.output_verbose_level is not None:
-                    self.cfg.USER.output_verbose_level = self.options.output_verbose_level
+                    self.cfg.USER.output_verbose_level = (
+                        self.options.output_verbose_level
+                    )
                 if self.cfg.USER.output_verbose_level < 1:
                     self.cfg.USER.output_verbose_level = 0
-                silent = (self.cfg.USER.output_verbose_level == 0)
+                silent = self.cfg.USER.output_verbose_level == 0
 
                 # create log file
                 micro_command = False
                 if logger_add_link:
                     micro_command = True
-                logger_command = src.logger.Logger(self.cfg,
-                                   silent_sysstd=silent,
-                                   all_in_terminal=self.options.all_in_terminal,
-                                   micro_command=micro_command)
-                
+                logger_command = src.logger.Logger(
+                    self.cfg,
+                    silent_sysstd=silent,
+                    all_in_terminal=self.options.all_in_terminal,
+                    micro_command=micro_command,
+                )
+
                 # Check that the path given by the logs_paths_in_file option
                 # is a file path that can be written
                 if self.options.logs_paths_in_file and not micro_command:
                     try:
                         self.options.logs_paths_in_file = os.path.abspath(
-                                                self.options.logs_paths_in_file)
+                            self.options.logs_paths_in_file
+                        )
                         dir_file = os.path.dirname(self.options.logs_paths_in_file)
                         if not os.path.exists(dir_file):
                             os.makedirs(dir_file)
@@ -457,34 +496,39 @@ class Sat(object):
                         file_test = open(self.options.logs_paths_in_file, "w")
                         file_test.close()
                     except Exception as e:
-                        msg = _("WARNING: the logs_paths_in_file option will "
-                                "not be taken into account.\nHere is the error:")
-                        logger_command.write("%s\n%s\n\n" % (
-                                             src.printcolors.printcWarning(msg),
-                                             str(e)))
+                        msg = _(
+                            "WARNING: the logs_paths_in_file option will "
+                            "not be taken into account.\nHere is the error:"
+                        )
+                        logger_command.write(
+                            "%s\n%s\n\n" % (src.printcolors.printcWarning(msg), str(e))
+                        )
                         self.options.logs_paths_in_file = None
 
-
                 # do nothing more if help is True
                 if self.options.help:
-                  return 0
+                    return 0
 
                 options_launched = ""
                 res = None
                 try:
-                    # Execute the hooks (if there is any) 
+                    # Execute the hooks (if there is any)
                     # and run method of the command
                     self.run_hook(__nameCmd__, C_PRE_HOOK, logger_command)
                     res = __module__.run(argv, self, logger_command)
                     self.run_hook(__nameCmd__, C_POST_HOOK, logger_command)
                     if res is None:
                         res = 0
-                        
+
                 except src.SatException as e:
                     # for sat exception do not display the stack, unless debug mode is set
                     logger_command.write("\n***** ", 1)
-                    logger_command.write(src.printcolors.printcError(
-                            "salomeTools ERROR: sat %s" % __nameCmd__), 1)
+                    logger_command.write(
+                        src.printcolors.printcError(
+                            "salomeTools ERROR: sat %s" % __nameCmd__
+                        ),
+                        1,
+                    )
                     logger_command.write(" *****\n", 1)
                     print(e.message)
                     if self.options.debug_mode:
@@ -493,66 +537,73 @@ class Sat(object):
                 except Exception as e:
                     # here we print the stack in addition
                     logger_command.write("\n***** ", 1)
-                    logger_command.write(src.printcolors.printcError(
-                            "salomeTools ERROR: sat %s" % __nameCmd__), 1)
+                    logger_command.write(
+                        src.printcolors.printcError(
+                            "salomeTools ERROR: sat %s" % __nameCmd__
+                        ),
+                        1,
+                    )
 
                     logger_command.write("\n" + DBG.format_exception("") + "\n", 1)
 
-
                 finally:
                     # set res if it is not set in the command
                     if res is None:
                         res = 1
-                                            
+
                     # come back to the original global options
                     if options:
                         options_launched = get_text_from_options(self.options)
                         self.options = options_save
-                    
-                    # come back in the original batch mode if 
+
+                    # come back in the original batch mode if
                     # batch argument was called
                     if batch:
                         self.options.__setattr__("batch", batch_save)
 
-                    # come back in the original verbose mode if 
-                    # verbose argument was called                        
+                    # come back in the original verbose mode if
+                    # verbose argument was called
                     if verbose > -1:
-                        self.options.__setattr__("output_verbose_level", 
-                                                 verbose_save)
-                    # put final attributes in xml log file 
+                        self.options.__setattr__("output_verbose_level", verbose_save)
+                    # put final attributes in xml log file
                     # (end time, total time, ...) and write it
-                    launchedCommand = ' '.join([self.cfg.VARS.salometoolsway +
-                                                os.path.sep +
-                                                'sat',
-                                                options_launched,
-                                                __nameCmd__, 
-                                                ' '.join(argv_0)])
+                    launchedCommand = " ".join(
+                        [
+                            self.cfg.VARS.salometoolsway + os.path.sep + "sat",
+                            options_launched,
+                            __nameCmd__,
+                            " ".join(argv_0),
+                        ]
+                    )
                     # TODO may be no need as call escapeSequence xml
                     launchedCommand = launchedCommand.replace('"', "'")
-                    
-                    # Add a link to the parent command      
+
+                    # Add a link to the parent command
                     if logger_add_link is not None:
-                        logger_add_link.add_link(logger_command.logFileName,
-                                                 __nameCmd__,
-                                                 res,
-                                                 launchedCommand)
+                        logger_add_link.add_link(
+                            logger_command.logFileName,
+                            __nameCmd__,
+                            res,
+                            launchedCommand,
+                        )
                         logger_add_link.l_logFiles += logger_command.l_logFiles
-                                            
+
                     # Put the final attributes corresponding to end time and
                     # Write the file to the hard drive
-                    logger_command.end_write(
-                                        {"launchedCommand" : launchedCommand})
-                    
+                    logger_command.end_write({"launchedCommand": launchedCommand})
+
                     if res != 0:
                         res = 1
-                        
-                    # print the log file path if 
-                    # the maximum verbose mode is invoked
+
+                    # print the log file path if
+                    # the maximum verbose mode is invoked
                     if not micro_command:
-                        logger_command.write("\nPath to the xml log file :\n",
-                                             5)
-                        logger_command.write("%s\n\n" % src.printcolors.printcInfo(
-                                                logger_command.logFilePath), 5)
+                        logger_command.write("\nPath to the xml log file :\n", 5)
+                        logger_command.write(
+                            "%s\n\n"
+                            % src.printcolors.printcInfo(logger_command.logFilePath),
+                            5,
+                        )
 
                     # If the logs_paths_in_file was called, write the result
                     # and log files in the given file path
@@ -564,31 +615,33 @@ class Sat(object):
                             if i < len(logger_command.l_logFiles):
                                 file_res.write("\n")
                                 file_res.flush()
-                
+
                 return res
 
-            # Make sure that run_command will be redefined 
+            # Make sure that run_command will be redefined
             # at each iteration of the loop
             globals_up = {}
             globals_up.update(run_command.__globals__)
-            globals_up.update({'__nameCmd__': nameCmd, '__module__' : module})
-            func = types.FunctionType(run_command.__code__,
-                                      globals_up,
-                                      run_command.__name__,
-                                      run_command.__defaults__,
-                                      run_command.__closure__)
+            globals_up.update({"__nameCmd__": nameCmd, "__module__": module})
+            func = types.FunctionType(
+                run_command.__code__,
+                globals_up,
+                run_command.__name__,
+                run_command.__defaults__,
+                run_command.__closure__,
+            )
 
             # set the attribute corresponding to the command
             self.__setattr__(nameCmd, func)
 
     def run_hook(self, cmd_name, hook_type, logger):
-        '''Execute a hook file for a given command regarding the fact 
+        """Execute a hook file for a given command regarding the fact
            it is pre or post
-        
+
         :param cmd_name str: The command on which to execute the hook
         :param hook_type str: pre or post
         :param logger Logger: the logging instance to use for the prints
-        '''
+        """
         # The hooks must be defined in the application pyconf
         # So, if there is no application, do not do anything
         if not src.config_has_application(self.cfg):
@@ -596,27 +649,27 @@ class Sat(object):
 
         # The hooks must be defined in the application pyconf in the
         # APPLICATION section, hook : { command : 'script_path.py'}
-        if "hook" not in self.cfg.APPLICATION \
-                    or cmd_name not in self.cfg.APPLICATION.hook:
+        if (
+            "hook" not in self.cfg.APPLICATION
+            or cmd_name not in self.cfg.APPLICATION.hook
+        ):
             return
 
         # Get the hook_script path and verify that it exists
         hook_script_path = self.cfg.APPLICATION.hook[cmd_name]
         if not os.path.exists(hook_script_path):
-            raise src.SatException(_("Hook script not found: %s") % 
-                                   hook_script_path)
-        
+            raise src.SatException(_("Hook script not found: %s") % hook_script_path)
+
         # Try to execute the script, catch the exception if it fails
         try:
             # import the module (in the sense of python)
             pymodule = imp.load_source(cmd_name, hook_script_path)
-            
+
             # format a message to be printed at hook execution
             msg = src.printcolors.printcWarning(_("Run hook script"))
-            msg = "%s: %s\n" % (msg, 
-                                src.printcolors.printcInfo(hook_script_path))
-            
-            # run the function run_pre_hook if this function is called 
+            msg = "%s: %s\n" % (msg, src.printcolors.printcInfo(hook_script_path))
+
+            # run the function run_pre_hook if this function is called
             # before the command, run_post_hook if it is called after
             if hook_type == C_PRE_HOOK and "run_pre_hook" in dir(pymodule):
                 logger.write(msg, 1)
@@ -631,12 +684,12 @@ class Sat(object):
             raise src.SatException(msg)
 
     def get_help(self, opt):
-        '''Prints help for a command. Function called when "sat -h <command>"
-        
+        """Prints help for a command. Function called when "sat -h <command>"
+
         :param argv str: the options passed (to get the command name)
-        '''
+        """
         # if no command as argument (sat -h)
-        if len(opt)==0:
+        if len(opt) == 0:
             return get_help()
         # get command name
         command = opt[0]
@@ -647,30 +700,30 @@ class Sat(object):
         # Check if this command exists
         if not hasattr(self, command):
             raise src.SatException(_("Command '%s' does not exist") % command)
-        
+
         # Print salomeTools version
         msg = "\n" + get_version() + "\n\n"
-        
+
         # load the module
         module = self.get_module(command)
 
         # print the description of the command that is done in the command file
-        if hasattr( module, "description" ) :
-            msg += src.printcolors.printcHeader( _("Description:") )
-            msg += '\n' + module.description() + '\n\n'
+        if hasattr(module, "description"):
+            msg += src.printcolors.printcHeader(_("Description:"))
+            msg += "\n" + module.description() + "\n\n"
 
         # print the description of the command options
-        if hasattr( module, "parser" ):
+        if hasattr(module, "parser"):
             msg += module.parser.get_help()
 
         msg += "\n -h, --help (boolean)\n          shows help on command.\n"
         return msg
 
     def get_module(self, module):
-        '''Loads a command. Function called only by print_help
-        
+        """Loads a command. Function called only by print_help
+
         :param module str: the command to load
-        '''
+        """
         # Check if this command exists
         if not hasattr(self, module):
             raise src.SatException(_("Command '%s' does not exist") % module)
@@ -680,6 +733,7 @@ class Sat(object):
         module = imp.load_module(module, file_, pathname, description)
         return module
 
+
 ##################################################################
 def get_text_from_options(options):
     text_options = ""
@@ -688,35 +742,37 @@ def get_text_from_options(options):
             continue
         if options.__getattr__(attr) != None:
             option_contain = options.__getattr__(attr)
-            if type(option_contain)==type([]):
+            if type(option_contain) == type([]):
                 option_contain = ",".join(option_contain)
-            if type(option_contain)==type(True):
+            if type(option_contain) == type(True):
                 option_contain = ""
-            text_options+= "--%s %s " % (attr, option_contain)
+            text_options += "--%s %s " % (attr, option_contain)
     return text_options
-                
+
 
 def get_version():
-    '''
+    """
     get colorized salomeTools version (in src/internal_config/salomeTools.pyconf).
     returns string
-    '''
-    # read the config 
+    """
+    # read the config
     cfgManager = CONFIG.ConfigManager()
     cfg = cfgManager.get_config()
     # print the key corresponding to salomeTools version
-    msg = (src.printcolors.printcHeader( _("Version: ") ) + src.get_salometool_version(cfg))
+    msg = src.printcolors.printcHeader(_("Version: ")) + src.get_salometool_version(cfg)
     return msg
 
 
 def get_help():
-    '''
+    """
     get salomeTools general help.
     returns string
-    '''
+    """
     msg = "\n" + get_version() + "\n\n"
-    msg += src.printcolors.printcHeader( _("Usage: ") ) + \
-          "sat [sat_options] <command> [application] [command_options]\n\n"
+    msg += (
+        src.printcolors.printcHeader(_("Usage: "))
+        + "sat [sat_options] <command> [application] [command_options]\n\n"
+    )
 
     msg += parser.get_help() + "\n"
 
@@ -727,18 +783,18 @@ def get_help():
 
     msg += "\n"
     # Explain how to get the help for a specific command
-    msg += src.printcolors.printcHeader(
-        _("Get help for a specific command:")) + \
-        "\n>> sat --help <command>\n"
+    msg += (
+        src.printcolors.printcHeader(_("Get help for a specific command:"))
+        + "\n>> sat --help <command>\n"
+    )
     return msg
 
+
 def write_exception(exc):
-    '''write exception in case of error in a command
-    
+    """write exception in case of error in a command
+
     :param exc exception: the exception to print
-    '''
+    """
     sys.stderr.write("\n***** ")
     sys.stderr.write(src.printcolors.printcError("salomeTools ERROR:"))
     sys.stderr.write("\n" + str(exc) + "\n")
-
-
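
A short sketch of the two entry points above, Sat.execute_cli for in-process use and launchSat for a subprocess; the module name salomeTools is an assumption based on this file's role.

    # Sketch only; the module name 'salomeTools' is an assumption.
    import salomeTools as SAT

    # In-process use through the Sat class; execute_cli accepts a string or a list.
    sat = SAT.Sat()
    rc = sat.execute_cli("--help")
    print(rc)                    # ReturnCode, e.g. "OK: help done"

    # Subprocess use; the command string must start with 'sat'.
    rc = SAT.launchSat("sat config --help")
    if rc.isOk():
        print(rc.getValue())     # the subprocess output carried by the ReturnCode
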
index 2b29aa14acdca92918ad1d80ba393a4400c7c77d..4fae24e169b8026147b703ec049c15cbbaa9233c 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 
 import string
 
+
 class MyTemplate(string.Template):
-    delimiter = '¤'
+    delimiter = "¤"
+
 
 def substitute(template_file, subst_dic):
-    template = open(template_file, 'r')
+    template = open(template_file, "r")
     template = MyTemplate(template.read())
 
     return template.safe_substitute(subst_dic)
-
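
With the '¤' delimiter above, placeholders in template files are written as ¤{name}; the call below is hypothetical (the template path and the substitution key are invented, and substitute is assumed to be imported from the module above).

    # Hypothetical call to the substitute() helper above; the template file name
    # and the substitution key are invented for illustration.
    # If 'launcher.in' contains the line:  export APPLI_ROOT=¤{appli_dir}
    content = substitute("launcher.in", {"appli_dir": "/opt/salome/APPLI"})
    print(content)               # -> export APPLI_ROOT=/opt/salome/APPLI
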
index 64b4b11c402da21c7c019b13298be6e803190e1c..df17cc6917f4efa6576ff34b14b02ed6727e3700 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 # ToolBox for test framework
 
@@ -12,30 +12,36 @@ class SatTestError(Exception):
     """
     Exception class for test errors.
     """
+
     def __init__(self, value):
         self.value = value
 
     def __str__(self):
         return repr(self.value)
 
+
 class SatNotApplicableError(Exception):
     """
     Exception class for test errors.
     """
+
     def __init__(self, value):
         self.value = value
 
     def __str__(self):
         return repr(self.value)
 
+
 def ERROR(message):
     print("ERROR", message)
     raise SatTestError(message)
-    
+
+
 def NOT_APPLICABLE(message):
     print("NOT_APPLICABLE", message)
     raise SatNotApplicableError(message)
 
+
 def compFloat(f1, f2, tol=10e-10):
     """Compares 2 numbers with tolerance tol."""
     diff = abs(f1 - f2)
@@ -46,6 +52,7 @@ def compFloat(f1, f2, tol=10e-10):
         comp = "KO"
     return comp
 
+
 def compFiles(f1, f2, tol=0):
     """Compares 2 files."""
     assert os.path.exists(f1), "compFiles: file not found: %s" % f1
@@ -60,24 +67,28 @@ def compFiles(f1, f2, tol=0):
         comp = "KO"
     return comp
 
+
 def mdump_med(med_file, dump_file, options):
     """Uses mdump to dump a med file."""
     assert isinstance(options, list), "Bad options for mdump: %s" % options
     assert len(options) == 3, "Bad options for mdump: %s" % options
     cmd = "mdump %s %s" % (med_file, " ".join(options))
-    #print(cmd)
+    # print(cmd)
 
     with open(dump_file, "w") as df:
         pdump = subprocess.Popen(cmd, shell=True, stdout=df)
         st = pdump.wait()
     return st
 
+
 def compMED(file1, file2, tol=0, diff_flags=""):
     """Compares 2 med files by using mdump."""
 
     # local utility method
     def do_dump(med):
-        dump = os.path.join(os.environ['TT_TMP_RESULT'], os.path.basename(med) + ".mdump")
+        dump = os.path.join(
+            os.environ["TT_TMP_RESULT"], os.path.basename(med) + ".mdump"
+        )
         st = mdump_med(med, dump, ["1", "NODALE", "FULL_INTERLACE"])
         if st != 0 or not os.path.exists(dump):
             raise Exception("Error mpdump %s" % med)
@@ -88,18 +99,20 @@ def compMED(file1, file2, tol=0, diff_flags=""):
         with open(dump, "w") as dumpfile:
             for line in lines:
                 try:
-                    line.index('Universal name of mesh')
+                    line.index("Universal name of mesh")
                     continue
                 except:
-                    dumpfile.write(line.replace(med, 'filename'))
+                    dumpfile.write(line.replace(med, "filename"))
         return dump
 
-
     # begin method
-    print(""">>>> compMED
+    print(
+        """>>>> compMED
  file1: %s
  file2: %s
-""" % (file1, file2))
+"""
+        % (file1, file2)
+    )
 
     if not os.path.exists(file1):
         print("compMED: file not found: '%s'" % file1)
@@ -160,5 +173,4 @@ class TOOLS_class(object):
         return os.path.join(self.tmp_dir, name)
 
     def writeInFiles(self, pylog):
-        pylog.write('inFiles=%s\n' % str(self.inFiles))
-
+        pylog.write("inFiles=%s\n" % str(self.inFiles))
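
The comparison helpers above all return the framework's "OK"/"KO" strings; the sketch below shows how a test script could use them (the import path and file names are invented).

    # Minimal sketch of the comparison helpers above; the import path and the
    # file names are invented for illustration.
    from TOOLS import ERROR, compFiles, compFloat  # hypothetical import path

    assert compFloat(1.0, 1.0 + 1e-12) == "OK"     # within the default tolerance
    assert compFloat(1.0, 1.1) == "KO"             # outside the tolerance

    if compFiles("reference.txt", "computed.txt") == "KO":
        ERROR("computed.txt differs from the reference")  # prints, then raises SatTestError
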
index 03c7b5810943500cefa86a2b459fed78860e2f35..e56dd7ff15382eb0ec4a5dbae0728814b3ca2c13 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 try:
     execfile
 except:
+
     def execfile(somefile, global_vars, local_vars):
         with open(somefile) as f:
-            code = compile(f.read(), somefile, 'exec')
+            code = compile(f.read(), somefile, "exec")
             exec(code, global_vars, local_vars)
 
+
 import os
 import sys
 import datetime
@@ -39,10 +41,10 @@ verbose = False
 
 from . import fork
 import src
-from  src.versionMinorMajorPatch import MinorMajorPatch as MMP
+from src.versionMinorMajorPatch import MinorMajorPatch as MMP
 
 # directories not considered as test grids
-C_IGNORE_GRIDS = ['.git', '.svn', 'RESSOURCES']
+C_IGNORE_GRIDS = [".git", ".svn", "RESSOURCES"]
 
 DEFAULT_TIMEOUT = 150
 
@@ -53,19 +55,22 @@ def getTmpDirDEFAULT():
         directory = os.getenv("TEMP")
     else:
         # for Linux: use /tmp/logs/{user} folder
-        directory = os.path.join( '/tmp', 'logs', os.getenv("USER", "unknown"))
+        directory = os.path.join("/tmp", "logs", os.getenv("USER", "unknown"))
     return directory
 
+
 class Test:
-    def __init__(self,
-                 config,
-                 logger,
-                 tmp_working_dir,
-                 testbase="",
-                 grids=None,
-                 sessions=None,
-                 launcher="",
-                 show_desktop=True):
+    def __init__(
+        self,
+        config,
+        logger,
+        tmp_working_dir,
+        testbase="",
+        grids=None,
+        sessions=None,
+        launcher="",
+        show_desktop=True,
+    ):
         self.grids = grids
         self.config = config
         self.logger = logger
@@ -94,117 +99,146 @@ class Test:
 
     def _copy_dir(self, source, target):
         if self.config.VARS.python >= "2.6":
-            shutil.copytree(source, target,
-                            symlinks=True,
-                            ignore=shutil.ignore_patterns('.git*','.svn*'))
+            shutil.copytree(
+                source,
+                target,
+                symlinks=True,
+                ignore=shutil.ignore_patterns(".git*", ".svn*"),
+            )
         else:
-            shutil.copytree(source, target,
-                            symlinks=True)
+            shutil.copytree(source, target, symlinks=True)
 
     def prepare_testbase_from_dir(self, testbase_name, testbase_dir):
-        self.logger.write(_("get test base from dir: %s\n") % \
-                          src.printcolors.printcLabel(testbase_dir), 3)
+        self.logger.write(
+            _("get test base from dir: %s\n")
+            % src.printcolors.printcLabel(testbase_dir),
+            3,
+        )
         if not os.access(testbase_dir, os.X_OK):
-            raise src.SatException(_("testbase %(name)s (%(dir)s) does not "
-                                     "exist ...\n") % { 'name': testbase_name,
-                                                       'dir': testbase_dir })
+            raise src.SatException(
+                _("testbase %(name)s (%(dir)s) does not " "exist ...\n")
+                % {"name": testbase_name, "dir": testbase_dir}
+            )
 
-        self._copy_dir(testbase_dir,
-                       os.path.join(self.tmp_working_dir, 'BASES', testbase_name))
+        self._copy_dir(
+            testbase_dir, os.path.join(self.tmp_working_dir, "BASES", testbase_name)
+        )
 
-    def prepare_testbase_from_git(self,
-                                  testbase_name,
-                                  testbase_base,
-                                  testbase_tag):
+    def prepare_testbase_from_git(self, testbase_name, testbase_base, testbase_tag):
         self.logger.write(
-            _("get test base '%(testbase)s' with '%(tag)s' tag from git\n") % {
-                        "testbase" : src.printcolors.printcLabel(testbase_name),
-                        "tag" : src.printcolors.printcLabel(testbase_tag)},
-                          3)
+            _("get test base '%(testbase)s' with '%(tag)s' tag from git\n")
+            % {
+                "testbase": src.printcolors.printcLabel(testbase_name),
+                "tag": src.printcolors.printcLabel(testbase_tag),
+            },
+            3,
+        )
         try:
-            def set_signal(): # pragma: no cover
+
+            def set_signal():  # pragma: no cover
                 """see http://bugs.python.org/issue1652"""
                 import signal
+
                 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
             cmd = "git clone --depth 1 %(base)s %(dir)s"
             cmd += " && cd %(dir)s"
-            if testbase_tag=='master':
+            if testbase_tag == "master":
                 cmd += " && git fetch origin %(branch)s"
             else:
                 cmd += " && git fetch origin %(branch)s:%(branch)s"
             cmd += " && git checkout %(branch)s"
-            cmd = cmd % { 'branch': testbase_tag,
-                         'base': testbase_base,
-                         'dir': testbase_name }
+            cmd = cmd % {
+                "branch": testbase_tag,
+                "base": testbase_base,
+                "dir": testbase_name,
+            }
 
             self.logger.write("> %s\n" % cmd, 5)
             if src.architecture.is_windows():
                 # preexec_fn not supported on windows platform
-                res = subprocess.call(cmd,
-                                cwd=os.path.join(self.tmp_working_dir, 'BASES'),
-                                shell=True,
-                                stdout=self.logger.logTxtFile,
-                                stderr=subprocess.PIPE)
+                res = subprocess.call(
+                    cmd,
+                    cwd=os.path.join(self.tmp_working_dir, "BASES"),
+                    shell=True,
+                    stdout=self.logger.logTxtFile,
+                    stderr=subprocess.PIPE,
+                )
             else:
-                res = subprocess.call(cmd,
-                                cwd=os.path.join(self.tmp_working_dir, 'BASES'),
-                                shell=True,
-                                preexec_fn=set_signal,
-                                stdout=self.logger.logTxtFile,
-                                stderr=subprocess.PIPE)
+                res = subprocess.call(
+                    cmd,
+                    cwd=os.path.join(self.tmp_working_dir, "BASES"),
+                    shell=True,
+                    preexec_fn=set_signal,
+                    stdout=self.logger.logTxtFile,
+                    stderr=subprocess.PIPE,
+                )
             if res != 0:
-                raise src.SatException(_("Error: unable to get test base "
-                                         "'%(name)s' from git '%(repo)s'.") % \
-                                       { 'name': testbase_name,
-                                        'repo': testbase_base })
+                raise src.SatException(
+                    _(
+                        "Error: unable to get test base "
+                        "'%(name)s' from git '%(repo)s'."
+                    )
+                    % {"name": testbase_name, "repo": testbase_base}
+                )
 
         except OSError:
             self.logger.error(_("git is not installed. exiting...\n"))
             sys.exit(0)
 
     def prepare_testbase_from_svn(self, user, testbase_name, testbase_base):
-        self.logger.write(_("get test base '%s' from svn\n") % \
-                          src.printcolors.printcLabel(testbase_name), 3)
+        self.logger.write(
+            _("get test base '%s' from svn\n")
+            % src.printcolors.printcLabel(testbase_name),
+            3,
+        )
         try:
-            def set_signal(): # pragma: no cover
+
+            def set_signal():  # pragma: no cover
                 """see http://bugs.python.org/issue1652"""
                 import signal
+
                 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
             cmd = "svn checkout --username %(user)s %(base)s %(dir)s"
-            cmd = cmd % { 'user': user,
-                         'base': testbase_base,
-                         'dir': testbase_name }
+            cmd = cmd % {"user": user, "base": testbase_base, "dir": testbase_name}
 
             # Get the application environment
             self.logger.write(_("Set the application environment\n"), 5)
-            env_appli = src.environment.SalomeEnviron(self.config,
-                                      src.environment.Environ(dict(os.environ)))
+            env_appli = src.environment.SalomeEnviron(
+                self.config, src.environment.Environ(dict(os.environ))
+            )
             env_appli.set_application_env(self.logger)
 
             self.logger.write("> %s\n" % cmd, 5)
             if src.architecture.is_windows():
                 # preexec_fn not supported on windows platform
-                res = subprocess.call(cmd,
-                                cwd=os.path.join(self.tmp_working_dir, 'BASES'),
-                                shell=True,
-                                stdout=self.logger.logTxtFile,
-                                stderr=subprocess.PIPE)
+                res = subprocess.call(
+                    cmd,
+                    cwd=os.path.join(self.tmp_working_dir, "BASES"),
+                    shell=True,
+                    stdout=self.logger.logTxtFile,
+                    stderr=subprocess.PIPE,
+                )
             else:
-                res = subprocess.call(cmd,
-                                cwd=os.path.join(self.tmp_working_dir, 'BASES'),
-                                shell=True,
-                                preexec_fn=set_signal,
-                                stdout=self.logger.logTxtFile,
-                                stderr=subprocess.PIPE,
-                                env=env_appli.environ.environ,)
+                res = subprocess.call(
+                    cmd,
+                    cwd=os.path.join(self.tmp_working_dir, "BASES"),
+                    shell=True,
+                    preexec_fn=set_signal,
+                    stdout=self.logger.logTxtFile,
+                    stderr=subprocess.PIPE,
+                    env=env_appli.environ.environ,
+                )
 
             if res != 0:
-                raise src.SatException(_("Error: unable to get test base '%(nam"
-                                         "e)s' from svn '%(repo)s'.") % \
-                                       { 'name': testbase_name,
-                                        'repo': testbase_base })
+                raise src.SatException(
+                    _(
+                        "Error: unable to get test base '%(nam"
+                        "e)s' from svn '%(repo)s'."
+                    )
+                    % {"name": testbase_name, "repo": testbase_base}
+                )
 
         except OSError:
             self.logger.error(_("svn is not installed. exiting...\n"))
@@ -213,10 +247,7 @@ class Test:
     ##
     # Configure tests base.
     def prepare_testbase(self, test_base_name):
-        src.printcolors.print_value(self.logger,
-                                    _("Test base"),
-                                    test_base_name,
-                                    3)
+        src.printcolors.print_value(self.logger, _("Test base"), test_base_name, 3)
         self.logger.write("\n", 3, False)
 
         # search for the test base
@@ -236,30 +267,30 @@ class Test:
                 return 0
 
         if not test_base_info:
-            message = (_("########## ERROR: test base '%s' not found\n") %
-                       test_base_name)
+            message = _("########## ERROR: test base '%s' not found\n") % test_base_name
             self.logger.write("%s\n" % src.printcolors.printcError(message))
             return 1
 
         if test_base_info.get_sources == "dir":
-            self.prepare_testbase_from_dir(test_base_name,
-                                           test_base_info.info.dir)
+            self.prepare_testbase_from_dir(test_base_name, test_base_info.info.dir)
         elif test_base_info.get_sources == "git":
-            self.prepare_testbase_from_git(test_base_name,
-                                       test_base_info.info.base,
-                                       self.config.APPLICATION.test_base.tag)
+            self.prepare_testbase_from_git(
+                test_base_name,
+                test_base_info.info.base,
+                self.config.APPLICATION.test_base.tag,
+            )
         elif test_base_info.get_sources == "svn":
-            svn_user = src.get_cfg_param(test_base_info.info,
-                                         "svn_user",
-                                         self.config.USER.svn_user)
-            self.prepare_testbase_from_svn(svn_user,
-                                       test_base_name,
-                                       test_base_info.info.base)
+            svn_user = src.get_cfg_param(
+                test_base_info.info, "svn_user", self.config.USER.svn_user
+            )
+            self.prepare_testbase_from_svn(
+                svn_user, test_base_name, test_base_info.info.base
+            )
         else:
-            raise src.SatException(_("unknown source type '%(type)s' for test b"
-                                     "ase '%(base)s' ...\n") % {
-                                        'type': test_base_info.get_sources,
-                                        'base': test_base_name })
+            raise src.SatException(
+                _("unknown source type '%(type)s' for test b" "ase '%(base)s' ...\n")
+                % {"type": test_base_info.get_sources, "base": test_base_name}
+            )
 
         self.currentTestBase = test_base_name
 
@@ -284,23 +315,20 @@ class Test:
             if not error.fixed:
                 # the error is fixed
                 self.known_errors.fix_error(error)
-                #import testerror
-                #testerror.write_test_failures(
+                # import testerror
+                # testerror.write_test_failures(
                 #                        self.config.TOOLS.testerror.file_path,
                 #                        self.known_errors.errors)
-            return status, [ error.date,
-                            error.expected,
-                            error.comment,
-                            error.fixed ]
+            return status, [error.date, error.expected, error.comment, error.fixed]
 
         if error.fixed:
             self.known_errors.unfix_error(error)
-            #import testerror
-            #testerror.write_test_failures(self.config.TOOLS.testerror.file_path,
+            # import testerror
+            # testerror.write_test_failures(self.config.TOOLS.testerror.file_path,
             #                              self.known_errors.errors)
 
         delta = self.known_errors.get_expecting_days(error)
-        kfres = [ error.date, error.expected, error.comment, error.fixed ]
+        kfres = [error.date, error.expected, error.comment, error.fixed]
         if delta < 0:
             return src.KO_STATUS, kfres
         return src.KNOWNFAILURE_STATUS, kfres
@@ -310,10 +338,12 @@ class Test:
     def read_results(self, listTest, has_timed_out):
         results = {}
         for test in listTest:
-            resfile = os.path.join(self.currentDir,
-                                   self.currentgrid,
-                                   self.currentsession,
-                                   test[:-3] + ".result.py")
+            resfile = os.path.join(
+                self.currentDir,
+                self.currentgrid,
+                self.currentsession,
+                test[:-3] + ".result.py",
+            )
 
             # check if <test>.result.py file exists
             if not os.path.exists(resfile):
@@ -321,68 +351,71 @@ class Test:
             else:
                 gdic, ldic = {}, {}
                 if verbose:
-                  print("test script: '%s':\n'%s'\n" % (resfile, open(resfile, 'r').read()))
+                    print(
+                        "test script: '%s':\n'%s'\n"
+                        % (resfile, open(resfile, "r").read())
+                    )
 
                 try:
-                  execfile(resfile, gdic, ldic)
-
-                  status = src.TIMEOUT_STATUS
-                  if not has_timed_out:
-                      status = src.KO_STATUS
-
-                  if 'status' in ldic:
-                      status = ldic['status']
-
-                  expected = []
-                  if status == src.KO_STATUS or status == src.OK_STATUS:
-                      status, expected = self.search_known_errors(status,
-                                                              self.currentgrid,
-                                                              self.currentsession,
-                                                              test)
-
-                  callback = ""
-                  if 'callback' in ldic:
-                      callback = ldic['callback']
-                  elif status == src.KO_STATUS:
-                      callback = "CRASH"
-                      if verbose:
-                        print("--- CRASH ldic\n%s" % PP.pformat(ldic)) # cvw TODO
-                        print("--- CRASH gdic\n%s" %  PP.pformat(gdic))
-                        pass
-
-                  exec_time = -1
-                  if 'time' in ldic:
-                      try:
-                          exec_time = float(ldic['time'])
-                      except:
-                          pass
-
-                  results[test] = [status, exec_time, callback, expected]
+                    execfile(resfile, gdic, ldic)
+
+                    status = src.TIMEOUT_STATUS
+                    if not has_timed_out:
+                        status = src.KO_STATUS
+
+                    if "status" in ldic:
+                        status = ldic["status"]
+
+                    expected = []
+                    if status == src.KO_STATUS or status == src.OK_STATUS:
+                        status, expected = self.search_known_errors(
+                            status, self.currentgrid, self.currentsession, test
+                        )
+
+                    callback = ""
+                    if "callback" in ldic:
+                        callback = ldic["callback"]
+                    elif status == src.KO_STATUS:
+                        callback = "CRASH"
+                        if verbose:
+                            print("--- CRASH ldic\n%s" % PP.pformat(ldic))  # cvw TODO
+                            print("--- CRASH gdic\n%s" % PP.pformat(gdic))
+                            pass
+
+                    exec_time = -1
+                    if "time" in ldic:
+                        try:
+                            exec_time = float(ldic["time"])
+                        except:
+                            pass
+
+                    results[test] = [status, exec_time, callback, expected]
 
                 except:
-                  results[test] = ["?", -1, "", []]
-                  # results[test] = [src.O_STATUS, -1, open(resfile, 'r').read(), []]
+                    results[test] = ["?", -1, "", []]
+                    # results[test] = [src.O_STATUS, -1, open(resfile, 'r').read(), []]
 
             # check if <test>.py file exists
-            testfile = os.path.join(self.currentDir,
-                                   self.currentgrid,
-                                   self.currentsession,
-                                   test)
+            testfile = os.path.join(
+                self.currentDir, self.currentgrid, self.currentsession, test
+            )
 
             if not os.path.exists(testfile):
-                results[test].append('')
+                results[test].append("")
             else:
                 text = open(testfile, "r").read()
                 results[test].append(text)
 
             # check if <test>.out.py file exists
-            outfile = os.path.join(self.currentDir,
-                                   self.currentgrid,
-                                   self.currentsession,
-                                   test[:-3] + ".out.py")
+            outfile = os.path.join(
+                self.currentDir,
+                self.currentgrid,
+                self.currentsession,
+                test[:-3] + ".out.py",
+            )
 
             if not os.path.exists(outfile):
-                results[test].append('')
+                results[test].append("")
             else:
                 text = open(outfile, "r").read()
                 results[test].append(text)
@@ -396,43 +429,49 @@ class Test:
     def generate_script(self, listTest, script_path, ignoreList):
         # open template file
         tFile = os.path.join(self.config.VARS.srcDir, "test", "scriptTemplate.py")
-        with open(tFile, 'r') as f:
-          template = string.Template(f.read())
+        with open(tFile, "r") as f:
+            template = string.Template(f.read())
 
         # create substitution dictionary
         d = dict()
-        d['resourcesWay'] = os.path.join(self.currentDir, 'RESSOURCES')
-        d['tmpDir'] = os.path.join(self.tmp_working_dir, 'WORK')
-        d['toolsWay'] = os.path.join(self.config.VARS.srcDir, "test")
-        d['sessionDir'] = os.path.join(self.currentDir, self.currentgrid, self.currentsession)
-        d['resultFile'] = os.path.join(self.tmp_working_dir, 'WORK', 'exec_result')
-        d['listTest'] = listTest
-        d['sessionName'] = self.currentsession
-        d['ignore'] = ignoreList
+        d["resourcesWay"] = os.path.join(self.currentDir, "RESSOURCES")
+        d["tmpDir"] = os.path.join(self.tmp_working_dir, "WORK")
+        d["toolsWay"] = os.path.join(self.config.VARS.srcDir, "test")
+        d["sessionDir"] = os.path.join(
+            self.currentDir, self.currentgrid, self.currentsession
+        )
+        d["resultFile"] = os.path.join(self.tmp_working_dir, "WORK", "exec_result")
+        d["listTest"] = listTest
+        d["sessionName"] = self.currentsession
+        d["ignore"] = ignoreList
 
         # create script with template
         contents = template.safe_substitute(d)
-        if verbose: print("generate_script '%s':\n%s" % (script_path, contents)) # cvw TODO
-        with open(script_path, 'w') as f:
-          f.write(contents)
-
+        if verbose:
+            print("generate_script '%s':\n%s" % (script_path, contents))  # cvw TODO
+        with open(script_path, "w") as f:
+            f.write(contents)
 
     # Find the getTmpDir function that gives access to *_pidict file directory.
     # (the *_pidict file exists when SALOME is launched)
     def get_tmp_dir(self):
         # Rare case where there is no KERNEL in grid list
         # (for example MED_STANDALONE)
-        if ('APPLICATION' in self.config
-                and 'KERNEL' not in self.config.APPLICATION.products
-                and 'KERNEL_ROOT_DIR' not in os.environ):
+        if (
+            "APPLICATION" in self.config
+            and "KERNEL" not in self.config.APPLICATION.products
+            and "KERNEL_ROOT_DIR" not in os.environ
+        ):
             return getTmpDirDEFAULT
 
         # Case where "sat test" is launched in an existing SALOME environment
-        if 'KERNEL_ROOT_DIR' in os.environ:
-            root_dir =  os.environ['KERNEL_ROOT_DIR']
+        if "KERNEL_ROOT_DIR" in os.environ:
+            root_dir = os.environ["KERNEL_ROOT_DIR"]
 
-        if ('APPLICATION' in self.config and
-            'KERNEL' in self.config.APPLICATION.products):
+        if (
+            "APPLICATION" in self.config
+            and "KERNEL" in self.config.APPLICATION.products
+        ):
             root_dir = src.product.get_product_config(self.config, "KERNEL").install_dir
 
         # Case where there the appli option is called (with path to launcher)
@@ -441,42 +480,47 @@ class Test:
             # and the new one
             launcherName = os.path.basename(self.launcher)
             launcherDir = os.path.dirname(self.launcher)
-            if launcherName == 'runAppli':
+            if launcherName == "runAppli":
                 # Old application
-                cmd = """
+                cmd = (
+                    """
 for i in %s/env.d/*.sh;
   do source ${i};
 done
 echo $KERNEL_ROOT_DIR
-""" % launcherDir
+"""
+                    % launcherDir
+                )
             else:
                 # New application
-                cmd = """
+                cmd = (
+                    """
 echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
 %s shell tmpscript.py
-""" % self.launcher
+"""
+                    % self.launcher
+                )
 
             if src.architecture.is_windows():
-                subproc_res = subprocess.Popen(cmd,
-                            stdout=subprocess.PIPE,
-                            shell=True).communicate()
+                subproc_res = subprocess.Popen(
+                    cmd, stdout=subprocess.PIPE, shell=True
+                ).communicate()
                 pass
             else:
-                subproc_res = subprocess.Popen(cmd,
-                            stdout=subprocess.PIPE,
-                            shell=True,
-                            executable='/bin/bash').communicate()
+                subproc_res = subprocess.Popen(
+                    cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash"
+                ).communicate()
                 pass
 
             root_dir = subproc_res[0].split()[-1]
 
         # import grid salome_utils from KERNEL that gives
         # the right getTmpDir function
-        root_dir = root_dir.decode('utf-8')
-        aPath = [os.path.join(root_dir, 'bin', 'salome')]
+        root_dir = root_dir.decode("utf-8")
+        aPath = [os.path.join(root_dir, "bin", "salome")]
         sal_uts = "salome_utils"
         try:
-            (file_, pathname, description) = imp.find_module(sal_uts, aPath )
+            (file_, pathname, description) = imp.find_module(sal_uts, aPath)
         except Exception:
             msg = "inexisting %s.py in %s" % (sal_uts, aPath)
             raise Exception(msg)
@@ -491,10 +535,8 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
             if file_:
                 file_.close()
 
-
     def get_test_timeout(self, test_name, default_value):
-        if ("timeout" in self.settings and
-                test_name in self.settings["timeout"]):
+        if "timeout" in self.settings and test_name in self.settings["timeout"]:
             return self.settings["timeout"][test_name]
 
         return default_value
@@ -507,34 +549,38 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
             # and the new one
             launcherName = os.path.basename(self.launcher)
             launcherDir = os.path.dirname(self.launcher)
-            if os.path.basename(launcherDir) == 'APPLI':
+            if os.path.basename(launcherDir) == "APPLI":
                 # Old application
                 binSalome = self.launcher
-                binPython = ("for i in " +
-                             launcherDir +
-                             "/env.d/*.sh; do source ${i}; done ; python")
-                killSalome = ("for i in " +
-                        launcherDir +
-                        "/env.d/*.sh; do source ${i}; done ; killSalome.py'")
+                binPython = (
+                    "for i in "
+                    + launcherDir
+                    + "/env.d/*.sh; do source ${i}; done ; python"
+                )
+                killSalome = (
+                    "for i in "
+                    + launcherDir
+                    + "/env.d/*.sh; do source ${i}; done ; killSalome.py'"
+                )
                 return binSalome, binPython, killSalome
             else:
                 # New application
                 binSalome = self.launcher
-                binPython = self.launcher + ' shell'
-                killSalome = self.launcher + ' killall'
+                binPython = self.launcher + " shell"
+                killSalome = self.launcher + " killall"
                 return binSalome, binPython, killSalome
 
         # SALOME version detection and APPLI repository detection
         VersionSalome = src.get_salome_version(self.config)
-        appdir = 'APPLI'
+        appdir = "APPLI"
         if "APPLI" in self.config and "application_name" in self.config.APPLI:
             appdir = self.config.APPLI.application_name
 
         # Case where SALOME has NOT the launcher that uses the SalomeContext API
-        if VersionSalome < MMP([7,3,0]):
-            binSalome = os.path.join(self.config.APPLICATION.workdir,
-                                     appdir,
-                                     "runAppli")
+        if VersionSalome < MMP([7, 3, 0]):
+            binSalome = os.path.join(
+                self.config.APPLICATION.workdir, appdir, "runAppli"
+            )
             binPython = "python"
             killSalome = "killSalome.py"
             src.environment.load_environment(self.config, False, self.logger)
@@ -543,40 +589,42 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
         # Case where SALOME has the launcher that uses the SalomeContext API
         else:
             launcher_name = src.get_launcher_name(self.config)
-            binSalome = os.path.join(self.config.APPLICATION.workdir,
-                                     launcher_name)
+            binSalome = os.path.join(self.config.APPLICATION.workdir, launcher_name)
 
-            binPython = binSalome + ' shell'
-            killSalome = binSalome + ' killall'
+            binPython = binSalome + " shell"
+            killSalome = binSalome + " killall"
             return binSalome, binPython, killSalome
 
         return binSalome, binPython, killSalome
 
-
     ##
     # Runs tests of a session (using a single instance of Salome).
     def run_tests(self, listTest, ignoreList):
-        out_path = os.path.join(self.currentDir,
-                                self.currentgrid,
-                                self.currentsession)
-        if verbose: print("run_tests '%s'\nlistTest: %s\nignoreList: %s" %
-                   (self.currentDir, PP.pformat(listTest), PP.pformat(ignoreList))) # cvw TODO
+        out_path = os.path.join(self.currentDir, self.currentgrid, self.currentsession)
+        if verbose:
+            print(
+                "run_tests '%s'\nlistTest: %s\nignoreList: %s"
+                % (self.currentDir, PP.pformat(listTest), PP.pformat(ignoreList))
+            )  # cvw TODO
         sessionname = "%s/%s" % (self.currentgrid, self.currentsession)
-        time_out = self.get_test_timeout(sessionname,
-                                         DEFAULT_TIMEOUT)
+        time_out = self.get_test_timeout(sessionname, DEFAULT_TIMEOUT)
 
         time_out_salome = DEFAULT_TIMEOUT
 
         # generate wrapper script
-        script_path = os.path.join(out_path, 'wrapperScript.py')
+        script_path = os.path.join(out_path, "wrapperScript.py")
         self.generate_script(listTest, script_path, ignoreList)
 
         tmpDir = self.get_tmp_dir()
 
         binSalome, binPython, killSalome = self.generate_launching_commands()
-        if "run_with_grids" in self.settings and \
-           sessionname in self.settings["run_with_grids"]:
-            binSalome = (binSalome + " -m %s" % self.settings["run_with_grids"][sessionname])
+        if (
+            "run_with_grids" in self.settings
+            and sessionname in self.settings["run_with_grids"]
+        ):
+            binSalome = (
+                binSalome + " -m %s" % self.settings["run_with_grids"][sessionname]
+            )
 
         logWay = os.path.join(self.tmp_working_dir, "WORK", "log_cxx")
 
@@ -585,36 +633,40 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
         if self.currentsession.startswith("NOGUI_"):
             # runSalome -t (bash)
             status, elapsed = fork.batch(
-                                binSalome,
-                                self.logger,
-                                os.path.join(self.tmp_working_dir, "WORK"),
-                                [ "-t", "--shutdown-server=1", script_path ],
-                                delai=time_out,
-                                log=logWay)
+                binSalome,
+                self.logger,
+                os.path.join(self.tmp_working_dir, "WORK"),
+                ["-t", "--shutdown-server=1", script_path],
+                delai=time_out,
+                log=logWay,
+            )
 
         elif self.currentsession.startswith("PY_"):
             # python script.py
             status, elapsed = fork.batch(
-                                binPython,
-                                self.logger,
-                                os.path.join(self.tmp_working_dir, "WORK"),
-                                [script_path],
-                                delai=time_out,
-                                log=logWay)
+                binPython,
+                self.logger,
+                os.path.join(self.tmp_working_dir, "WORK"),
+                [script_path],
+                delai=time_out,
+                log=logWay,
+            )
 
         else:
             opt = "-z 0"
-            if self.show_desktop: opt = "--show-desktop=0"
+            if self.show_desktop:
+                opt = "--show-desktop=0"
             status, elapsed = fork.batch_salome(
-                                binSalome,
-                                self.logger,
-                                os.path.join( self.tmp_working_dir, "WORK"),
-                                [ opt, "--shutdown-server=1", script_path ],
-                                getTmpDir=tmpDir,
-                                fin=killSalome,
-                                delai=time_out,
-                                log=logWay,
-                                delaiapp=time_out_salome)
+                binSalome,
+                self.logger,
+                os.path.join(self.tmp_working_dir, "WORK"),
+                [opt, "--shutdown-server=1", script_path],
+                getTmpDir=tmpDir,
+                fin=killSalome,
+                delai=time_out,
+                log=logWay,
+                delaiapp=time_out_salome,
+            )
 
         self.logger.write("status = %s, elapsed = %s\n" % (status, elapsed), 5)
 
@@ -636,7 +688,8 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
             script_info.time = script_results[sr][1]
             if script_info.res == src.TIMEOUT_STATUS:
                 script_info.time = time_out
-            if script_info.time < 1e-3: script_info.time = 0
+            if script_info.time < 1e-3:
+                script_info.time = 0
 
             callback = script_results[sr][2]
             if script_info.res != src.OK_STATUS and len(callback) > 0:
@@ -654,7 +707,7 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
             script_info.out = script_results[sr][5]
 
             # add it to the list of results
-            test_info.script.append(script_info, '')
+            test_info.script.append(script_info, "")
 
             # display the results
             if script_info.time > 0:
@@ -664,15 +717,24 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
 
             sp = "." * (35 - len(script_info.name))
             self.logger.write(self.write_test_margin(3), 3)
-            self.logger.write("script %s %s %s %s\n" % (
-                                src.printcolors.printcLabel(script_info.name),
-                                sp,
-                                src.printcolors.printc(script_info.res),
-                                exectime), 3, False)
+            self.logger.write(
+                "script %s %s %s %s\n"
+                % (
+                    src.printcolors.printcLabel(script_info.name),
+                    sp,
+                    src.printcolors.printc(script_info.res),
+                    exectime,
+                ),
+                3,
+                False,
+            )
             if script_info and len(callback) > 0:
-                self.logger.write("Exception in %s\n%s\n" % \
-                    (script_info.name,
-                     src.printcolors.printcWarning(callback)), 2, False)
+                self.logger.write(
+                    "Exception in %s\n%s\n"
+                    % (script_info.name, src.printcolors.printcWarning(callback)),
+                    2,
+                    False,
+                )
 
             if script_info.res == src.OK_STATUS:
                 self.nb_succeed += 1
@@ -685,28 +747,35 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
             elif script_info.res == "?":
                 self.nb_not_run += 1
 
-
-        self.config.TESTS.append(test_info, '')
+        self.config.TESTS.append(test_info, "")
 
     ##
     # Runs all tests of a session.
     def run_session_tests(self):
 
         self.logger.write(self.write_test_margin(2), 3)
-        self.logger.write("Session = %s\n" % src.printcolors.printcLabel(
-                                                    self.currentsession), 3, False)
+        self.logger.write(
+            "Session = %s\n" % src.printcolors.printcLabel(self.currentsession),
+            3,
+            False,
+        )
 
         # prepare list of tests to run
-        tests = os.listdir(os.path.join(self.currentDir,
-                                        self.currentgrid,
-                                        self.currentsession))
+        tests = os.listdir(
+            os.path.join(self.currentDir, self.currentgrid, self.currentsession)
+        )
         # avoid result files of previous tests, if presents
         # tests = filter(lambda l: l.endswith(".py"), tests)
-        tests = [t for t in tests if t.endswith(".py") \
-                   and not ( t.endswith(".out.py") or \
-                             t.endswith(".result.py") or \
-                             t.endswith("wrapperScript.py") \
-                           ) ]
+        tests = [
+            t
+            for t in tests
+            if t.endswith(".py")
+            and not (
+                t.endswith(".out.py")
+                or t.endswith(".result.py")
+                or t.endswith("wrapperScript.py")
+            )
+        ]
         tests = sorted(tests, key=str.lower)
 
         # build list of known failures
@@ -714,7 +783,7 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
         ignoreDict = {}
         for k in self.ignore_tests.keys():
             if k.startswith(cat):
-                ignoreDict[k[len(cat):]] = self.ignore_tests[k]
+                ignoreDict[k[len(cat) :]] = self.ignore_tests[k]
 
         self.run_tests(tests, ignoreDict)
 
@@ -722,20 +791,21 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
     # Runs all tests of a grid.
     def run_grid_tests(self):
         self.logger.write(self.write_test_margin(1), 3)
-        self.logger.write("grid = %s\n" % src.printcolors.printcLabel(
-                                                self.currentgrid), 3, False)
+        self.logger.write(
+            "grid = %s\n" % src.printcolors.printcLabel(self.currentgrid), 3, False
+        )
 
         grid_path = os.path.join(self.currentDir, self.currentgrid)
 
         sessions = []
         if self.sessions is not None:
-            sessions = self.sessions # user choice
+            sessions = self.sessions  # user choice
         else:
             # use all scripts in grid
-            sessions = filter(lambda l: l not in C_IGNORE_GRIDS,
-                           os.listdir(grid_path))
-            sessions = filter(lambda l: os.path.isdir(os.path.join(grid_path,
-                                                                l)), sessions)
+            sessions = filter(lambda l: l not in C_IGNORE_GRIDS, os.listdir(grid_path))
+            sessions = filter(
+                lambda l: os.path.isdir(os.path.join(grid_path, l)), sessions
+            )
 
         sessions = sorted(sessions, key=str.lower)
         existingSessions = self.getSubDirectories(grid_path)
@@ -746,7 +816,10 @@ echo -e 'import os\nprint(os.environ[\"KERNEL_ROOT_DIR\"])' > tmpscript.py
 Session '%s' not found
 Existing sessions are:
 %s
-""" % (session_, PP.pformat(sorted(existingSessions)))
+""" % (
+                    session_,
+                    PP.pformat(sorted(existingSessions)),
+                )
                 self.logger.write(src.printcolors.printcWarning(msg), 3, False)
             else:
                 self.currentsession = session_
@@ -758,7 +831,7 @@ Existing sessions are:
         excluding '.git' etc as beginning with '.'
         """
         res = os.listdir(aDir)
-        res = [d for d in res if os.path.isdir(os.path.join(aDir, d)) and d[0] != '.']
+        res = [d for d in res if os.path.isdir(os.path.join(aDir, d)) and d[0] != "."]
         # print("getSubDirectories %s are:\n%s" % (aDir, PP.pformat(res)))
         return res
 
@@ -766,22 +839,17 @@ Existing sessions are:
     # Runs test testbase.
     def run_testbase_tests(self):
         res_dir = os.path.join(self.currentDir, "RESSOURCES")
-        os.environ['PYTHONPATH'] =  (res_dir +
-                                     os.pathsep +
-                                     os.environ['PYTHONPATH'])
-        os.environ['TT_BASE_RESSOURCES'] = res_dir
-        src.printcolors.print_value(self.logger,
-                                    "TT_BASE_RESSOURCES",
-                                    res_dir,
-                                    4)
+        os.environ["PYTHONPATH"] = res_dir + os.pathsep + os.environ["PYTHONPATH"]
+        os.environ["TT_BASE_RESSOURCES"] = res_dir
+        src.printcolors.print_value(self.logger, "TT_BASE_RESSOURCES", res_dir, 4)
         self.logger.write("\n", 4, False)
 
         self.logger.write(self.write_test_margin(0), 3)
         testbase_label = "Test base = %s\n" % src.printcolors.printcLabel(
-                                                        self.currentTestBase)
+            self.currentTestBase
+        )
         self.logger.write(testbase_label, 3, False)
-        self.logger.write("-" * len(src.printcolors.cleancolor(testbase_label)),
-                          3)
+        self.logger.write("-" * len(src.printcolors.cleancolor(testbase_label)), 3)
         self.logger.write("\n", 3, False)
 
         # load settings
@@ -790,20 +858,26 @@ Existing sessions are:
             gdic, ldic = {}, {}
             execfile(settings_file, gdic, ldic)
             self.logger.write("Load test settings '%s'\n" % settings_file, 5)
-            self.settings = ldic['settings_dic']
-            self.ignore_tests = ldic['known_failures_list']
+            self.settings = ldic["settings_dic"]
+            self.ignore_tests = ldic["known_failures_list"]
             if isinstance(self.ignore_tests, list):
                 self.ignore_tests = {}
-                self.logger.write(src.printcolors.printcWarning(
-                  "known_failures_list must be a dictionary (not a list)") + "\n", 1, False)
+                self.logger.write(
+                    src.printcolors.printcWarning(
+                        "known_failures_list must be a dictionary (not a list)"
+                    )
+                    + "\n",
+                    1,
+                    False,
+                )
         else:
             self.ignore_tests = {}
             self.settings.clear()
 
         # read known failures pyconf
         if "testerror" in self.config.LOCAL:
-            #import testerror
-            #self.known_errors = testerror.read_test_failures(
+            # import testerror
+            # self.known_errors = testerror.read_test_failures(
             #                            self.config.TOOLS.testerror.file_path,
             #                            do_error=False)
             pass
@@ -811,14 +885,15 @@ Existing sessions are:
             self.known_errors = None
 
         if self.grids is not None:
-            grids = self.grids # given by user
+            grids = self.grids  # given by user
         else:
             # select all the grids (i.e. directories) in the directory
-            grids = filter(lambda l: l not in C_IGNORE_GRIDS,
-                             os.listdir(self.currentDir))
-            grids = filter(lambda l: os.path.isdir(
-                                        os.path.join(self.currentDir, l)),
-                             grids)
+            grids = filter(
+                lambda l: l not in C_IGNORE_GRIDS, os.listdir(self.currentDir)
+            )
+            grids = filter(
+                lambda l: os.path.isdir(os.path.join(self.currentDir, l)), grids
+            )
 
         grids = sorted(grids, key=str.lower)
         existingGrids = self.getSubDirectories(self.currentDir)
@@ -829,43 +904,59 @@ Existing sessions are:
 Grid '%s' does not exist
 Existing grids are:
 %s
-""" % (grid, PP.pformat(sorted(existingGrids)))
+""" % (
+                    grid,
+                    PP.pformat(sorted(existingGrids)),
+                )
                 self.logger.write(src.printcolors.printcWarning(msg), 3, False)
             else:
                 self.currentgrid = grid
                 self.run_grid_tests()
 
     def run_script(self, script_name):
-        if ('APPLICATION' in self.config and
-                script_name in self.config.APPLICATION):
+        if "APPLICATION" in self.config and script_name in self.config.APPLICATION:
             script = self.config.APPLICATION[script_name]
             if len(script) == 0:
                 return
 
             self.logger.write("\n", 2, False)
             if not os.path.exists(script):
-                self.logger.write(src.printcolors.printcWarning("WARNING: scrip"
-                                        "t not found: %s" % script) + "\n", 2)
+                self.logger.write(
+                    src.printcolors.printcWarning(
+                        "WARNING: scrip" "t not found: %s" % script
+                    )
+                    + "\n",
+                    2,
+                )
             else:
-                self.logger.write(src.printcolors.printcHeader("----------- sta"
-                                            "rt %s" % script_name) + "\n", 2)
+                self.logger.write(
+                    src.printcolors.printcHeader(
+                        "----------- sta" "rt %s" % script_name
+                    )
+                    + "\n",
+                    2,
+                )
                 self.logger.write("Run script: %s\n" % script, 2)
                 subprocess.Popen(script, shell=True).wait()
-                self.logger.write(src.printcolors.printcHeader("----------- end"
-                                                " %s" % script_name) + "\n", 2)
+                self.logger.write(
+                    src.printcolors.printcHeader("----------- end" " %s" % script_name)
+                    + "\n",
+                    2,
+                )
 
     def run_all_tests(self):
         initTime = datetime.datetime.now()
 
-        self.run_script('test_setup')
+        self.run_script("test_setup")
         self.logger.write("\n", 2, False)
 
-        self.logger.write(src.printcolors.printcHeader(
-                                            _("=== STARTING TESTS")) + "\n", 2)
+        self.logger.write(
+            src.printcolors.printcHeader(_("=== STARTING TESTS")) + "\n", 2
+        )
         self.logger.write("\n", 2, False)
-        self.currentDir = os.path.join(self.tmp_working_dir,
-                                       'BASES',
-                                       self.currentTestBase)
+        self.currentDir = os.path.join(
+            self.tmp_working_dir, "BASES", self.currentTestBase
+        )
         self.run_testbase_tests()
 
         # calculate total execution time
@@ -873,20 +964,22 @@ Existing grids are:
         totalTime -= datetime.timedelta(microseconds=totalTime.microseconds)
         self.logger.write("\n", 2, False)
         self.logger.write(src.printcolors.printcHeader(_("=== END TESTS")), 2)
-        self.logger.write(" %s\n" % src.printcolors.printcInfo(str(totalTime)),
-                          2,
-                          False)
+        self.logger.write(
+            " %s\n" % src.printcolors.printcInfo(str(totalTime)), 2, False
+        )
 
         #
         # Start the tests
         #
-        self.run_script('test_cleanup')
+        self.run_script("test_cleanup")
         self.logger.write("\n", 2, False)
 
         # evaluate results
 
-        res_out = _("Tests Results: %(succeed)d / %(total)d\n") % \
-            { 'succeed': self.nb_succeed, 'total': self.nb_run }
+        res_out = _("Tests Results: %(succeed)d / %(total)d\n") % {
+            "succeed": self.nb_succeed,
+            "total": self.nb_run,
+        }
         if self.nb_succeed == self.nb_run:
             res_out = src.printcolors.printcSuccess(res_out)
         else:
index ea42232743bc6f4a45a1a39db38b2d9b79a37c5b..7a4c07025eb5429d2cbddca5f682f737a018606b 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -38,39 +38,48 @@ import tempfile
 import subprocess as SP
 
 import src.returnCode as RCO
-import src.debug as DBG # Easy print stderr (for DEBUG only)
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
 
 
 ##############################################################################
 # subprocess utilities, with logger functionnalities (trace etc.)
 ##############################################################################
-    
-def Popen(command, shell=True, cwd=None, env=None, stdout=SP.PIPE, stderr=SP.PIPE, logger=None):
-  """
-  make subprocess.Popen(cmd), with 
-  call logger.trace and logger.error if problem as returncode != 0 
-  """
-  if True: #try:  
-    proc = SP.Popen(command, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=SP.STDOUT)
-    res_out, res_err = proc.communicate() # res_err = None as stderr=SP.STDOUT
-    rc = proc.returncode
-    
-    DBG.write("Popen logger returncode", (rc, res_out))
-    
-    if rc == 0:
-      if logger is not None:
-        logger.trace("<OK> launch command rc=%s cwd=%s:\n%s" % (rc, cwd, command))
-        logger.trace("<OK> result command stdout&stderr:\n%s" % res_out)
-      return RCO.ReturnCode("OK", "Popen command done", value=res_out)
-    else:
-      if logger is not None:
-        logger.warning("<KO> launch command rc=%s cwd=%s:\n%s" % (rc, cwd, command))
-        logger.warning("<KO> result command stdout&stderr:\n%s" % res_out)
-      return RCO.ReturnCode("KO", "Popen command problem", value=res_out)
-  else: #except Exception as e:
-    logger.error("<KO> launch command cwd=%s:\n%s" % (cwd, command))
-    logger.error("launch command exception:\n%s" % e)
-    return RCO.ReturnCode("KO", "Popen command problem")
+
+
+def Popen(
+    command, shell=True, cwd=None, env=None, stdout=SP.PIPE, stderr=SP.PIPE, logger=None
+):
+    """
+    make subprocess.Popen(cmd), with
+    call logger.trace and logger.error if problem as returncode != 0
+    """
+    if True:  # try:
+        proc = SP.Popen(
+            command, shell=shell, cwd=cwd, env=env, stdout=stdout, stderr=SP.STDOUT
+        )
+        res_out, res_err = proc.communicate()  # res_err = None as stderr=SP.STDOUT
+        rc = proc.returncode
+
+        DBG.write("Popen logger returncode", (rc, res_out))
+
+        if rc == 0:
+            if logger is not None:
+                logger.trace(
+                    "<OK> launch command rc=%s cwd=%s:\n%s" % (rc, cwd, command)
+                )
+                logger.trace("<OK> result command stdout&stderr:\n%s" % res_out)
+            return RCO.ReturnCode("OK", "Popen command done", value=res_out)
+        else:
+            if logger is not None:
+                logger.warning(
+                    "<KO> launch command rc=%s cwd=%s:\n%s" % (rc, cwd, command)
+                )
+                logger.warning("<KO> result command stdout&stderr:\n%s" % res_out)
+            return RCO.ReturnCode("KO", "Popen command problem", value=res_out)
+    else:  # except Exception as e:
+        logger.error("<KO> launch command cwd=%s:\n%s" % (cwd, command))
+        logger.error("launch command exception:\n%s" % e)
+        return RCO.ReturnCode("KO", "Popen command problem")
 
 
 def sleep(sec):
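The Popen helper above merges stderr into stdout, traces the command through the sat logger, and wraps the outcome in a src.returnCode.ReturnCode ("OK" or "KO" depending on the return code). A simplified standalone sketch of the same pattern, using print in place of the sat logger and a plain status string in place of ReturnCode:

import subprocess as SP

def run_logged(command, cwd=None, env=None):
    # run a shell command, merging stderr into stdout, and report OK/KO
    proc = SP.Popen(
        command, shell=True, cwd=cwd, env=env, stdout=SP.PIPE, stderr=SP.STDOUT
    )
    out, _ = proc.communicate()  # stderr is merged into stdout by stderr=SP.STDOUT
    status = "OK" if proc.returncode == 0 else "KO"
    print("<%s> rc=%s cwd=%s:\n%s" % (status, proc.returncode, cwd, command))
    print(out.decode(errors="replace"))
    return status, out

# usage with a placeholder command
run_logged("echo hello")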
index a8df9545c1f84dd954fb3da96b5b89660b9daed9..9746cfd5e9de9a42dd344a0e350a1d64ab35578a 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -32,176 +32,192 @@ and range of versions
 import os
 import sys
 
-verbose = False # True
+verbose = False  # True
 
 #############################################
 def only_numbers(aStr):
-  """
-  Remove non numericals characters from string,
+    """
+    Remove non numericals characters from string,
+
+    :param aStr: string to work
+    :return: None if no number presence
+    """
+    res = "".join([nb for nb in aStr if nb in "0123456789"])
+    if res == "":
+        return None
+    else:
+        return res
 
-  :param aStr: string to work
-  :return: None if no number presence
-  """
-  res = ''.join([nb for nb in aStr if nb in '0123456789'])
-  if res == "":
-    return None
-  else:
-    return res
 
 #############################################
 def remove_startswith(aStr, startsToCheck):
-  """
-  remove starting strings, if begining of aStr correspond
-  order of list startsToCheck matter
-  do the stuff only for the first correspondence in startsToCheck
-  """
-  for s in startsToCheck:
-    if aStr.startswith(s):
-      return aStr[len(s):]
-  return aStr
+    """
+    remove starting strings, if begining of aStr correspond
+    order of list startsToCheck matter
+    do the stuff only for the first correspondence in startsToCheck
+    """
+    for s in startsToCheck:
+        if aStr.startswith(s):
+            return aStr[len(s) :]
+    return aStr
+
 
 #############################################
 def toList_majorMinorPatch(aStr, verbose=False):
-  """
-  Returns list of integer as  [major, minor, patch] from a string,
+    """
+    Returns list of integer as  [major, minor, patch] from a string,
+
+    | accepts '1.2.3' '1_2_3' 'version_1.2.3' 'version1.2.3' 'v1.2.3',
+    | completion '123' means '123.0.0', '1.2' means '1.2.0'
+    | lower or upper
+    | raise exception if problem
+    """
+    if verbose:
+        print("toList_majorMinorPatch('%s')" % aStr)
+    res = aStr.replace(" ", "")
+    res = res.lower()
+    res = remove_startswith(res, "version_ version v".split())
+    res = res.replace(".", "_").split("_")
+    if len(res) > 3:
+        msg = "Not a major_minor_patch correct syntax: '%s'" % aStr
+        raise Exception(msg)
+    if len(res) == 0:
+        msg = "An empty string is not a major_minor_patch syntax"
+        raise Exception(msg)
+
+    # complete MINOR.PATCH if not existing
+    if len(res) == 1:
+        res.append("0")
+    if len(res) == 2:
+        res.append("0")
+
+    try:
+        ii = int(res[0])
+    except:
+        msg = "major in major_minor_patch is not integer: '%s'" % aStr
+        raise Exception(msg)
+    if ii < 0:
+        msg = "major in major_minor_patch is negative integer: '%s'" % aStr
+        raise Exception(msg)
+
+    try:
+        ii = int(res[1])
+    except:
+        msg = "minor in major_minor_patch is not integer: '%s'" % aStr
+        raise Exception(msg)
+    if ii < 0:
+        msg = "minor in major_minor_patch is negative integer: '%s'" % aStr
+        raise Exception(msg)
+
+    try:
+        ii = int(res[2])
+    except:
+        msg = "patch in major_minor_patch is not integer: '%s'" % aStr
+        raise Exception(msg)
+    if ii < 0:
+        msg = "patch in major_minor_patch is negative integer: '%s'" % aStr
+        raise Exception(msg)
+
+    return [int(i) for i in res]
 
-  | accepts '1.2.3' '1_2_3' 'version_1.2.3' 'version1.2.3' 'v1.2.3',
-  | completion '123' means '123.0.0', '1.2' means '1.2.0'
-  | lower or upper
-  | raise exception if problem
-  """
-  if verbose: print("toList_majorMinorPatch('%s')" % aStr)
-  res = aStr.replace(" ", "")
-  res = res.lower()
-  res = remove_startswith(res, "version_ version v".split())
-  res = res.replace(".", "_").split("_")
-  if len(res) > 3:
-    msg = "Not a major_minor_patch correct syntax: '%s'" % aStr
-    raise Exception(msg)
-  if len(res) == 0:
-    msg = "An empty string is not a major_minor_patch syntax"
-    raise Exception(msg)
-
-  # complete MINOR.PATCH if not existing
-  if len(res) == 1:
-    res.append("0")
-  if len(res) == 2:
-    res.append("0")
-
-  try:
-    ii = int(res[0])
-  except:
-    msg = "major in major_minor_patch is not integer: '%s'" % aStr
-    raise Exception(msg)
-  if ii < 0:
-    msg = "major in major_minor_patch is negative integer: '%s'" % aStr
-    raise Exception(msg)
-
-  try:
-    ii = int(res[1])
-  except:
-    msg = "minor in major_minor_patch is not integer: '%s'" % aStr
-    raise Exception(msg)
-  if ii < 0:
-    msg = "minor in major_minor_patch is negative integer: '%s'" % aStr
-    raise Exception(msg)
-
-  try:
-    ii = int(res[2])
-  except:
-    msg = "patch in major_minor_patch is not integer: '%s'" % aStr
-    raise Exception(msg)
-  if ii < 0:
-    msg = "patch in major_minor_patch is negative integer: '%s'" % aStr
-    raise Exception(msg)
-
-  return [int(i) for i in res]
 
 #############################################
 def toCompactStr_majorMinorPatch(version):
-  """
-  OBSOLETE method
-  parameter version is list of integer as  [major, minor, patch]
-
-  | returns "789" for [7, 8, 9]
-  | warning:
-  |   minor, pach have to be integer less than 10
-  |   raise exception for [7, 10, 11]
-  |   (which returns "71011" as ambigous 710.1.1 for example)
-  """
-  # forbidden use from nov. 2023 and SALOME 9.10.0
-  raise Exception("obsolete toCompactStr_majorMinorPatch method: forbiden use of compact representation of '%s', fix problem in caller" % version)
+    """
+    OBSOLETE method
+    parameter version is list of integer as  [major, minor, patch]
+
+    | returns "789" for [7, 8, 9]
+    | warning:
+    |   minor, pach have to be integer less than 10
+    |   raise exception for [7, 10, 11]
+    |   (which returns "71011" as ambigous 710.1.1 for example)
+    """
+    # forbidden use from nov. 2023 and SALOME 9.10.0
+    raise Exception(
+        "obsolete toCompactStr_majorMinorPatch method: forbiden use of compact representation of '%s', fix problem in caller"
+        % version
+    )
 
 
 #############################################
 def getRange_majorMinorPatch(aStr, verbose=False):
-  """
-  extract from aStr a version range, defined as
-  '*_from_aMinVersionTag_to_aMaxVersionTag' or
-  '*version_aMinVersionTag_to_aMaxVersionTag'.
+    """
+    extract from aStr a version range, defined as
+    '*_from_aMinVersionTag_to_aMaxVersionTag' or
+    '*version_aMinVersionTag_to_aMaxVersionTag'.
+
+    where aMinVersionTag and aMaxVersionTag are compatible with the MinorMajorPatch class syntax,
+    '1.2.3' or '1_2_3' etc.
+    If neither '_from_' nor 'version_' is found first, aMinVersionTag defaults to '0.0.0'.
+
+    :param aStr: the string to parse
+    :return: list [min, max], where min and max are MinorMajorPatch instances,
+             or None if no range is found
+    """
+    tmp1 = aStr.lower().split("_to_")
+
+    if len(tmp1) < 2:
+        return None  # no '_to_'
+    if len(tmp1) > 2:
+        msg = "more than one '_to_' is incorrect for version range: '%s'" % aStr
+        raise Exception(msg)
+    aMax = tmp1[1]
+
+    # accept older syntax as 'version_1_0_0_to_2_0_0', (as '_from_1_0_0_to_2_0_0')
+    if "version_" in tmp1[0] and "_from_" not in tmp1[0]:
+        aStr_with_from = aStr.lower().replace("version_", "_from_", 1)
+    else:
+        aStr_with_from = aStr.lower()
 
-  where aMinVersionTag and aMaxVersionTag are compatible with MinorMajorPatch class syntaxes
-  '1.2.3' or '1_2_3' etc.
-  if not found '_from_' or 'version_' first then aMinVersionTag is '0.0.0'
+    # print("aStr_with_from '%s' -> '%s'" % (aStr, aStr_with_from))
 
-  :param aStr: string to work
-  :return: list [min, max], where min, max are MinorMajorPatch instances.
-           else None if not found
-  """
-  tmp1 = aStr.lower().split("_to_")
-
-  if len(tmp1) < 2:
-    return None # no '_to_'
-  if len(tmp1) > 2:
-    msg = "more than one '_to_' is incorrect for version range: '%s'" % aStr
-    raise Exception(msg)
-  aMax = tmp1[1]
-
-  # accept older syntax as 'version_1_0_0_to_2_0_0', (as '_from_1_0_0_to_2_0_0')
-  if "version_" in tmp1[0] and "_from_" not in tmp1[0]:
-    aStr_with_from = aStr.lower().replace("version_", "_from_", 1)
-  else:
-    aStr_with_from = aStr.lower()
-
-  # print("aStr_with_from '%s' -> '%s'" % (aStr, aStr_with_from))
-
-  tmp0 = aStr_with_from.split("_from_")
-  tmp1 = aStr_with_from.split("_to_")
-
-  if len(tmp0) > 2:
-    msg = "more than one '_from_' is incorrect for version range: '%s'" % aStr
-    raise Exception(msg)
-
-  tmp2 = tmp1[0].split("_from_")
-
-  if len(tmp2) == 2:
-    aMin = tmp2[1]
-  else:
-    aMin ="0.0.0"
-
-  if verbose:
-    msg = "version min '%s' and version max '%s' in version range: '%s'" % (aMin, aMax, aStr)
-    print(msg)
-
-  try:
-    rMin = MinorMajorPatch(aMin)
-    rMax = MinorMajorPatch(aMax)
-  except:
-    msg = "problem version range in '%s'" % aStr
-    raise Exception(msg)
-    """if verbose:
+    tmp0 = aStr_with_from.split("_from_")
+    tmp1 = aStr_with_from.split("_to_")
+
+    if len(tmp0) > 2:
+        msg = "more than one '_from_' is incorrect for version range: '%s'" % aStr
+        raise Exception(msg)
+
+    tmp2 = tmp1[0].split("_from_")
+
+    if len(tmp2) == 2:
+        aMin = tmp2[1]
+    else:
+        aMin = "0.0.0"
+
+    if verbose:
+        msg = "version min '%s' and version max '%s' in version range: '%s'" % (
+            aMin,
+            aMax,
+            aStr,
+        )
+        print(msg)
+
+    try:
+        rMin = MinorMajorPatch(aMin)
+        rMax = MinorMajorPatch(aMax)
+    except:
+        msg = "problem version range in '%s'" % aStr
+        raise Exception(msg)
+        """if verbose:
       print("WARNING: problem version range in '%s'" % aStr)
     return None"""
 
-  if rMin > rMax:
-    msg = "version min '%s' > version max '%s' in version range: '%s'" % (rMin, rMax, aStr)
-    raise Exception(msg)
+    if rMin > rMax:
+        msg = "version min '%s' > version max '%s' in version range: '%s'" % (
+            rMin,
+            rMax,
+            aStr,
+        )
+        raise Exception(msg)
+
+    return [rMin, rMax]
 
-  return [rMin, rMax]
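An illustrative sketch of the range parsing above (not part of the diff; inputs and expected results are taken from test_versionMinorMajorPatch.py later in this commit):

    import src.versionMinorMajorPatch as VMMP

    r_min, r_max = VMMP.getRange_majorMinorPatch("toto_from_1.2.3_to_Version4_5_6")
    print(r_min, r_max)                                # 1.2.3 4.5.6 (MinorMajorPatch instances)

    print(VMMP.getRange_majorMinorPatch("toto_to_2"))  # [version_0_0_0, version_2_0_0], implicit min
    print(VMMP.getRange_majorMinorPatch("toto"))       # None, no '_to_' marker
    # VMMP.getRange_majorMinorPatch("_from_3_to_2")    # would raise: min > max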
 
 #############################################
 class MinorMajorPatch(object):
-  """\
+    """\
   class to define a version as MAJOR.MINOR.PATCH
 
   | Given a version number MAJOR.MINOR.PATCH separator "_" or "."
@@ -211,64 +227,67 @@ class MinorMajorPatch(object):
   | PATCH version when you make backwards-compatible bug fixes.
   """
 
-  def __init__(self, version):
-    if type(version) == list:
-      aStr = '_'.join([str(i) for i in version])
-      v = toList_majorMinorPatch(aStr)
-    else:
-      v = toList_majorMinorPatch(version)
-    self.major = v[0]
-    self.minor = v[1]
-    self.patch = v[2]
-
-  def __repr__(self, sep="_"):
-    """example is 'version_1_2_3' """
-    res = "version_%i%s%i%s%i" % (self.major, sep, self.minor, sep, self.patch)
-    return res
-
-  def __str__(self, sep="."):
-    """example is '1.2.3' """
-    res = "%i%s%i%s%i" % (self.major, sep, self.minor, sep, self.patch)
-    return res
-
-  def strSalome(self):
-    """example is '1_2_3' """
-    return self.__str__(sep="_")
-
-  def strClassic(self):
-    """example is '1.2.3' """
-    return self.__str__(sep=".")
-
-  def strCompact(self):
-    """example is '123' from '1.2.3' """
-    # forbidden use from nov. 2023 and SALOME 9.10.0
-    raise Exception("obsolete strCompact method: forbiden use of compact representation of '%s', fix problem in caller" % str(self))
-    # return toCompactStr_majorMinorPatch(self.toList())
-
-  def toList(self):
-    """example is list of integer [1, 2, 3] from '1.2.3' """
-    return [self.major, self.minor, self.patch]
-
-  def __lt__(self, other):
-    res = (self.toList() < other.toList())
-    return res
-
-  def __le__(self, other):
-    res = (self.toList() <= other.toList())
-    return res
-
-  def __gt__(self, other):
-    res = (self.toList() > other.toList())
-    return res
-
-  def __ge__(self, other):
-    res = (self.toList() >= other.toList())
-    return res
-
-  def __eq__(self, other):
-    res = (self.toList() == other.toList())
-    return res
-
-  def __ne__(self, other):
-    res = (self.toList() != other.toList())
-    return res
+    def __init__(self, version):
+        if type(version) == list:
+            aStr = "_".join([str(i) for i in version])
+            v = toList_majorMinorPatch(aStr)
+        else:
+            v = toList_majorMinorPatch(version)
+        self.major = v[0]
+        self.minor = v[1]
+        self.patch = v[2]
+
+    def __repr__(self, sep="_"):
+        """example is 'version_1_2_3'"""
+        res = "version_%i%s%i%s%i" % (self.major, sep, self.minor, sep, self.patch)
+        return res
+
+    def __str__(self, sep="."):
+        """example is '1.2.3'"""
+        res = "%i%s%i%s%i" % (self.major, sep, self.minor, sep, self.patch)
+        return res
+
+    def strSalome(self):
+        """example is '1_2_3'"""
+        return self.__str__(sep="_")
+
+    def strClassic(self):
+        """example is '1.2.3'"""
+        return self.__str__(sep=".")
+
+    def strCompact(self):
+        """example is '123' from '1.2.3'"""
+        # forbidden use from nov. 2023 and SALOME 9.10.0
+        raise Exception(
+            "obsolete strCompact method: forbiden use of compact representation of '%s', fix problem in caller"
+            % str(self)
+        )
+        # return toCompactStr_majorMinorPatch(self.toList())
+
+    def toList(self):
+        """example is list of integer [1, 2, 3] from '1.2.3'"""
+        return [self.major, self.minor, self.patch]
+
+    def __lt__(self, other):
+        res = self.toList() < other.toList()
+        return res
+
+    def __le__(self, other):
+        res = self.toList() <= other.toList()
+        return res
+
+    def __gt__(self, other):
+        res = self.toList() > other.toList()
+        return res
+
+    def __ge__(self, other):
+        res = self.toList() >= other.toList()
+        return res
+
+    def __eq__(self, other):
+        res = self.toList() == other.toList()
+        return res
+
+    def __ne__(self, other):
+        res = self.toList() != other.toList()
+        return res
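A short usage sketch of the class (illustrative only; the behaviour shown matches the comparison and formatting tests further down in this commit):

    import src.versionMinorMajorPatch as VMMP

    v1 = VMMP.MinorMajorPatch("1.2.3")        # from a string
    v2 = VMMP.MinorMajorPatch([1, 2, 4])      # or from a list [major, minor, patch]

    print(str(v1), repr(v1), v1.strSalome())  # 1.2.3 version_1_2_3 1_2_3
    print(v1 < v2, v1 == VMMP.MinorMajorPatch("1_2_3"))  # True True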
index 6a4bb660eebf4ec231cedc40f0b753724116654a..8e55cf224005176594ba7fdcd5cc8b58398ab712 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 #  Copyright (C) 2010-2013  CEA/DEN
 #
 #  This library is free software; you can redistribute it and/or
 import os
 import shutil
 
-try: # For python2
+try:  # For python2
     import sys
-    reload(sys)  
-    sys.setdefaultencoding('utf8')
+
+    reload(sys)
+    sys.setdefaultencoding("utf8")
 except:
     pass
 
@@ -31,67 +32,70 @@ import src.ElementTree as etree
 
 verbose = False
 
+
 class XmlLogFile(object):
-    '''Class to manage writing in salomeTools xml log file
-    '''
-    def __init__(self, filePath, rootname, attrib = {}):
-        '''Initialization
-        
+    """Class to manage writing in salomeTools xml log file"""
+
+    def __init__(self, filePath, rootname, attrib={}):
+        """Initialization
+
         :param filePath str: The path to the file where to write the log file
         :param rootname str: The name of the root node of the xml file
-        :param attrib dict: the dictionary that contains the attributes 
+        :param attrib dict: the dictionary that contains the attributes
                             and value of the root node
-        '''
-        # Initialize the filePath and ensure that the directory 
+        """
+        # Initialize the filePath and ensure that the directory
         # that contain the file exists (make it if necessary)
         self.logFile = filePath
         src.ensure_path_exists(os.path.dirname(filePath))
         # Initialize the field that contain the xml in memory
-        self.xmlroot = etree.Element(rootname, attrib = attrib)
+        self.xmlroot = etree.Element(rootname, attrib=attrib)
+
+    def write_tree(self, stylesheet=None, file_path=None):
+        """Write the xml tree in the log file path. Add the stylesheet if asked.
 
-    def write_tree(self, stylesheet=None, file_path = None):
-        '''Write the xml tree in the log file path. Add the stylesheet if asked.
-        
         :param stylesheet str: The stylesheet to apply to the xml file
-        '''
+        """
         log_file_path = self.logFile
         if file_path:
-          log_file_path = file_path
+            log_file_path = file_path
         try:
-          with open(log_file_path, 'w') as f:
-            f.write("<?xml version='1.0' encoding='utf-8'?>\n")
-            if stylesheet:
-                # example as href='./hat.xsl' 
-                # as local file xml with local file xsl
-                # with 'python3 -m http.server 8765 &' and
-                # 'chromium-browser http://localhost:8765/hat.xml' or
-                # 'firefox http://localhost:8765/hat.xml'
-                f.write("<?xml-stylesheet type='text/xsl' href='./%s'?>\n" %  stylesheet)
-                pass
-            res= etree.tostring(self.xmlroot, encoding='utf-8')
-            f.write(res)
+            with open(log_file_path, "w") as f:
+                f.write("<?xml version='1.0' encoding='utf-8'?>\n")
+                if stylesheet:
+                    # example as href='./hat.xsl'
+                    # as local file xml with local file xsl
+                    # with 'python3 -m http.server 8765 &' and
+                    # 'chromium-browser http://localhost:8765/hat.xml' or
+                    # 'firefox http://localhost:8765/hat.xml'
+                    f.write(
+                        "<?xml-stylesheet type='text/xsl' href='./%s'?>\n" % stylesheet
+                    )
+                    pass
+                res = etree.tostring(self.xmlroot, encoding="utf-8")
+                f.write(res)
         except IOError:
-            pass  
-        
+            pass
+
     def add_simple_node(self, node_name, text=None, attrib={}):
-        '''Add a node with some attibutes and text to the root node.
-        
+        """Add a node with some attibutes and text to the root node.
+
         :param node_name str: the name of the node to add
         :param text str: the text of the node
-        :param attrib dict: the dictionary containing the 
+        :param attrib dict: the dictionary containing the
                             attribute of the new node
-        '''
+        """
         n = etree.Element(node_name, attrib=attrib)
         n.text = text
         self.xmlroot.append(n)
         return n
-    
+
     def append_node_text(self, node_name, text):
-        '''Append a new text to the node that has node_name as name
-        
+        """Append a new text to the node that has node_name as name
+
         :param node_name str: The name of the node on which append text
         :param text str: The text to append
-        '''
+        """
         # find the corresponding node
         for field in self.xmlroot:
             if field.tag == node_name:
@@ -99,40 +103,41 @@ class XmlLogFile(object):
                 field.text += text
 
     def append_node_attrib(self, node_name, attrib):
-        '''Append a new attributes to the node that has node_name as name
-        
+        """Append a new attributes to the node that has node_name as name
+
         :param node_name str: The name of the node on which append text
         :param attrib dixt: The attrib to append
-        '''
+        """
         self.xmlroot.find(node_name).attrib.update(attrib)
 
+
 class ReadXmlFile(object):
-    '''Class to manage reading of an xml log file
-    '''
+    """Class to manage reading of an xml log file"""
+
     def __init__(self, filePath):
-        '''Initialization
-        
+        """Initialization
+
         :param filePath str: The xml file to be read
-        '''
+        """
         self.filePath = filePath
         etree_inst = etree.parse(filePath)
         self.xmlroot = etree_inst.parse(filePath)
 
     def getRootAttrib(self):
-        '''Get the attibutes of the self.xmlroot
-        
+        """Get the attibutes of the self.xmlroot
+
         :return: The attributes of the root node
         :rtype: dict
-        '''
+        """
         return self.xmlroot.attrib
-    
+
     def get_attrib(self, node_name):
-        '''Get the attibutes of the node node_name in self.xmlroot
-        
+        """Get the attibutes of the node node_name in self.xmlroot
+
         :param node_name str: the name of the node
         :return: the attibutes of the node node_name in self.xmlroot
         :rtype: dict
-        '''
+        """
         attrib = self.xmlroot.find(node_name).attrib
         # To be python 3 compatible, convert bytes to str if there are any
         fixedAttrib = {}
@@ -147,93 +152,98 @@ class ReadXmlFile(object):
                 value = attrib[k]
             fixedAttrib[key] = value
         return fixedAttrib
-    
+
     def get_node_text(self, node):
-        '''Get the text of the first node that has name 
+        """Get the text of the first node that has name
            that corresponds to the parameter node
-        
+
         :param node str: the name of the node from which get the text
-        :return: the text of the first node that has name 
+        :return: the text of the first node that has name
                  that corresponds to the parameter node
         :rtype: str
-        '''
+        """
         return self.xmlroot.find(node).text
-    
+
+
 def add_simple_node(root_node, node_name, text=None, attrib={}):
-    '''Add a node with some attibutes and text to the root node.
+    """Add a node with some attibutes and text to the root node.
 
-    :param root_node etree.Element: the Etree element where to add the new node    
+    :param root_node etree.Element: the Etree element where to add the new node
     :param node_name str: the name of the node to add
     :param text str: the text of the node
-    :param attrib dict: the dictionary containing the 
+    :param attrib dict: the dictionary containing the
                         attribute of the new node
-    '''
+    """
     n = etree.Element(node_name, attrib=attrib)
     n.text = text
     root_node.append(n)
     return n
 
+
 def append_node_attrib(root_node, attrib):
-    '''Append a new attributes to the node that has node_name as name
-    
-    :param root_node etree.Element: the Etree element 
+    """Append a new attributes to the node that has node_name as name
+
+    :param root_node etree.Element: the Etree element
                                     where to append the new attibutes
     :param attrib dixt: The attrib to append
-    '''
+    """
     root_node.attrib.update(attrib)
 
+
 def find_node_by_attrib(xmlroot, name_node, key, value):
-    '''Find the nfirst ode from xmlroot that has name name_node and that has in 
+    """Find the nfirst ode from xmlroot that has name name_node and that has in
        its attributes {key : value}. Return the node
-    
+
     :param xmlroot etree.Element: the Etree element where to search
     :param name_node str: the name of node to search
     :param key str: the key to search
     :param value str: the value to search
     :return: the found node
     :rtype: xmlroot etree.Element
-    '''
-    l_nodes =  xmlroot.findall(name_node)
+    """
+    l_nodes = xmlroot.findall(name_node)
     for node in l_nodes:
         if key not in node.attrib.keys():
             continue
         if node.attrib[key] == value:
             return node
     return None
-    
+
 
 def write_report(filename, xmlroot, stylesheet):
     """Writes a report file from a XML tree.
-    
+
     :param filename str: The path to the file to create
     :param xmlroot etree.Element: the Etree element to write to the file
     :param stylesheet str: The stylesheet to add to the begin of the file
     """
     dirname = os.path.dirname(filename)
     if not os.path.exists(dirname):
-      os.makedirs(dirname)
+        os.makedirs(dirname)
     if len(stylesheet) > 0:
-       styleName = stylesheet
+        styleName = stylesheet
     else:
-       styleName = None
+        styleName = None
 
     with open(filename, "w") as f:
-      f.write("<?xml version='1.0' encoding='utf-8'?>\n")
-      if styleName is not None:
-        f.write("<?xml-stylesheet type='text/xsl' href='%s'?>\n" % styleName)
-      res = etree.tostring(xmlroot, encoding='utf-8')
-      # print("********** etree.tostring %s" % res)
-      f.write(res)
+        f.write("<?xml version='1.0' encoding='utf-8'?>\n")
+        if styleName is not None:
+            f.write("<?xml-stylesheet type='text/xsl' href='%s'?>\n" % styleName)
+        res = etree.tostring(xmlroot, encoding="utf-8")
+        # print("********** etree.tostring %s" % res)
+        f.write(res)
 
     # create fileStyle in dirname if not existing
     if styleName is not None:
-      styleFile = os.path.join(dirname, styleName)
-      if not os.path.exists(styleFile):
-        # copy if from "salomeTools/src/xsl"
-        srcdir = os.path.dirname(src.__file__)
-        srcFile = os.path.join(srcdir, "xsl", styleName)
-        if verbose: print("write_report %s style %s" % (srcFile, styleFile))
-        shutil.copy(srcFile, dirname)
+        styleFile = os.path.join(dirname, styleName)
+        if not os.path.exists(styleFile):
+            # copy it from "salomeTools/src/xsl"
+            srcdir = os.path.dirname(src.__file__)
+            srcFile = os.path.join(srcdir, "xsl", styleName)
+            if verbose:
+                print("write_report %s style %s" % (srcFile, styleFile))
+            shutil.copy(srcFile, dirname)
+
 
 def escapeSequence(aStr):
     """
@@ -255,15 +265,14 @@ def escapeSequence(aStr):
     ' (apostrophe or single quote) as &#39;
     " (double-quote) as &#34;
     """
-    replaces = [ ('&', '&amp;'),
-                 ('>', '&gt;'),
-                 ('<', '&lt;'),
-                 ("'", '&#39;'),
-                 ('"', '&#34;'),
-                ]
+    replaces = [
+        ("&", "&amp;"),
+        (">", "&gt;"),
+        ("<", "&lt;"),
+        ("'", "&#39;"),
+        ('"', "&#34;"),
+    ]
     res = aStr
-    for ini, fin in replaces: # order matters
-      res = res.replace(ini, fin)
+    for ini, fin in replaces:  # order matters
+        res = res.replace(ini, fin)
     return res
-
-    
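A quick sanity check of the escaping above (illustrative only; note that the '&' replacement has to come first so the entities produced by the later replacements are not escaped a second time):

    print(escapeSequence('he said "<hello>"'))
    # he said &#34;&lt;hello&gt;&#34;
    print(escapeSequence("&lt;"))
    # &amp;lt;  (a pre-existing entity is itself escaped, since '&' is handled first)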
index 14a3ff7d1a2698d60572341e9ffe5ba7323d450c..a54f6ddcf322bf431ae8463f9f8241b186ee2164 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -43,19 +43,23 @@ satdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 # sys.stderr.write("INFO    : initializeTest needs '%s' in sys.path:\n%s\n" % (satdir, PP.pformat(sys.path)))
 
 if satdir not in sys.path:
-  # get path to salomeTools sources FIRST as prepend
-  # Make the src & commands package accessible from all test code
-  sys.path.insert(0, satdir)
-  sys.stderr.write("""\
+    # get path to salomeTools sources FIRST as prepend
+    # Make the src & commands package accessible from all test code
+    sys.path.insert(0, satdir)
+    sys.stderr.write(
+        """\
 WARNING : sys.path not set for salomeTools, fixed for you:
           sys.path prepend '%s'
-          sys.path:\n%s\n""" % (satdir, PP.pformat(sys.path)))
-  # os.environ PATH is not set...
-  # supposedly useful only for subprocess launch from sat
-  # see https://docs.python.org/2/library/os.html
-  # On some platforms, including FreeBSD and Mac OS X, 
-  # setting environ may cause memory leaks.
-  # sys.stderr.write("os.environ PATH:\n%s\n" % PP.pformat(os.environ["PATH"].split(":")))
-  sys.stderr.write("INFO    : to fix this message type:\n  'export PYTHONPATH=%s:${PYTHONPATH}'\n" % satdir)
-  
-
+          sys.path:\n%s\n"""
+        % (satdir, PP.pformat(sys.path))
+    )
+    # os.environ PATH is not set...
+    # supposedly useful only for subprocess launch from sat
+    # see https://docs.python.org/2/library/os.html
+    # On some platforms, including FreeBSD and Mac OS X,
+    # setting environ may cause memory leaks.
+    # sys.stderr.write("os.environ PATH:\n%s\n" % PP.pformat(os.environ["PATH"].split(":")))
+    sys.stderr.write(
+        "INFO    : to fix this message type:\n  'export PYTHONPATH=%s:${PYTHONPATH}'\n"
+        % satdir
+    )
index ead64c570925d7cde381aea6045257756483f805..9d09ca83f597a7a4e5782031e5f4fa097eb90a6b 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -21,13 +21,13 @@ import os
 import sys
 import unittest
 
-import initializeTest # set PATH etc for test
+import initializeTest  # set PATH etc for test
 
-import src.debug as DBG # Easy print stderr (for DEBUG only)
-import src.pyconf as PYF # 0.3.7
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
+import src.pyconf as PYF  # 0.3.7
 
 _EXAMPLES = {
-: """\
+    1: """\
   messages:
   [
     {
@@ -47,84 +47,78 @@ _EXAMPLES = {
     }
   ]
 """,
-
-2 : """\
+    2: """\
   aa: 111
   bb: $aa + 222
 """,
-
-3 : """\
+    3: """\
   aa: Yves
   bb: "Herve" # avoid Hervé -> 'utf8' codec can't decode byte
 """,
-
-4 : """\
+    4: """\
   aa: Yves
   bb: "Hervé" # avoid Hervé -> 'utf8' codec can't decode byte
 """,
-
-
 }
 
 
 class TestCase(unittest.TestCase):
-  "Test the debug.py"""
-  
-  def test_000(self):
-    # one shot setUp() for this TestCase
-    # DBG.push_debug(True)
-    # SAT.setNotLocale() # test english
-    return
-    
-  def test_005(self):
-    res = DBG.getLocalEnv()
-    self.assertTrue(len(res.split()) > 0)
-    self.assertTrue("USER :" in res)
-    self.assertTrue("LANG :" in res)
-       
-  def test_010(self):
-    inStream = DBG.InStream(_EXAMPLES[1])
-    self.assertEqual(inStream.getvalue(), _EXAMPLES[1])
-    cfg = PYF.Config(inStream)
-    self.assertEqual(len(cfg.messages), 3)
-    outStream = DBG.OutStream()
-    DBG.saveConfigStd(cfg, outStream)
-    res = outStream.value
-    DBG.write("test_010 cfg std", res)
-    self.assertTrue("messages :" in res)
-    self.assertTrue("'sys.stderr'" in res)
-    
-  def test_020(self):
-    inStream = DBG.InStream(_EXAMPLES[2])
-    cfg = PYF.Config(inStream)
-    res = DBG.getStrConfigDbg(cfg)
-    DBG.write("test_020 cfg dbg", res)
-    ress = res.split("\n")
-    self.assertTrue(".aa" in ress[0])
-    self.assertTrue(": '111'" in ress[0])
-    self.assertTrue(".bb" in ress[1])
-    self.assertTrue(": $aa + 222 " in ress[1])
-    self.assertTrue("--> '333'" in ress[1])
-    
-  def test_025(self):
-    inStream = DBG.InStream(_EXAMPLES[1])
-    cfg = PYF.Config(inStream)
-    outStream = DBG.OutStream()
-    DBG.saveConfigDbg(cfg, outStream)
-    res = outStream.value
-    DBG.write("test_025 cfg dbg", res)
-    for i in range(len(cfg.messages)):
-      self.assertTrue("messages[%i].name" % i in res)
-    self.assertTrue("--> 'HELLO sys.stderr'" in res)
+    "Test the debug.py" ""
+
+    def test_000(self):
+        # one shot setUp() for this TestCase
+        # DBG.push_debug(True)
+        # SAT.setNotLocale() # test english
+        return
+
+    def test_005(self):
+        res = DBG.getLocalEnv()
+        self.assertTrue(len(res.split()) > 0)
+        self.assertTrue("USER :" in res)
+        self.assertTrue("LANG :" in res)
 
-      
-  def test_999(self):
-    # one shot tearDown() for this TestCase
-    # SAT.setLocale() # end test english
-    # DBG.pop_debug()
-    return
-    
-if __name__ == '__main__':
+    def test_010(self):
+        inStream = DBG.InStream(_EXAMPLES[1])
+        self.assertEqual(inStream.getvalue(), _EXAMPLES[1])
+        cfg = PYF.Config(inStream)
+        self.assertEqual(len(cfg.messages), 3)
+        outStream = DBG.OutStream()
+        DBG.saveConfigStd(cfg, outStream)
+        res = outStream.value
+        DBG.write("test_010 cfg std", res)
+        self.assertTrue("messages :" in res)
+        self.assertTrue("'sys.stderr'" in res)
+
+    def test_020(self):
+        inStream = DBG.InStream(_EXAMPLES[2])
+        cfg = PYF.Config(inStream)
+        res = DBG.getStrConfigDbg(cfg)
+        DBG.write("test_020 cfg dbg", res)
+        ress = res.split("\n")
+        self.assertTrue(".aa" in ress[0])
+        self.assertTrue(": '111'" in ress[0])
+        self.assertTrue(".bb" in ress[1])
+        self.assertTrue(": $aa + 222 " in ress[1])
+        self.assertTrue("--> '333'" in ress[1])
+
+    def test_025(self):
+        inStream = DBG.InStream(_EXAMPLES[1])
+        cfg = PYF.Config(inStream)
+        outStream = DBG.OutStream()
+        DBG.saveConfigDbg(cfg, outStream)
+        res = outStream.value
+        DBG.write("test_025 cfg dbg", res)
+        for i in range(len(cfg.messages)):
+            self.assertTrue("messages[%i].name" % i in res)
+        self.assertTrue("--> 'HELLO sys.stderr'" in res)
+
+    def test_999(self):
+        # one shot tearDown() for this TestCase
+        # SAT.setLocale() # end test english
+        # DBG.pop_debug()
+        return
+
+
+if __name__ == "__main__":
     unittest.main(exit=False)
     pass
-
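The _EXAMPLES entries above give a feel for the pyconf syntax; a minimal sketch of what the assertions check (illustrative only; the expected values are those asserted in the pyconf test further down in this commit):

    import src.debug as DBG  # needs the salomeTools sources on sys.path, as initializeTest arranges
    import src.pyconf as PYF

    cfg = PYF.Config(DBG.InStream(_EXAMPLES[2]))  # the 'aa: 111 / bb: $aa + 222' sample above
    print(str(cfg))                               # {'aa': 111, 'bb': $aa + 222}  (reference kept as text)
    print(cfg.aa, cfg.bb)                         # 111 333  ($aa + 222 is resolved when bb is accessed)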
index b4953f4a008f8b57cec68961c6ee9b399ace272a..0da8c69dffc335db0a8562bfa10c7f5bc7d9f82b 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -35,191 +35,224 @@ import sys
 import unittest
 import pprint as PP
 
-import initializeTest # set PATH etc for test
+import initializeTest  # set PATH etc for test
 
 import src.versionMinorMajorPatch as VMMP
 
-verbose = False # True
+verbose = False  # True
+
 
 class TestCase(unittest.TestCase):
-  "Test the versionMajorMinorPatch.py"""
-
-  def test_010(self):
-    if verbose: print(PP.pformat(dir(self)))
-    self.assertTrue(VMMP.only_numbers("") is None)
-    self.assertEqual(VMMP.only_numbers("1.2.3"), "123")
-    self.assertEqual(VMMP.only_numbers("\n11.12.13\n"), "111213")
-    self.assertEqual(VMMP.only_numbers(" \n 11.\t\n\t..12.13-rc2\n"), "1112132")
-
-  def test_015(self):
-    res = "a_b_c"
-    self.assertEqual(VMMP.remove_startswith("version_a_b_c", "version_".split()), res)
-    self.assertEqual(VMMP.remove_startswith("v_a_b_c",       "version_ v_".split()), res)
-    self.assertEqual(VMMP.remove_startswith("va_b_c",        "version_ v_ v".split()), res)
-
-    ini = "version_a_b_c"
-    self.assertEqual(VMMP.remove_startswith(ini, "V".split()), ini)
-    self.assertEqual(VMMP.remove_startswith(ini, "_".split()), ini)
-    self.assertEqual(VMMP.remove_startswith(ini, "a_b_c".split()), ini)
-    self.assertEqual(VMMP.remove_startswith(ini, "VERSION".split()), ini)
-
-
-  def test_020(self):
-    res = [11, 222, 3333]
-    self.assertEqual(VMMP.toList_majorMinorPatch("11.222.3333"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("11_222_3333"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("11.222_3333"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("  11.  222 . 3333  "), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("\n  11  .    222 .   3333   \n"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch(" \n11.\t222.\r3333\n "), res) # could be tricky
-
-    self.assertEqual(VMMP.toList_majorMinorPatch("V11.222.3333"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("Version11_222_3333"), res)
-    self.assertEqual(VMMP.toList_majorMinorPatch("Version_11_222_3333"), res)
-
-
-    self.assertEqual(VMMP.toList_majorMinorPatch("11"), [11, 0, 0])
-    self.assertEqual(VMMP.toList_majorMinorPatch("11.0"), [11, 0, 0])
-    self.assertEqual(VMMP.toList_majorMinorPatch("11.2"), [11, 2, 0])
-    self.assertEqual(VMMP.toList_majorMinorPatch("\n1 .    2  \n"), [1, 2, 0])
-
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("11.")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("11.2.")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("11.2.3.")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch(".11")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("1_2_3_4")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch("_1_2_3_")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
-    with self.assertRaises(Exception): VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
-
-
-  def test_040(self):
-    MMP = VMMP.MinorMajorPatch
-    v = [1, 2, 3]
-    self.assertEqual(MMP(v).__str__(), "1.2.3")
-    self.assertEqual(MMP(v).__str__(sep="_"), "1_2_3")
-    self.assertEqual(str(MMP(v)), "1.2.3")
-
-    self.assertEqual(MMP(v).__repr__(), "version_1_2_3")
-    self.assertEqual(MMP(v).__repr__(sep="."), "version_1.2.3")
-
-    self.assertEqual(MMP(v).strSalome(), "1_2_3")
-    self.assertEqual(MMP(v).strClassic(), "1.2.3")
-
-    self.assertEqual(MMP(['  123 \n', 2, 10]).strClassic(), "123.2.10")
-    self.assertEqual(MMP(['  123 \n', 2, 10]).strSalome(), "123_2_10")
-
-    with self.assertRaises(Exception): MMP([-5, 2, 10])
-    with self.assertRaises(Exception): MMP([5, -2, 10])
-    with self.assertRaises(Exception): MMP([5, 2, -10])
-    with self.assertRaises(Exception): MMP(['-123', 2, 10])
-
-  def test_050(self):
-    MMP = VMMP.MinorMajorPatch
-    v000 = MMP("0.0.0")
-    v010 = MMP("0.1.0")
-    v100 = MMP("1.0.0")
-    v101 = MMP("1.0.1")
-
-    va = v000
-    vb = MMP("0.0.0")
-    self.assertTrue(va == vb)
-    self.assertTrue(va >= vb)
-    self.assertTrue(va <= vb)
-    self.assertFalse(va != vb)
-    self.assertFalse(va > vb)
-    self.assertFalse(va < vb)
-
-    va = v000
-    vb = v010
-    self.assertFalse(va == vb)
-    self.assertFalse(va >= vb)
-    self.assertTrue(va <= vb)
-    self.assertTrue(va != vb)
-    self.assertFalse(va > vb)
-    self.assertTrue(va < vb)
-
-    va = v101
-    vb = v100
-    self.assertFalse(va == vb)
-    self.assertTrue(va >= vb)
-    self.assertFalse(va <= vb)
-    self.assertTrue(va != vb)
-    self.assertTrue(va > vb)
-    self.assertFalse(va < vb)
-
-  def test_060(self):
-    MMP = VMMP.MinorMajorPatch
-    v0 = MMP("0")
-    v1 = MMP("1")
-    v2 = MMP("2")
-    v123 = MMP("1.2.3")
-    v456 = MMP("4.5.6")
-
-    tests = """\
+    "Test the versionMajorMinorPatch.py" ""
+
+    def test_010(self):
+        if verbose:
+            print(PP.pformat(dir(self)))
+        self.assertTrue(VMMP.only_numbers("") is None)
+        self.assertEqual(VMMP.only_numbers("1.2.3"), "123")
+        self.assertEqual(VMMP.only_numbers("\n11.12.13\n"), "111213")
+        self.assertEqual(VMMP.only_numbers(" \n 11.\t\n\t..12.13-rc2\n"), "1112132")
+
+    def test_015(self):
+        res = "a_b_c"
+        self.assertEqual(
+            VMMP.remove_startswith("version_a_b_c", "version_".split()), res
+        )
+        self.assertEqual(VMMP.remove_startswith("v_a_b_c", "version_ v_".split()), res)
+        self.assertEqual(VMMP.remove_startswith("va_b_c", "version_ v_ v".split()), res)
+
+        ini = "version_a_b_c"
+        self.assertEqual(VMMP.remove_startswith(ini, "V".split()), ini)
+        self.assertEqual(VMMP.remove_startswith(ini, "_".split()), ini)
+        self.assertEqual(VMMP.remove_startswith(ini, "a_b_c".split()), ini)
+        self.assertEqual(VMMP.remove_startswith(ini, "VERSION".split()), ini)
+
+    def test_020(self):
+        res = [11, 222, 3333]
+        self.assertEqual(VMMP.toList_majorMinorPatch("11.222.3333"), res)
+        self.assertEqual(VMMP.toList_majorMinorPatch("11_222_3333"), res)
+        self.assertEqual(VMMP.toList_majorMinorPatch("11.222_3333"), res)
+        self.assertEqual(VMMP.toList_majorMinorPatch("  11.  222 . 3333  "), res)
+        self.assertEqual(
+            VMMP.toList_majorMinorPatch("\n  11  .    222 .   3333   \n"), res
+        )
+        self.assertEqual(
+            VMMP.toList_majorMinorPatch(" \n11.\t222.\r3333\n "), res
+        )  # could be tricky
+
+        self.assertEqual(VMMP.toList_majorMinorPatch("V11.222.3333"), res)
+        self.assertEqual(VMMP.toList_majorMinorPatch("Version11_222_3333"), res)
+        self.assertEqual(VMMP.toList_majorMinorPatch("Version_11_222_3333"), res)
+
+        self.assertEqual(VMMP.toList_majorMinorPatch("11"), [11, 0, 0])
+        self.assertEqual(VMMP.toList_majorMinorPatch("11.0"), [11, 0, 0])
+        self.assertEqual(VMMP.toList_majorMinorPatch("11.2"), [11, 2, 0])
+        self.assertEqual(VMMP.toList_majorMinorPatch("\n1 .    2  \n"), [1, 2, 0])
+
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("11.")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("11.2.")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("11.2.3.")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch(".11")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("1_2_3_4")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch("_1_2_3_")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
+        with self.assertRaises(Exception):
+            VMMP.toList_majorMinorPatch(" \n 11...22.333-rc2\n")
+
+    def test_040(self):
+        MMP = VMMP.MinorMajorPatch
+        v = [1, 2, 3]
+        self.assertEqual(MMP(v).__str__(), "1.2.3")
+        self.assertEqual(MMP(v).__str__(sep="_"), "1_2_3")
+        self.assertEqual(str(MMP(v)), "1.2.3")
+
+        self.assertEqual(MMP(v).__repr__(), "version_1_2_3")
+        self.assertEqual(MMP(v).__repr__(sep="."), "version_1.2.3")
+
+        self.assertEqual(MMP(v).strSalome(), "1_2_3")
+        self.assertEqual(MMP(v).strClassic(), "1.2.3")
+
+        self.assertEqual(MMP(["  123 \n", 2, 10]).strClassic(), "123.2.10")
+        self.assertEqual(MMP(["  123 \n", 2, 10]).strSalome(), "123_2_10")
+
+        with self.assertRaises(Exception):
+            MMP([-5, 2, 10])
+        with self.assertRaises(Exception):
+            MMP([5, -2, 10])
+        with self.assertRaises(Exception):
+            MMP([5, 2, -10])
+        with self.assertRaises(Exception):
+            MMP(["-123", 2, 10])
+
+    def test_050(self):
+        MMP = VMMP.MinorMajorPatch
+        v000 = MMP("0.0.0")
+        v010 = MMP("0.1.0")
+        v100 = MMP("1.0.0")
+        v101 = MMP("1.0.1")
+
+        va = v000
+        vb = MMP("0.0.0")
+        self.assertTrue(va == vb)
+        self.assertTrue(va >= vb)
+        self.assertTrue(va <= vb)
+        self.assertFalse(va != vb)
+        self.assertFalse(va > vb)
+        self.assertFalse(va < vb)
+
+        va = v000
+        vb = v010
+        self.assertFalse(va == vb)
+        self.assertFalse(va >= vb)
+        self.assertTrue(va <= vb)
+        self.assertTrue(va != vb)
+        self.assertFalse(va > vb)
+        self.assertTrue(va < vb)
+
+        va = v101
+        vb = v100
+        self.assertFalse(va == vb)
+        self.assertTrue(va >= vb)
+        self.assertFalse(va <= vb)
+        self.assertTrue(va != vb)
+        self.assertTrue(va > vb)
+        self.assertFalse(va < vb)
+
+    def test_060(self):
+        MMP = VMMP.MinorMajorPatch
+        v0 = MMP("0")
+        v1 = MMP("1")
+        v2 = MMP("2")
+        v123 = MMP("1.2.3")
+        v456 = MMP("4.5.6")
+
+        tests = """\
 toto_from_1_to_2
    _from_1.0.0_to_2.0.0
 _from_1_0.  0_to_  2.0_0
 _from_V1.0.0_to_2.0.0
 _from_version_1.0.0_to_2.0.0
 version_1.0.0_to_2.0.0
-VERSION_1.0.0_to_2.0.0""".split("\n")
-
-    for a in tests:
-      # print("test '%s'" % a)
-      r1, r2 = VMMP.getRange_majorMinorPatch(a)
-      self.assertEqual(r1, v1)
-      self.assertEqual(r2, v2)
-
-    a = "toto_to_2"
-    r1, r2 = VMMP.getRange_majorMinorPatch(a)
-    self.assertEqual(r1, v0)
-    self.assertEqual(r2, v2)
-
-    a = "toto_to_Version2"
-    r1, r2 = VMMP.getRange_majorMinorPatch(a)
-    self.assertEqual(r1, v0)
-    self.assertEqual(r2, v2)
-
-    a = "toto_from_1.2.3_to_Version4_5_6"
-    r1, r2 = VMMP.getRange_majorMinorPatch(a)
-    self.assertEqual(r1, v123)
-    self.assertEqual(r2, v456)
-
-    a = "toto_from_1.2.3_to_Version1_2_3"
-    r1, r2 = VMMP.getRange_majorMinorPatch(a)
-    self.assertEqual(r1, v123)
-    self.assertEqual(r2, v123)
-
-    # _from_ without _to_ does not matter
-    tests = """\
+VERSION_1.0.0_to_2.0.0""".split(
+            "\n"
+        )
+
+        for a in tests:
+            # print("test '%s'" % a)
+            r1, r2 = VMMP.getRange_majorMinorPatch(a)
+            self.assertEqual(r1, v1)
+            self.assertEqual(r2, v2)
+
+        a = "toto_to_2"
+        r1, r2 = VMMP.getRange_majorMinorPatch(a)
+        self.assertEqual(r1, v0)
+        self.assertEqual(r2, v2)
+
+        a = "toto_to_Version2"
+        r1, r2 = VMMP.getRange_majorMinorPatch(a)
+        self.assertEqual(r1, v0)
+        self.assertEqual(r2, v2)
+
+        a = "toto_from_1.2.3_to_Version4_5_6"
+        r1, r2 = VMMP.getRange_majorMinorPatch(a)
+        self.assertEqual(r1, v123)
+        self.assertEqual(r2, v456)
+
+        a = "toto_from_1.2.3_to_Version1_2_3"
+        r1, r2 = VMMP.getRange_majorMinorPatch(a)
+        self.assertEqual(r1, v123)
+        self.assertEqual(r2, v123)
+
+        # _from_ without _to_ does not matter
+        tests = """\
 
 toto
 from
 to
 _from_
-toto_from_2""".split("\n")
-
-    for a in tests:
-      rx = VMMP.getRange_majorMinorPatch(a, verbose=False)
-      self.assertEqual(rx, None)
-
-    # _to_ without _from_ does not matter, as implicit _from_ '0.0.0'
-    # empty _to_ raise error
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("_to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("_from_to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("_from__to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("toto_from__to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("toto_from_123_to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("version_123_to_")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("version_to_")
-
-    # min > max does matter
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("_from_3_to_2")
-    with self.assertRaises(Exception): VMMP.getRange_majorMinorPatch("_from_3.2.5_to_V2_1_1")
-
-if __name__ == '__main__':
-  unittest.main(exit=False)
-  pass
+toto_from_2""".split(
+            "\n"
+        )
+
+        for a in tests:
+            rx = VMMP.getRange_majorMinorPatch(a, verbose=False)
+            self.assertEqual(rx, None)
+
+        # _to_ without _from_ does not matter, as implicit _from_ '0.0.0'
+        # empty _to_ raise error
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("_to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("_from_to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("_from__to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("toto_from__to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("toto_from_123_to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("version_123_to_")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("version_to_")
+
+        # min > max does matter
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("_from_3_to_2")
+        with self.assertRaises(Exception):
+            VMMP.getRange_majorMinorPatch("_from_3.2.5_to_V2_1_1")
+
+
+if __name__ == "__main__":
+    unittest.main(exit=False)
+    pass
index 37e132ab6d0fbd43d105399ccb1834a240c54c51..3d4d0c1dc152d1dd3a05865be3fe99c2e9dbc4c2 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -55,106 +55,105 @@ from logging.handlers import BufferingHandler
 
 import src.debug as DBG
 
-verbose = False #True
+verbose = False  # True
+
+_TRACE = LOGI.INFO - 2  # just below info
 
-_TRACE = LOGI.INFO - 2 # just below info
 
 class LoggerSat(LOGI.Logger):
-  """
-  Elementary prototype for logger sat
-  add a level TRACE as log.trace(msg) 
-  below log.info(msg)
-  above log.debug(msg)
-  to assume store long log asci in files txt under/outside files xml
-  
-  see: /usr/lib64/python2.7/logging/xxx__init__.py etc.
-  """
-  
-  def __init__(self, name, level=LOGI.INFO):
-    """
-    Initialize the logger with a name and an optional level.
     """
-    super(LoggerSat, self).__init__(name, level)
-    LOGI.addLevelName(_TRACE, "TRACE")
-    # LOGI.TRACE = _TRACE # only for coherency,
-    
-  def trace(self, msg, *args, **kwargs):
+    Elementary prototype for the sat logger:
+    adds a level TRACE as log.trace(msg),
+    below log.info(msg),
+    above log.debug(msg),
+    to allow storing long ascii logs in txt files under/outside the xml files
+
+    see: /usr/lib64/python2.7/logging/xxx__init__.py etc.
     """
-    Log 'msg % args' with severity '_TRACE'.
 
-    To pass exception information, use the keyword argument exc_info with
-    a true value, e.g.
+    def __init__(self, name, level=LOGI.INFO):
+        """
+        Initialize the logger with a name and an optional level.
+        """
+        super(LoggerSat, self).__init__(name, level)
+        LOGI.addLevelName(_TRACE, "TRACE")
+        # LOGI.TRACE = _TRACE # only for coherency,
+
+    def trace(self, msg, *args, **kwargs):
+        """
+        Log 'msg % args' with severity '_TRACE'.
+
+        To pass exception information, use the keyword argument exc_info with
+        a true value, e.g.
+
+        logger.trace("Houston, we have a %s", "long trace to follow")
+        """
+        if self.isEnabledFor(_TRACE):
+            self._log(_TRACE, msg, args, **kwargs)
 
-    logger.trace("Houston, we have a %s", "long trace to follow")
-    """
-    if self.isEnabledFor(_TRACE):
-        self._log(_TRACE, msg, args, **kwargs)
 
 class TestCase(unittest.TestCase):
-  "Test the debug.py"""
-  
-  initialLoggerClass = [] # to keep clean module logging
-  
-  def test_000(self):
-    # one shot setUp() for this TestCase
-    self.initialLoggerClass.append(LOGI._loggerClass)
-    LOGI.setLoggerClass(LoggerSat)
-    if verbose:
-      DBG.push_debug(True)
-      # DBG.write("assert unittest", [a for a in dir(self) if "assert" in a])
-    pass
-  
-  def test_999(self):
-    # one shot tearDown() for this TestCase
-    if verbose:
-      DBG.pop_debug()
-    LOGI.setLoggerClass(self.initialLoggerClass[0])
-    return
-  
-  def test_010(self):
-    # LOGI.setLoggerClass(LoggerSat) # done once in test_000
-    name = "testLogging"
-    lgr = LOGI.getLogger(name) # create it
-    lgr.setLevel("DEBUG")
-    self.assertEqual(lgr.__class__, LoggerSat)
-    self.assertEqual(lgr.name, name)
-    self.assertIn("trace", dir(lgr))
-    self.assertIn("TRACE", LOGI._levelNames.keys())
-    self.assertIn(_TRACE, LOGI._levelNames.keys())
-    self.assertEqual(LOGI.getLevelName(LOGI.INFO), "INFO")
-    self.assertEqual(LOGI.getLevelName(_TRACE), "TRACE")
-    
-    # creation d'un handler pour chaque log sur la console
-    formatter = LOGI.Formatter('%(levelname)-8s :: %(message)s')
-    # stream_handler = LOGI.handlers.StreamHandler() # log outputs in console
-    stream_handler = LOGI.handlers.BufferingHandler(1000) # log outputs in memory
-    stream_handler.setLevel(LOGI.DEBUG)
-    stream_handler.setFormatter(formatter)
-    lgr.addHandler(stream_handler)
-    # print # skip one line if outputs in console
-    lgr.warning("!!! test warning")
-    lgr.info("!!! test info")
-    lgr.trace("!!! test trace")
-    lgr.debug("!!! test debug")
-    self.assertEqual(len(stream_handler.buffer), 4)
-    rec = stream_handler.buffer[-1]
-    self.assertEqual(rec.levelname, "DEBUG")
-    self.assertEqual(rec.msg, "!!! test debug")
-    self.assertEqual(stream_handler.get_name(), None) # what to serve ?
-    
-  def test_020(self):
-    # LOGI.setLoggerClass(LoggerSat)
-    name = "testLogging"
-    lgr = LOGI.getLogger(name) #  find it as created yet in test_010
-    stream_handler = lgr.handlers[0]
-    rec = stream_handler.buffer[-1]
-    self.assertEqual(rec.levelname, "DEBUG")
-    self.assertEqual(rec.msg, "!!! test debug")
-
-      
-          
-    
-if __name__ == '__main__':
+    "Test the debug.py" ""
+
+    initialLoggerClass = []  # to keep clean module logging
+
+    def test_000(self):
+        # one shot setUp() for this TestCase
+        self.initialLoggerClass.append(LOGI._loggerClass)
+        LOGI.setLoggerClass(LoggerSat)
+        if verbose:
+            DBG.push_debug(True)
+            # DBG.write("assert unittest", [a for a in dir(self) if "assert" in a])
+        pass
+
+    def test_999(self):
+        # one shot tearDown() for this TestCase
+        if verbose:
+            DBG.pop_debug()
+        LOGI.setLoggerClass(self.initialLoggerClass[0])
+        return
+
+    def test_010(self):
+        # LOGI.setLoggerClass(LoggerSat) # done once in test_000
+        name = "testLogging"
+        lgr = LOGI.getLogger(name)  # create it
+        lgr.setLevel("DEBUG")
+        self.assertEqual(lgr.__class__, LoggerSat)
+        self.assertEqual(lgr.name, name)
+        self.assertIn("trace", dir(lgr))
+        self.assertIn("TRACE", LOGI._levelNames.keys())
+        self.assertIn(_TRACE, LOGI._levelNames.keys())
+        self.assertEqual(LOGI.getLevelName(LOGI.INFO), "INFO")
+        self.assertEqual(LOGI.getLevelName(_TRACE), "TRACE")
+
+        # create a handler for each log, on the console
+        formatter = LOGI.Formatter("%(levelname)-8s :: %(message)s")
+        # stream_handler = LOGI.handlers.StreamHandler() # log outputs in console
+        stream_handler = LOGI.handlers.BufferingHandler(1000)  # log outputs in memory
+        stream_handler.setLevel(LOGI.DEBUG)
+        stream_handler.setFormatter(formatter)
+        lgr.addHandler(stream_handler)
+        # print # skip one line if outputs in console
+        lgr.warning("!!! test warning")
+        lgr.info("!!! test info")
+        lgr.trace("!!! test trace")
+        lgr.debug("!!! test debug")
+        self.assertEqual(len(stream_handler.buffer), 4)
+        rec = stream_handler.buffer[-1]
+        self.assertEqual(rec.levelname, "DEBUG")
+        self.assertEqual(rec.msg, "!!! test debug")
+        self.assertEqual(stream_handler.get_name(), None)  # what to serve ?
+
+    def test_020(self):
+        # LOGI.setLoggerClass(LoggerSat)
+        name = "testLogging"
+        lgr = LOGI.getLogger(name)  # find it, already created in test_010
+        stream_handler = lgr.handlers[0]
+        rec = stream_handler.buffer[-1]
+        self.assertEqual(rec.levelname, "DEBUG")
+        self.assertEqual(rec.msg, "!!! test debug")
+
+
+if __name__ == "__main__":
     unittest.main(exit=False)
     pass
-
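A sketch of how the TRACE level added by LoggerSat would be used, mirroring test_010 above (illustrative only; the logger name is arbitrary and LoggerSat is the class defined in this test file):

    import logging as LOGI
    from logging.handlers import BufferingHandler

    LOGI.setLoggerClass(LoggerSat)
    lgr = LOGI.getLogger("demoLogger")       # instantiated as a LoggerSat
    lgr.setLevel("DEBUG")
    handler = BufferingHandler(1000)         # keep the records in memory
    lgr.addHandler(handler)
    lgr.trace("a long ascii dump ...")       # custom level 18, between DEBUG (10) and INFO (20)
    print(LOGI.getLevelName(LOGI.INFO - 2))  # TRACE, registered by LoggerSat.__init__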
index a87dc7f57b94b0d05fc15edcd1843e1f2b64ed7c..a62f1490d998b497532a403517c123c2e68ae300 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -21,13 +21,13 @@ import os
 import sys
 import unittest
 
-import initializeTest # set PATH etc for test
+import initializeTest  # set PATH etc for test
 
-import src.debug as DBG # Easy print stderr (for DEBUG only)
-import src.pyconf as PYF # 0.3.7
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
+import src.pyconf as PYF  # 0.3.7
 
 _EXAMPLES = {
-: """\
+    1: """\
   messages:
   [
     {
@@ -47,23 +47,19 @@ _EXAMPLES = {
     }
   ]
 """,
-
-2 : """\
+    2: """\
   aa: 111
   bb: $aa + 222
 """,
-
-3 : """\
+    3: """\
   aa: Yves
   bb: "Herve" # avoid Hervé -> 'utf8' codec can't decode byte
 """,
-
-4 : """\
+    4: """\
   aa: Yves
   bb: "Hervé" # avoid Hervé -> 'utf8' codec can't decode byte
 """,
-
-5 : """\
+    5: """\
   aa: Yves
   bb: "Herve"
   cc: [ 
@@ -79,15 +75,13 @@ _EXAMPLES = {
    d4 : $bb + " bye"
    }   
 """,
-
-# error circular
-6 : """\
+    # error circular
+    6: """\
   aa: Yves
   bb: $cc
   cc: $bb
 """,
-
-7 : """\
+    7: """\
   aa: Yves
   bb: $cc
   cc: [ 
@@ -95,8 +89,7 @@ _EXAMPLES = {
     $bb
     ]
 """,
-
-8 : """\
+    8: """\
   aa: Yves
   bb: $cc
   cc: { 
@@ -104,163 +97,163 @@ _EXAMPLES = {
     cc2: $bb
     }
 """,
-
 }
 
 
 class TestCase(unittest.TestCase):
-  "Test the pyconf.py"""
-  
-  def test_000(self):
-    # one shot setUp() for this TestCase
-    # DBG.push_debug(True)
-    # SAT.setNotLocale() # test english
-    return
+    "Test the pyconf.py" ""
+
+    def test_000(self):
+        # one shot setUp() for this TestCase
+        # DBG.push_debug(True)
+        # SAT.setNotLocale() # test english
+        return
 
-  def test_010(self):
-    # pyconf.py doc example 0.3.7
-    # https://www.red-dove.com/config-doc/ is 0.3.9 !
-    # which, when run, would yield the console output:
+    def test_010(self):
+        # pyconf.py doc example 0.3.7
+        # https://www.red-dove.com/config-doc/ is 0.3.9 !
+        # which, when run, would yield the console output:
 
-    expected = """\
+        expected = """\
 Welcome, Harry
 Welkom, Ruud
 Bienvenue, Yves
 """
-    inStream = DBG.InStream(_EXAMPLES[1])
-    cfg = PYF.Config(inStream)
-    res = ''
-    for m in cfg.messages:
-        res += '%s, %s\n' % (m.message, m.name)
-    self.assertEqual(res, expected)
-    outStream = DBG.OutStream()
-    cfg.__save__(outStream) # sat renamed save() in __save__()
-    res = outStream.value
-    DBG.write("test_010 cfg", res)
-    self.assertTrue("name : 'Harry'" in res)
-    self.assertTrue("name : 'Ruud'" in res)
-    self.assertTrue("name : 'Yves'" in res)
-        
-  def test_020(self):
-    cfg = PYF.Config()
-    self.assertEqual(str(cfg), '{}')
-    self.assertEqual(cfg.__repr__(), '{}')
-    cfg.aa = "1111"
-    self.assertEqual(str(cfg), "{'aa': '1111'}")
-    cfg.bb = 2222
-    self.assertTrue("'bb': 2222" in str(cfg))
-    self.assertTrue("'aa': '1111'" in str(cfg))
-    cfg.cc = 3333.
-    self.assertTrue("'cc': 3333." in str(cfg))
-    
-  def test_030(self):
-    inStream = DBG.InStream(_EXAMPLES[2])
-    cfg = PYF.Config(inStream)
-    self.assertEqual(str(cfg),  "{'aa': 111, 'bb': $aa + 222}")
-    self.assertEqual(cfg.aa, 111)
-    self.assertEqual(cfg.bb, 333)
-      
-  def test_040(self):
-    inStream = DBG.InStream(_EXAMPLES[3])
-    cfg = PYF.Config(inStream)
-    self.assertEqual(cfg.aa, "Yves")
-    self.assertEqual(cfg.bb, "Herve")
-    self.assertEqual(type(cfg.bb), str)
-    cfg.bb = "Hervé" # try this
-    self.assertEqual(type(cfg.bb), str)
-    self.assertEqual(cfg.bb, "Hervé")
-    
-  def test_045(self):
-    # make Hervé valid only with pyconf.py as 0.3.9
-    inStream = DBG.InStream(_EXAMPLES[4])
-    outStream = DBG.OutStream()
-    with self.assertRaises(Exception):
-      cfg = PYF.Config(inStream)
+        inStream = DBG.InStream(_EXAMPLES[1])
+        cfg = PYF.Config(inStream)
+        res = ""
+        for m in cfg.messages:
+            res += "%s, %s\n" % (m.message, m.name)
+        self.assertEqual(res, expected)
+        outStream = DBG.OutStream()
+        cfg.__save__(outStream)  # sat renamed save() in __save__()
+        res = outStream.value
+        DBG.write("test_010 cfg", res)
+        self.assertTrue("name : 'Harry'" in res)
+        self.assertTrue("name : 'Ruud'" in res)
+        self.assertTrue("name : 'Yves'" in res)
+
+    def test_020(self):
+        cfg = PYF.Config()
+        self.assertEqual(str(cfg), "{}")
+        self.assertEqual(cfg.__repr__(), "{}")
+        cfg.aa = "1111"
+        self.assertEqual(str(cfg), "{'aa': '1111'}")
+        cfg.bb = 2222
+        self.assertTrue("'bb': 2222" in str(cfg))
+        self.assertTrue("'aa': '1111'" in str(cfg))
+        cfg.cc = 3333.0
+        self.assertTrue("'cc': 3333." in str(cfg))
+
+    def test_030(self):
+        inStream = DBG.InStream(_EXAMPLES[2])
+        cfg = PYF.Config(inStream)
+        self.assertEqual(str(cfg), "{'aa': 111, 'bb': $aa + 222}")
+        self.assertEqual(cfg.aa, 111)
+        self.assertEqual(cfg.bb, 333)
+
+    def test_040(self):
+        inStream = DBG.InStream(_EXAMPLES[3])
+        cfg = PYF.Config(inStream)
+        self.assertEqual(cfg.aa, "Yves")
+        self.assertEqual(cfg.bb, "Herve")
+        self.assertEqual(type(cfg.bb), str)
+        cfg.bb = "Hervé"  # try this
+        self.assertEqual(type(cfg.bb), str)
+        self.assertEqual(cfg.bb, "Hervé")
+
+    def test_045(self):
+        # make Hervé valid only with pyconf.py as 0.3.9
+        inStream = DBG.InStream(_EXAMPLES[4])
+        outStream = DBG.OutStream()
+        with self.assertRaises(Exception):
+            cfg = PYF.Config(inStream)
+
+        return  # TODO only with pyconf.py as 0.3.9
+        cfg.save(outStream)  # OK
+        # TODO: cfg = PYF.Config(inStream)
+        # cfg.__save__(outStream)  # KO and sat renamed save() in __save__()
+        res = outStream.value
+        DBG.write("test_045 cfg", res)
+        self.assertTrue("aa : 'Yves'" in res)
+        self.assertTrue(r"bb : 'Herv\xc3\xa9'" in res)
+        self.assertEqual(cfg.bb, "Hervé")
+
+    def test_100(self):
+        inStream = DBG.InStream(_EXAMPLES[5])
+        outStream = DBG.OutStream()
+        cfg = PYF.Config(inStream)  # KO
+        cfg.__save__(outStream)  # sat renamed save() in __save__()
+        res = outStream.value
+        DBG.write("test_100 cfg save", res)
+        DBG.write("test_100 cfg debug", cfg)
+        DBG.write("test_100 cfg.cc debug", cfg.cc)
+
+        cc = cfg.cc
+        # DBG.write("test_100 type cc[3]", dir(cc), True)
+        DBG.write("test_100 cc", [cc.data[i] for i in range(len(cc))])
+
+    def test_100(self):
+        inStream = DBG.InStream(_EXAMPLES[5])
+        outStream = DBG.OutStream()
+        cfg = PYF.Config(inStream)  # KO
+        cfg.__save__(outStream)  # sat renamed save() in __save__()
+        res = outStream.value
+        DBG.write("test_100 cfg save", res)
+        DBG.write("test_100 cfg debug", cfg)
+        DBG.write("test_100 cfg.cc debug", cfg.cc)
+
+        cc = cfg.cc
+        # DBG.write("test_100 type cc[3]", dir(cc), True)
+        DBG.write("test_100 cc", [cc.data[i] for i in range(len(cc))])
+
+    def test_110(self):
+        inStream = DBG.InStream(_EXAMPLES[6])
+        outStream = DBG.OutStream()
+        cfg = PYF.Config(inStream)
+        cfg.__save__(outStream)
+
+        res = outStream.value
+        DBG.write("test_110 cfg save", res)
+        self.assertNotIn("ERROR", res)
+
+        res = DBG.getStrConfigDbg(cfg)
+        DBG.write("test_110 cfg debug", res)
+        self.assertIn("ERROR", res)
+        self.assertIn("unable to evaluate $cc", res)
+        self.assertIn("unable to evaluate $bb", res)
+
+    def test_120(self):
+        for ii in [7, 8]:
+            inStream = DBG.InStream(_EXAMPLES[ii])
+            outStream = DBG.OutStream()
+            cfg = PYF.Config(inStream)
+            cfg.__save__(outStream)
+
+            res = outStream.value
+            DBG.write("test_120 cfg save", res)
+            self.assertNotIn("ERROR", res)
+
+            res = DBG.getStrConfigDbg(cfg)
+
+            DBG.write("test_120 cfg debug", res)
+            # circular references do not raise an error !!!
+            # self.assertIn("ERROR", res)  # no circular-reference error is raised
+            # self.assertIn("unable to evaluate $cc", res)
+            # self.assertIn("unable to evaluate $bb", res)
+            res = cfg.bb
+            DBG.write("test_120 cfg.bb debug", res)
+
+            res = cfg.cc
+            DBG.write("test_120 cfg.cc debug", res)
 
-    return # TODO only with pyconf.py as 0.3.9
-    cfg.save(outStream) # OK
-    # TODO: cfg = PYF.Config(inStream)
-    # cfg.__save__(outStream)  # KO and sat renamed save() in __save__()
-    res = outStream.value
-    DBG.write("test_045 cfg", res)
-    self.assertTrue("aa : 'Yves'" in res)
-    self.assertTrue(r"bb : 'Herv\xc3\xa9'" in res)
-    self.assertEqual(cfg.bb, "Hervé")
-    
-  def test_100(self):
-    inStream = DBG.InStream(_EXAMPLES[5])
-    outStream = DBG.OutStream()
-    cfg = PYF.Config(inStream) # KO
-    cfg.__save__(outStream) # sat renamed save() in __save__()
-    res = outStream.value
-    DBG.write("test_100 cfg save", res)
-    DBG.write("test_100 cfg debug", cfg)
-    DBG.write("test_100 cfg.cc debug", cfg.cc)
-    
-    cc = cfg.cc
-    # DBG.write("test_100 type cc[3]", dir(cc), True)
-    DBG.write("test_100 cc", [cc.data[i] for i in range(len(cc))])
-      
-  def test_100(self):
-    inStream = DBG.InStream(_EXAMPLES[5])
-    outStream = DBG.OutStream()
-    cfg = PYF.Config(inStream) # KO
-    cfg.__save__(outStream) # sat renamed save() in __save__()
-    res = outStream.value
-    DBG.write("test_100 cfg save", res)
-    DBG.write("test_100 cfg debug", cfg)
-    DBG.write("test_100 cfg.cc debug", cfg.cc)
-    
-    cc = cfg.cc
-    # DBG.write("test_100 type cc[3]", dir(cc), True)
-    DBG.write("test_100 cc", [cc.data[i] for i in range(len(cc))])
-      
-  def test_110(self):
-    inStream = DBG.InStream(_EXAMPLES[6])
-    outStream = DBG.OutStream()
-    cfg = PYF.Config(inStream)
-    cfg.__save__(outStream)
-    
-    res = outStream.value
-    DBG.write("test_110 cfg save", res)
-    self.assertNotIn("ERROR", res)
-    
-    res = DBG.getStrConfigDbg(cfg)
-    DBG.write("test_110 cfg debug", res)
-    self.assertIn("ERROR", res)
-    self.assertIn("unable to evaluate $cc", res)
-    self.assertIn("unable to evaluate $bb", res)
-    
-  def test_120(self):
-   for ii in [7, 8]:
-    inStream = DBG.InStream(_EXAMPLES[ii])
-    outStream = DBG.OutStream()
-    cfg = PYF.Config(inStream)
-    cfg.__save__(outStream)
-    
-    res = outStream.value
-    DBG.write("test_120 cfg save", res)
-    self.assertNotIn("ERROR", res)
-    
-    res = DBG.getStrConfigDbg(cfg)
+    def test_999(self):
+        # one shot tearDown() for this TestCase
+        # SAT.setLocale() # end test english
+        # DBG.pop_debug()
+        return
 
-    DBG.write("test_120 cfg debug", res)
-    # no error circular !!!
-    # self.assertIn("ERROR", res) # no error circular !!!
-    # self.assertIn("unable to evaluate $cc", res)
-    # self.assertIn("unable to evaluate $bb", res)
-    res = cfg.bb
-    DBG.write("test_120 cfg.bb debug", res)
 
-    res = cfg.cc
-    DBG.write("test_120 cfg.cc debug", res)
-    
-  def test_999(self):
-    # one shot tearDown() for this TestCase
-    # SAT.setLocale() # end test english
-    # DBG.pop_debug()
-    return
-    
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main(exit=False)
     pass
index f11b9f397536446b385ae9b869d9934193811f5b..d092136fcf9ad418732250ab1859faf062278733 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -21,124 +21,128 @@ import os
 import sys
 import unittest
 
-import initializeTest # set PATH etc for test
+import initializeTest  # set PATH etc for test
 
 import src.salomeTools as SAT
-import src.debug as DBG # Easy print stderr (for DEBUG only)
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
 import src.loggingSimple as LOG
 
+
 class TestCase(unittest.TestCase):
-  "Test the sat --help commands"""
-  
-  debug = False
-  
-  def tearDown(self):
-    # print "tearDown", __file__
-    # assure self.logger clear for next test
-    logger = LOG.getUnittestLogger()
-    logs = logger.getLogsAndClear()
-    # using assertNotIn() is too much verbose
-    self.assertFalse("ERROR" in logs)
-    self.assertFalse("CRITICAL" in logs)
-  
-  def test_000(self):
-    logger = LOG.getUnittestLogger()
-    # one shot setUp() for this TestCase
-    if self.debug: DBG.push_debug(True)
-    SAT.setNotLocale() # test english
-
-  def test_999(self):
-    # one shot tearDown() for this TestCase
-    SAT.setLocale() # end test english
-    if self.debug: DBG.pop_debug()
-
-  def test_010(self): # TODO fix logger unittest
-    cmd = "sat --help"
-    res = SAT.launchSat(cmd)
-    self.assertTrue(res.isOk())
-    out = res.getValue()
-    self.assertTrue(" - config" in out)
-    self.assertTrue(" - prepare" in out)
-    self.assertTrue(" - compile" in out)
-
-  def test_011(self):
-    logger = LOG.getUnittestLogger()
-    cmd = "--help"
-    s = SAT.Sat(logger)
-    returnCode = s.execute_cli(cmd)
-    self.assertTrue(returnCode.isOk())
-    logs = logger.getLogs()
-    DBG.write("test_011 logger", logs)
-    self.assertTrue(" - config" in logs)
-    self.assertTrue(" - prepare" in logs)
-    self.assertTrue(" - compile" in logs)
-    
-  def test_030(self):
-    cmd = "sat config --help"
-    returnCode = SAT.launchSat(cmd)
-    self.assertTrue(returnCode.isOk())
-    out = returnCode.getValue()
-    DBG.write("test_030 stdout", out)
-    self.assertTrue("--value" in out)
-
-  def test_031(self):
-    logger = LOG.getUnittestLogger()
-    cmd = "config --help"
-    s = SAT.Sat(logger)
-    returnCode = s.execute_cli(cmd)
-    self.assertTrue(returnCode.isOk())
-    logs = logger.getLogs()
-    DBG.write("test_031 logger", logs)
-    self.assertTrue("--help" in logs)
-
-  def test_032(self):
-    logger = LOG.getUnittestLogger()
-    cmd = "prepare --help"
-    s = SAT.Sat(logger)
-    returnCode = s.execute_cli(cmd)
-    self.assertTrue(returnCode.isOk())
-    logs = logger.getLogs()
-    DBG.write("test_031 logger", logs)
-    self.assertTrue("--help" in logs)
-
-  def test_040(self):
-    logger = LOG.getUnittestLogger()
-    cmd = "config --list"
-    s = SAT.Sat(logger)
-    returnCode = s.execute_cli(cmd)
-    self.assertTrue(returnCode.isOk())
-    logs = logger.getLogs()
-    self.assertTrue("Applications" in logs)
-
-  def test_050(self):
-    cmds = SAT.getCommandsList()
-    DBG.write("test_050 getCommandsList", cmds)
-    for c in cmds:
-      cmd = "sat %s --help" % c
-      DBG.write("test_050", cmd)
-      returnCode = SAT.launchSat(cmd)
-      if not returnCode.isOk():
-        DBG.write("test_050 %s" % cmd, returnCode.getValue(), True)
-      self.assertTrue(returnCode.isOk())
-      out = returnCode.getValue()
-      DBG.write("test_050 %s stdout" % c, out)
-      self.assertTrue("The %s command" % c in out)
-      self.assertTrue("Available options" in out)
-      
-  def test_051(self):
-    logger = LOG.getUnittestLogger()
-    cmds = SAT.getCommandsList()
-    for c in cmds:
-      cmd = "%s --help" % c
-      DBG.write("test_051", cmd)
-      s = SAT.Sat(logger)
-      returnCode = s.execute_cli(cmd)
-      self.assertTrue(returnCode.isOk())
-      logs = logger.getLogsAndClear()
-      DBG.write(cmd, logs)
-      self.assertTrue("The %s command" % c in logs)
-      self.assertTrue("Available options" in logs)
-                
-if __name__ == '__main__':
+    "Test the sat --help commands" ""
+
+    debug = False
+
+    def tearDown(self):
+        # print "tearDown", __file__
+        # ensure self.logger is clear for the next test
+        logger = LOG.getUnittestLogger()
+        logs = logger.getLogsAndClear()
+        # using assertNotIn() would be too verbose
+        self.assertFalse("ERROR" in logs)
+        self.assertFalse("CRITICAL" in logs)
+
+    def test_000(self):
+        logger = LOG.getUnittestLogger()
+        # one shot setUp() for this TestCase
+        if self.debug:
+            DBG.push_debug(True)
+        SAT.setNotLocale()  # test english
+
+    def test_999(self):
+        # one shot tearDown() for this TestCase
+        SAT.setLocale()  # end test english
+        if self.debug:
+            DBG.pop_debug()
+
+    def test_010(self):  # TODO fix logger unittest
+        cmd = "sat --help"
+        res = SAT.launchSat(cmd)
+        self.assertTrue(res.isOk())
+        out = res.getValue()
+        self.assertTrue(" - config" in out)
+        self.assertTrue(" - prepare" in out)
+        self.assertTrue(" - compile" in out)
+
+    def test_011(self):
+        logger = LOG.getUnittestLogger()
+        cmd = "--help"
+        s = SAT.Sat(logger)
+        returnCode = s.execute_cli(cmd)
+        self.assertTrue(returnCode.isOk())
+        logs = logger.getLogs()
+        DBG.write("test_011 logger", logs)
+        self.assertTrue(" - config" in logs)
+        self.assertTrue(" - prepare" in logs)
+        self.assertTrue(" - compile" in logs)
+
+    def test_030(self):
+        cmd = "sat config --help"
+        returnCode = SAT.launchSat(cmd)
+        self.assertTrue(returnCode.isOk())
+        out = returnCode.getValue()
+        DBG.write("test_030 stdout", out)
+        self.assertTrue("--value" in out)
+
+    def test_031(self):
+        logger = LOG.getUnittestLogger()
+        cmd = "config --help"
+        s = SAT.Sat(logger)
+        returnCode = s.execute_cli(cmd)
+        self.assertTrue(returnCode.isOk())
+        logs = logger.getLogs()
+        DBG.write("test_031 logger", logs)
+        self.assertTrue("--help" in logs)
+
+    def test_032(self):
+        logger = LOG.getUnittestLogger()
+        cmd = "prepare --help"
+        s = SAT.Sat(logger)
+        returnCode = s.execute_cli(cmd)
+        self.assertTrue(returnCode.isOk())
+        logs = logger.getLogs()
+        DBG.write("test_031 logger", logs)
+        self.assertTrue("--help" in logs)
+
+    def test_040(self):
+        logger = LOG.getUnittestLogger()
+        cmd = "config --list"
+        s = SAT.Sat(logger)
+        returnCode = s.execute_cli(cmd)
+        self.assertTrue(returnCode.isOk())
+        logs = logger.getLogs()
+        self.assertTrue("Applications" in logs)
+
+    def test_050(self):
+        cmds = SAT.getCommandsList()
+        DBG.write("test_050 getCommandsList", cmds)
+        for c in cmds:
+            cmd = "sat %s --help" % c
+            DBG.write("test_050", cmd)
+            returnCode = SAT.launchSat(cmd)
+            if not returnCode.isOk():
+                DBG.write("test_050 %s" % cmd, returnCode.getValue(), True)
+            self.assertTrue(returnCode.isOk())
+            out = returnCode.getValue()
+            DBG.write("test_050 %s stdout" % c, out)
+            self.assertTrue("The %s command" % c in out)
+            self.assertTrue("Available options" in out)
+
+    def test_051(self):
+        logger = LOG.getUnittestLogger()
+        cmds = SAT.getCommandsList()
+        for c in cmds:
+            cmd = "%s --help" % c
+            DBG.write("test_051", cmd)
+            s = SAT.Sat(logger)
+            returnCode = s.execute_cli(cmd)
+            self.assertTrue(returnCode.isOk())
+            logs = logger.getLogsAndClear()
+            DBG.write(cmd, logs)
+            self.assertTrue("The %s command" % c in logs)
+            self.assertTrue("Available options" in logs)
+
+
+if __name__ == "__main__":
     unittest.main(exit=False)
     pass
index a2a8dd10002154e28b0cbac5c18d1d7a3ed189b8..8c246fb2ea63f51fb864201b2f96ed0e11c7c4c6 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -21,110 +21,113 @@ import os
 import sys
 import unittest
 
-import initializeTest # set PATH etc for test
+import initializeTest  # set PATH etc for test
 
 import src.salomeTools as SAT
-import src.debug as DBG # Easy print stderr (for DEBUG only)
+import src.debug as DBG  # Easy print stderr (for DEBUG only)
 import src.loggingSimple as LOG
 
+
 class TestCase(unittest.TestCase):
-  "Test the sat commands on APPLI_TEST configuration pyconf etc. files"""
-
-  debug = False
-  
-  # see test_100, # commands are expected OK
-  TRG = "APPLI_TEST" # "SALOME-8.4.0"
-  satCommandsToTestOk = [
-    "config -l",
-    "config -v .",
-    "config -g .",
-    "config %s --value ." %  TRG,
-    "config %s --debug ." %  TRG,
-    "config %s --info KERNEL" %  TRG,
-    "config %s --show_patchs" %  TRG,
-  ]
-  # see test_110, # commands are expected KO
-  satCommandsToTestKo = [
-    "config %s --info oops" %  TRG,
-    "config --oops",
-  ]
-  # see test_120, # commands are expected Raise,
-  # but if fixed go to satCommandsToTestKo
-  satCommandsToTestRaise = [
-    "oopsconfig --oops .",
-  ]
-  
-  def tearDown(self):
-    # print "tearDown", __file__
-    # assure self.logger clear for next test
-    logger = LOG.getUnittestLogger()
-    logs = logger.getLogsAndClear()
-    # using assertNotIn() is too much verbose
-    self.assertFalse("ERROR    ::" in logs)
-    self.assertFalse("CRITICAL ::" in logs)
-
-  def test_000(self):
-    logger = LOG.getUnittestLogger()
-    # one shot setUp() for this TestCase
-    if self.debug: DBG.push_debug(True)
-    SAT.setNotLocale()  # test english
-
-  def test_999(self):
-    # one shot tearDown() for this TestCase
-    SAT.setLocale() # end test english
-    if self.debug: DBG.pop_debug()
-
-  def test_010(self):
-    logger = LOG.getUnittestLogger()
-    DBG.write("test_010 logger", logger.name)
-    cmd = "config -l"
-    s = SAT.Sat(logger)
-    DBG.write("s.getConfig()", s.getConfig()) #none
-    DBG.write("s.__dict__", s.__dict__) # have 
-    returnCode = s.execute_cli(cmd)
-    DBG.write("test_010", returnCode)
-    logs = logger.getLogs()
-    DBG.write("test_010 logger", logs)
-    self.assertTrue(returnCode.isOk())
-    
-  def xtest_100(self): # TODO
-    # test all satCommands expected OK
-    logger = LOG.getUnittestLogger()
-    dbg = self.debug
-    for cmd in self.satCommandsToTestOk:
-      s = SAT.Sat(logger)
-      returnCode = s.execute_cli(cmd)
-      DBG.write("test_100 'sat %s'" % cmd, str(returnCode), dbg)
-      logs = logger.getLogsAndClear()
-      DBG.write("logs", logs, dbg)
-      # using assertNotIn() is too much verbose
-      self.assertFalse("ERROR    ::" in logs)
-      self.assertFalse("CRITICAL ::" in logs)
-      
-  def test_110(self):
-    # test all satCommands expected KO
-    logger = LOG.getUnittestLogger()
-    dbg = self.debug
-    for cmd in self.satCommandsToTestKo:
-      s = SAT.Sat(logger)
-      returnCode = s.execute_cli(cmd)
-      DBG.write("test_110 'sat %s'" % cmd, returnCode, dbg)
-      logs = logger.getLogsAndClear()
-      DBG.write("logs", logs, dbg)    
-      
-  def test_120(self):
-    # test all satCommands expected raise
-    logger = LOG.getUnittestLogger()
-    dbg = self.debug
-    for cmd in self.satCommandsToTestRaise:
-      s = SAT.Sat(logger)
-      DBG.write("test_120 'sat %s'" % cmd, "expected raise", dbg)
-      with self.assertRaises(Exception):
+    "Test the sat commands on APPLI_TEST configuration pyconf etc. files" ""
+
+    debug = False
+
+    # see test_100: these commands are expected to succeed (OK)
+    TRG = "APPLI_TEST"  # "SALOME-8.4.0"
+    satCommandsToTestOk = [
+        "config -l",
+        "config -v .",
+        "config -g .",
+        "config %s --value ." % TRG,
+        "config %s --debug ." % TRG,
+        "config %s --info KERNEL" % TRG,
+        "config %s --show_patchs" % TRG,
+    ]
+    # see test_110: these commands are expected to fail (KO)
+    satCommandsToTestKo = [
+        "config %s --info oops" % TRG,
+        "config --oops",
+    ]
+    # see test_120: these commands are expected to raise;
+    # once fixed, move them to satCommandsToTestKo
+    satCommandsToTestRaise = [
+        "oopsconfig --oops .",
+    ]
+
+    def tearDown(self):
+        # print "tearDown", __file__
+        # ensure self.logger is clear for the next test
+        logger = LOG.getUnittestLogger()
+        logs = logger.getLogsAndClear()
+        # using assertNotIn() would be too verbose
+        self.assertFalse("ERROR    ::" in logs)
+        self.assertFalse("CRITICAL ::" in logs)
+
+    def test_000(self):
+        logger = LOG.getUnittestLogger()
+        # one shot setUp() for this TestCase
+        if self.debug:
+            DBG.push_debug(True)
+        SAT.setNotLocale()  # test english
+
+    def test_999(self):
+        # one shot tearDown() for this TestCase
+        SAT.setLocale()  # end test english
+        if self.debug:
+            DBG.pop_debug()
+
+    def test_010(self):
+        logger = LOG.getUnittestLogger()
+        DBG.write("test_010 logger", logger.name)
+        cmd = "config -l"
+        s = SAT.Sat(logger)
+        DBG.write("s.getConfig()", s.getConfig())  # none
+        DBG.write("s.__dict__", s.__dict__)  # have
         returnCode = s.execute_cli(cmd)
-      logs = logger.getLogsAndClear()
-      DBG.write("logs", logs, dbg)    
-      
-      
-if __name__ == '__main__':
+        DBG.write("test_010", returnCode)
+        logs = logger.getLogs()
+        DBG.write("test_010 logger", logs)
+        self.assertTrue(returnCode.isOk())
+
+    def xtest_100(self):  # TODO
+        # test all satCommands expected OK
+        logger = LOG.getUnittestLogger()
+        dbg = self.debug
+        for cmd in self.satCommandsToTestOk:
+            s = SAT.Sat(logger)
+            returnCode = s.execute_cli(cmd)
+            DBG.write("test_100 'sat %s'" % cmd, str(returnCode), dbg)
+            logs = logger.getLogsAndClear()
+            DBG.write("logs", logs, dbg)
+            # using assertNotIn() would be too verbose
+            self.assertFalse("ERROR    ::" in logs)
+            self.assertFalse("CRITICAL ::" in logs)
+
+    def test_110(self):
+        # test all satCommands expected KO
+        logger = LOG.getUnittestLogger()
+        dbg = self.debug
+        for cmd in self.satCommandsToTestKo:
+            s = SAT.Sat(logger)
+            returnCode = s.execute_cli(cmd)
+            DBG.write("test_110 'sat %s'" % cmd, returnCode, dbg)
+            logs = logger.getLogsAndClear()
+            DBG.write("logs", logs, dbg)
+
+    def test_120(self):
+        # test all satCommands expected raise
+        logger = LOG.getUnittestLogger()
+        dbg = self.debug
+        for cmd in self.satCommandsToTestRaise:
+            s = SAT.Sat(logger)
+            DBG.write("test_120 'sat %s'" % cmd, "expected raise", dbg)
+            with self.assertRaises(Exception):
+                returnCode = s.execute_cli(cmd)
+            logs = logger.getLogsAndClear()
+            DBG.write("logs", logs, dbg)
+
+
+if __name__ == "__main__":
     unittest.main(exit=False)
     pass
index ab5f92ba9d935662f267541714102715ecfbca48..9e2308f680cff05e70ee4dd25192fc1b4a401b59 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -98,43 +98,44 @@ import getpass
 
 verbose = False
 
+
 class TestCase(unittest.TestCase):
-  "Test a paramiko connection"""
-
-  def setLoggerParamiko(self):
-    """to get logs of paramiko, useful if problems"""
-    import logging as LOGI
-    loggerPrmk = LOGI.getLogger("paramiko")
-    if len(loggerPrmk.handlers) != 0:
-       print("logging.__file__ %s" % LOGI.__file__)
-       print("logger paramiko have handler set yet, is a surprise")
-       return
-    if not verbose:
-       # stay as it, null
-       return
-
-    #set a paramiko logger verbose
-    handler = LOGI.StreamHandler()
-    msg = "create paramiko logger, with handler on stdout"
-    
-    # handler = LOGI.MemoryHandler()
-    # etc... https://docs.python.org/2/library/logging.handlers.html
-    # msg = "create paramiko logger, with handler in memory"
-
-    # original frm from paramiko
-    # frm = '%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(thread)-3d %(name)s: %(message)s' # noqa
-    frm = '%(levelname)-5s :: %(asctime)s :: %(name)s :: %(message)s'
-    handler.setFormatter(LOGI.Formatter(frm, '%y%m%d_%H%M%S'))
-    loggerPrmk.addHandler(handler)
-      
-    # logger is not notset but low, handlers needs setlevel greater
-    loggerPrmk.setLevel(LOGI.DEBUG)
-    handler.setLevel(LOGI.INFO) # LOGI.DEBUG) # may be other one
-
-    loggerPrmk.info(msg)
-
-
-  '''example from internet
+    "Test a paramiko connection" ""
+
+    def setLoggerParamiko(self):
+        """to get logs of paramiko, useful if problems"""
+        import logging as LOGI
+
+        loggerPrmk = LOGI.getLogger("paramiko")
+        if len(loggerPrmk.handlers) != 0:
+            print("logging.__file__ %s" % LOGI.__file__)
+            print("logger paramiko have handler set yet, is a surprise")
+            return
+        if not verbose:
+            # stay as it, null
+            return
+
+        # set a paramiko logger verbose
+        handler = LOGI.StreamHandler()
+        msg = "create paramiko logger, with handler on stdout"
+
+        # handler = LOGI.MemoryHandler()
+        # etc... https://docs.python.org/2/library/logging.handlers.html
+        # msg = "create paramiko logger, with handler in memory"
+
+        # original frm from paramiko
+        # frm = '%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(thread)-3d %(name)s: %(message)s' # noqa
+        frm = "%(levelname)-5s :: %(asctime)s :: %(name)s :: %(message)s"
+        handler.setFormatter(LOGI.Formatter(frm, "%y%m%d_%H%M%S"))
+        loggerPrmk.addHandler(handler)
+
+        # the logger level is set low (DEBUG, not NOTSET); the handler needs its own, higher setLevel()
+        loggerPrmk.setLevel(LOGI.DEBUG)
+        handler.setLevel(LOGI.INFO)  # or LOGI.DEBUG, as needed
+
+        loggerPrmk.info(msg)
+
+    """example from internet
   def fetch_netmask(self, hostname, port=22):
     private_key = os.path.expanduser('~/.ssh/id_rsa')
     connection = open_ssh_connection('wambeke', hostname, port=port, key=private_key)
@@ -150,56 +151,56 @@ class TestCase(unittest.TestCase):
     client.set_missing_host_key_policy(PK.AutoAddPolicy())
     client.connect(hostname, port=port, timeout=5, username=username, key_filename=key)
     return client
-  '''
-
-  def test_000(self):
-    self.setLoggerParamiko()
-    
-
-  def test_010(self):
-    # http://docs.paramiko.org/en/2.4/api/agent.html
-
-    try:
-      import paramiko as PK
-    except:
-      print("\nproblem 'import paramiko', no tests")
-      return
-
-    # port=22 # useless
-    username = getpass.getuser()
-    hostname = os.uname()[1]
-    aFile = "/tmp/%s_test_paramiko.tmp" % username
-    cmd = ("pwd; ls -alt {0}; cat {0}").format(aFile)
-    
-    # connect
-    client = PK.SSHClient()
-    client.set_missing_host_key_policy(PK.AutoAddPolicy())  
-    # client.connect(hostname, username=username, password="xxxxx")
-    # client.connect(hostname, username=username, passphrase="yyyy", key_filename="/home/wambeke/.ssh/id_rsa_satjobs_passphrase")
-    # client.connect(hostname, username=username)
-
-    # timeout in seconds
-    client.connect(hostname, username=username, timeout=1.)
-    
-    # obtain session
-    session = client.get_transport().open_session()
-    # Forward local agent
-    PK.agent.AgentRequestHandler(session)
-    # commands executed after this point will see the forwarded agent on the remote end.
-    
-    # one api
-    session.exec_command("date > %s" % aFile)
-    cmd = ("pwd; ls -alt {0}; cat {0} && echo OK").format(aFile)
-    # another api
-    stdin, stdout, stderr = client.exec_command(cmd)
-    output = stdout.read()
-    if verbose:
-      print('stdout:\n%s' % output)
-    self.assertTrue(aFile in output)
-    self.assertTrue("OK" in output)
-    client.close()
-                
-if __name__ == '__main__':
+  """
+
+    def test_000(self):
+        self.setLoggerParamiko()
+
+    def test_010(self):
+        # http://docs.paramiko.org/en/2.4/api/agent.html
+
+        try:
+            import paramiko as PK
+        except ImportError:
+            print("\nparamiko is not available, skipping this test")
+            return
+
+        # port=22 # useless
+        username = getpass.getuser()
+        hostname = os.uname()[1]
+        aFile = "/tmp/%s_test_paramiko.tmp" % username
+        cmd = ("pwd; ls -alt {0}; cat {0}").format(aFile)
+
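+        # NOTE: this test assumes an SSH server reachable on this host and key/agent auth for the current user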
+        # connect
+        client = PK.SSHClient()
+        client.set_missing_host_key_policy(PK.AutoAddPolicy())
+        # client.connect(hostname, username=username, password="xxxxx")
+        # client.connect(hostname, username=username, passphrase="yyyy", key_filename="/home/wambeke/.ssh/id_rsa_satjobs_passphrase")
+        # client.connect(hostname, username=username)
+
+        # timeout in seconds
+        client.connect(hostname, username=username, timeout=1.0)
+
+        # obtain session
+        session = client.get_transport().open_session()
+        # Forward local agent
+        PK.agent.AgentRequestHandler(session)
+        # commands executed after this point will see the forwarded agent on the remote end.
+
+        # one api
+        session.exec_command("date > %s" % aFile)
+        cmd = ("pwd; ls -alt {0}; cat {0} && echo OK").format(aFile)
+        # another api
+        stdin, stdout, stderr = client.exec_command(cmd)
+        output = stdout.read().decode()  # bytes under Python 3; decode for the substring checks below
+        if verbose:
+            print("stdout:\n%s" % output)
+        self.assertTrue(aFile in output)
+        self.assertTrue("OK" in output)
+        client.close()
+
+
+if __name__ == "__main__":
     # verbose = True # human eyes
     unittest.main(exit=False)
     pass
index 79192ad0e1e8114029a9d310326b33c1bdee2c19..807b1d77fbfdf725a41f255caf78b89505db712c 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -24,204 +24,249 @@ import unittest
 import src.product
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test of the compile command"""
 
     def test_010(self):
         # Test the compile command with the '--product' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-
-        sat.clean(appli + ' --build --install --product ' + product_name, batch=True)
-        sat.compile(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+
+        sat.clean(appli + " --build --install --product " + product_name, batch=True)
+        sat.compile(appli + " --product " + product_name)
+
         if os.path.exists(expected_file_path):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the compile command with the '--with_fathers' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
-        product_name2 = 'PRODUCT_ARCHIVE'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
+        product_name2 = "PRODUCT_ARCHIVE"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name +"," +product_name2)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-        expected_install_dir2 = src.product.get_product_config(sat.cfg, product_name2).install_dir
-        expected_file_path2 = os.path.join(expected_install_dir2, 'bin/hello-archive')
-        
-        sat.clean(appli + ' --build --install --product ' + product_name +"," +product_name2, batch=True)
-        sat.compile(appli + ' --with_fathers --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name + "," + product_name2)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+        expected_install_dir2 = src.product.get_product_config(
+            sat.cfg, product_name2
+        ).install_dir
+        expected_file_path2 = os.path.join(expected_install_dir2, "bin/hello-archive")
+
+        sat.clean(
+            appli
+            + " --build --install --product "
+            + product_name
+            + ","
+            + product_name2,
+            batch=True,
+        )
+        sat.compile(appli + " --with_fathers --product " + product_name)
+
         if os.path.exists(expected_file_path) and os.path.exists(expected_file_path2):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
-        
+        self.assertEqual(OK, "OK")
+
     def test_030(self):
         # Test the compile command with the '--with_children' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
-        product_name2 = 'PRODUCT_ARCHIVE'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
+        product_name2 = "PRODUCT_ARCHIVE"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name +"," +product_name2)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-        expected_install_dir2 = src.product.get_product_config(sat.cfg, product_name2).install_dir
-        expected_file_path2 = os.path.join(expected_install_dir2, 'bin/hello-archive')
-
-        sat.clean(appli + ' --build --install --product ' + product_name +"," +product_name2, batch=True)
-        sat.compile(appli + ' --with_children --product ' + product_name2)
-        
+
+        sat.prepare(appli + " --product " + product_name + "," + product_name2)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+        expected_install_dir2 = src.product.get_product_config(
+            sat.cfg, product_name2
+        ).install_dir
+        expected_file_path2 = os.path.join(expected_install_dir2, "bin/hello-archive")
+
+        sat.clean(
+            appli
+            + " --build --install --product "
+            + product_name
+            + ","
+            + product_name2,
+            batch=True,
+        )
+        sat.compile(appli + " --with_children --product " + product_name2)
+
         if os.path.exists(expected_file_path) and os.path.exists(expected_file_path2):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the compile command with the '--clean_all' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
-        product_name2 = 'PRODUCT_ARCHIVE'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
+        product_name2 = "PRODUCT_ARCHIVE"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name +"," +product_name2)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-        expected_install_dir2 = src.product.get_product_config(sat.cfg, product_name2).install_dir
-        expected_file_path2 = os.path.join(expected_install_dir2, 'bin/hello-archive')
-
-        sat.compile(appli + ' --with_children --product ' + product_name2)
-        
-        sat.compile(appli + ' --clean_all --with_children --product ' + product_name2, batch=True)
-        
+
+        sat.prepare(appli + " --product " + product_name + "," + product_name2)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+        expected_install_dir2 = src.product.get_product_config(
+            sat.cfg, product_name2
+        ).install_dir
+        expected_file_path2 = os.path.join(expected_install_dir2, "bin/hello-archive")
+
+        sat.compile(appli + " --with_children --product " + product_name2)
+
+        sat.compile(
+            appli + " --clean_all --with_children --product " + product_name2,
+            batch=True,
+        )
+
         if os.path.exists(expected_file_path) and os.path.exists(expected_file_path2):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_050(self):
         # Test the compile command with the '--clean_install' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
-        product_name2 = 'PRODUCT_ARCHIVE'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
+        product_name2 = "PRODUCT_ARCHIVE"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name +"," +product_name2)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-        expected_install_dir2 = src.product.get_product_config(sat.cfg, product_name2).install_dir
-        expected_file_path2 = os.path.join(expected_install_dir2, 'bin/hello-archive')
-
-        sat.compile(appli + ' --with_children --product ' + product_name2)
-        
-        sat.compile(appli + ' --clean_install --with_children --product ' + product_name2, batch=True)
-        
+
+        sat.prepare(appli + " --product " + product_name + "," + product_name2)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+        expected_install_dir2 = src.product.get_product_config(
+            sat.cfg, product_name2
+        ).install_dir
+        expected_file_path2 = os.path.join(expected_install_dir2, "bin/hello-archive")
+
+        sat.compile(appli + " --with_children --product " + product_name2)
+
+        sat.compile(
+            appli + " --clean_install --with_children --product " + product_name2,
+            batch=True,
+        )
+
         if os.path.exists(expected_file_path) and os.path.exists(expected_file_path2):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_060(self):
         # Test the compile command with the '--make_flags' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-
-        sat.clean(appli + ' --build --install --product ' + product_name, batch=True)
-        sat.compile(appli + ' --make_flags 3 --product ' + product_name)
-               
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+
+        sat.clean(appli + " --build --install --product " + product_name, batch=True)
+        sat.compile(appli + " --make_flags 3 --product " + product_name)
+
         if os.path.exists(expected_file_path):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_070(self):
         # Test the compile command with the '--show' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-
-        sat.clean(appli + ' --build --install --product ' + product_name, batch=True)
-        sat.compile(appli + ' --show --product ' + product_name)
-               
-        if not(os.path.exists(expected_file_path)):
-            OK = 'OK'         
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+
+        sat.clean(appli + " --build --install --product " + product_name, batch=True)
+        sat.compile(appli + " --show --product " + product_name)
+
+        if not (os.path.exists(expected_file_path)):
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_080(self):
         # Test the compile command with the '--stop_first_fail' option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product PRODUCT_CVS,Python')
-        expected_install_dir = src.product.get_product_config(sat.cfg, "PRODUCT_CVS").install_dir
-
-        sat.clean(appli + ' --build --install --product PRODUCT_CVS', batch=True)
-        sat.compile(appli + ' --stop_first_fail --product PRODUCT_CVS,Python')
-               
-        if not(os.path.exists(expected_install_dir)):
-            OK = 'OK'         
+
+        sat.prepare(appli + " --product PRODUCT_CVS,Python")
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, "PRODUCT_CVS"
+        ).install_dir
+
+        sat.clean(appli + " --build --install --product PRODUCT_CVS", batch=True)
+        sat.compile(appli + " --stop_first_fail --product PRODUCT_CVS,Python")
+
+        if not (os.path.exists(expected_install_dir)):
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_090(self):
-        # Test the 'sat -h compile' command to get description       
+        # Test the 'sat -h compile' command to get description
 
         OK = "KO"
 
         import compile
-        
+
         if "The compile command constructs the products" in compile.description():
             OK = "OK"
 
         # pyunit method to compare 2 str
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
-
index 9d995a297e639b8e8c653cd5d59caa65839d00bf..c9ac3d70127c12a477ca642b3caccf1da79332d2 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -24,6 +24,7 @@ import unittest
 import src.product
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test of the configure command"""
 
@@ -35,76 +36,86 @@ class TestCase(unittest.TestCase):
 
     def test_010(self):
         # Test the configure command with a product in cmake
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        expected_file_path = os.path.join(expected_build_dir, 'CMakeCache.txt')
-       
-        sat.configure(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+        expected_file_path = os.path.join(expected_build_dir, "CMakeCache.txt")
+
+        sat.configure(appli + " --product " + product_name)
+
         if os.path.exists(os.path.join(expected_build_dir, expected_file_path)):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the configure command with a product in autotools
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_CVS'
+        appli = "appli-test"
+        product_name = "PRODUCT_CVS"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        expected_file_path = os.path.join(expected_build_dir, 'config.log')
-       
-        sat.configure(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+        expected_file_path = os.path.join(expected_build_dir, "config.log")
+
+        sat.configure(appli + " --product " + product_name)
+
         if os.path.exists(os.path.join(expected_build_dir, expected_file_path)):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the configure command with a product in script mode
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'Python'
+        appli = "appli-test"
+        product_name = "Python"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-      
-        sat.configure(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+
+        sat.configure(appli + " --product " + product_name)
+
         if os.path.exists(expected_build_dir):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the 'sat -h configure'
         OK = "KO"
 
         import configure
-        
-        if "The configure command executes in the build directory" in configure.description():
+
+        if (
+            "The configure command executes in the build directory"
+            in configure.description()
+        ):
             OK = "OK"
 
         # pyunit method to compare 2 str
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index c9569654d2fecea3fde5ac8ee1b0a2a3a753453d..b00fd8d4dc3e7fccd25b3d4b0ea5de09c9a8e7a7 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -24,83 +24,91 @@ import unittest
 import src.product
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test of the make command"""
 
     def test_010(self):
         # Test the make command without any option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        expected_file_path = os.path.join(expected_build_dir, 'hello')
-       
-        sat.configure(appli + ' --product ' + product_name)        
-        sat.make(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+        expected_file_path = os.path.join(expected_build_dir, "hello")
+
+        sat.configure(appli + " --product " + product_name)
+        sat.make(appli + " --product " + product_name)
+
         if os.path.exists(os.path.join(expected_build_dir, expected_file_path)):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the make command with an option
-        OK = 'KO'
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        OK = "KO"
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        expected_file_path = os.path.join(expected_build_dir, 'hello')
-       
-        sat.configure(appli + ' --product ' + product_name)   
-        sat.make(appli + ' --product ' + product_name + ' --option -j3')
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+        expected_file_path = os.path.join(expected_build_dir, "hello")
+
+        sat.configure(appli + " --product " + product_name)
+        sat.make(appli + " --product " + product_name + " --option -j3")
+
         if os.path.exists(os.path.join(expected_build_dir, expected_file_path)):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the make command with a product in script mode
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'Python'
+        appli = "appli-test"
+        product_name = "Python"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
         expected_file = "bin/python2.7"
-        
-        sat.make(appli + ' --product ' + product_name)
-        
+
+        sat.make(appli + " --product " + product_name)
+
         if os.path.exists(os.path.join(expected_install_dir, expected_file)):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
-        # Test the sat -h make 
+        # Test the sat -h make
         OK = "KO"
 
         import make
-        
-        if "The make command executes the \"make\" command" in make.description():
+
+        if 'The make command executes the "make" command' in make.description():
             OK = "OK"
 
         # pyunit method to compare 2 str
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 62aba791ddcbacfb186e087d020c72867b72529b..6793b0a95b91533b94803ef71f774cb00a946e11 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -24,46 +24,53 @@ import unittest
 import src.product
 from src.salomeTools import Sat
 
+
 class TestMakeinstall(unittest.TestCase):
     """Test of the makeinstall command"""
 
     def test_010(self):
         # Test the configure-make-makeinstall command without any option
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-                            
-        sat.prepare(appli + ' --product ' + product_name)
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_file_path = os.path.join(expected_install_dir, 'bin/hello')
-       
-        sat.configure(appli + ' --product ' + product_name)
-        
-        sat.make(appli + ' --product ' + product_name)
-        
-        sat.makeinstall(appli + ' --product ' + product_name)
-        
+
+        sat.prepare(appli + " --product " + product_name)
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_file_path = os.path.join(expected_install_dir, "bin/hello")
+
+        sat.configure(appli + " --product " + product_name)
+
+        sat.make(appli + " --product " + product_name)
+
+        sat.makeinstall(appli + " --product " + product_name)
+
         if os.path.exists(expected_file_path):
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the sat -h make
         OK = "KO"
 
         import makeinstall
-        
-        if "The makeinstall command executes the 'make install' command" in makeinstall.description():
+
+        if (
+            "The makeinstall command executes the 'make install' command"
+            in makeinstall.description()
+        ):
             OK = "OK"
 
         # pyunit method to compare 2 str
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index bb7fe0b891475ed5379c383cf720c9de18358f4e..08b4f5d2d0fc72b817bd6f12bdeb5796e1c8c39b 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -23,28 +23,37 @@ import unittest
 
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """sat config --copy"""
-    
+
     def test_010(self):
         # Test the copy of a pyconf
         res = "KO"
         appli_to_copy = "appli-test"
 
-        expected_file = os.path.expanduser(os.path.join('~','.salomeTools', 'Applications', 'LOCAL_' + appli_to_copy + '.pyconf'))
+        expected_file = os.path.expanduser(
+            os.path.join(
+                "~",
+                ".salomeTools",
+                "Applications",
+                "LOCAL_" + appli_to_copy + ".pyconf",
+            )
+        )
         if os.path.exists(expected_file):
             os.remove(expected_file)
-               
+
         # The command to test
-        sat = Sat('')
-        sat.config('appli-test -c')
+        sat = Sat("")
+        sat.config("appli-test -c")
 
         if os.path.exists(expected_file):
             res = "OK"
             os.remove(expected_file)
         self.assertEqual(res, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index c6674e9c8f2384c46038fe331091c81ab0a715c4..b7e265300ac7bddb26b719c7f781f593709c04d1 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -28,22 +28,25 @@ from unittestpy.tools import check_proc_existence_and_kill_multi
 
 sleep_time = 2
 
+
 class TestCase(unittest.TestCase):
     """sat config --edit"""
-    
+
     def test_010(self):
         # Test the launch of the editor when invoking the config -e
         OK = "KO"
 
         sat = Sat("-oUSER.editor='cooledit'")
         sat.config()
-        cmd_config = threading.Thread(target=sat.config, args=('-e',))
+        cmd_config = threading.Thread(target=sat.config, args=("-e",))
         cmd_config.start()
 
         time.sleep(sleep_time)
 
         editor = sat.cfg.USER.editor
-        pid = check_proc_existence_and_kill_multi(editor + ".*" + "salomeTools\.pyconf", 10)
+        pid = check_proc_existence_and_kill_multi(
+            editor + ".*" + "salomeTools\.pyconf", 10
+        )
 
         if pid:
             OK = "OK"
@@ -55,19 +58,22 @@ class TestCase(unittest.TestCase):
 
         sat = Sat("-oUSER.editor='cooledit'")
         sat.config()
-        cmd_config = threading.Thread(target=sat.config, args=('appli-test -e',))
+        cmd_config = threading.Thread(target=sat.config, args=("appli-test -e",))
         cmd_config.start()
 
         time.sleep(sleep_time)
 
         editor = sat.cfg.USER.editor
-        pid = check_proc_existence_and_kill_multi(editor + ".*" + "appli-test\.pyconf", 10)
+        pid = check_proc_existence_and_kill_multi(
+            editor + ".*" + "appli-test\.pyconf", 10
+        )
 
         if pid:
             OK = "OK"
         self.assertEqual(OK, "OK")
+
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 50a7d847ffe389462254cffb17ef684ae67d8dd1..5557fc0f40db71fe243d3050b816b812ca3db253 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -25,9 +25,10 @@ import unittest
 from src.salomeTools import Sat
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
     """sat config --value"""
-    
+
     def test_010(self):
         # Test the display of the right value of "sat config -v VARS.hostname"
         OK = "KO"
@@ -37,7 +38,7 @@ class TestCase(unittest.TestCase):
 
         # The command to test
         sat = Sat()
-        sat.config('-v VARS.hostname')
+        sat.config("-v VARS.hostname")
 
         # stop output redirection
         my_out.end_redirection()
@@ -58,7 +59,7 @@ class TestCase(unittest.TestCase):
 
         # The command to test
         sat = Sat()
-        sat.config('-l')
+        sat.config("-l")
 
         # stop output redirection
         my_out.end_redirection()
@@ -70,7 +71,7 @@ class TestCase(unittest.TestCase):
         if "ERROR" not in res:
             OK = "OK"
         self.assertEqual(OK, "OK")
-    
+
     """    
     def test_030(self):
         # Test the exception when salomeTools.pyconf has errors           
@@ -101,9 +102,10 @@ class TestCase(unittest.TestCase):
             shutil.copyfile(salomeToolspyconfPath_save, salomeToolspyconfPath)
             os.remove(salomeToolspyconfPath_save)
         self.assertEqual(OK, "OK")
-    """       
-        
+    """
+
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index c11fee1cff0860920f9e694744cf96e1d0350036..cf7119d83b6fc4ec6288b3adcdcfbce9bed32517 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -25,79 +25,78 @@ import unittest
 from src.salomeTools import Sat
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
     """sat config -v VARS.python"""
-    
+
     def test_010(self):
         # Test the display of the right value of 'sat config -v VARS.python'
-        OK = 'KO'
+        OK = "KO"
 
         # output redirection
         my_out = outRedirection()
 
         # The command to test
-        sat = Sat('')
-        sat.config('-v VARS.python')
+        sat = Sat("")
+        sat.config("-v VARS.python")
 
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
-        
+
         if platform.python_version() in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the display of the right value of 'sat config -s'
-        OK = 'KO'
+        OK = "KO"
 
         # output redirection
         my_out = outRedirection()
 
         # The command to test
-        sat = Sat('')
-        sat.config('-s')
+        sat = Sat("")
+        sat.config("-s")
 
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
-        
-        if 'INTERNAL' in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
-        
+
+        if "INTERNAL" in res:
+            OK = "OK"
+        self.assertEqual(OK, "OK")
+
     def test_030(self):
         # Test the display of the right value of 'sat config --info'
-        application = 'appli-test'
-        product = 'PRODUCT_DEV'
-        
-        OK = 'KO'
+        application = "appli-test"
+        product = "PRODUCT_DEV"
+
+        OK = "KO"
 
         # output redirection
         my_out = outRedirection()
 
         # The command to test
-        sat = Sat('')
-        sat.config(application + ' --info ' + product)
+        sat = Sat("")
+        sat.config(application + " --info " + product)
 
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
-        
-        if 'compilation method = cmake' in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+
+        if "compilation method = cmake" in res:
+            OK = "OK"
+        self.assertEqual(OK, "OK")
+
 
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 3a63fd5afd80269b92d26870b440e9a4aa14c277..868957495d12b8dbb3067f8892191a5ca4169abd 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -23,17 +23,18 @@ import unittest
 
 from src.salomeTools import Sat
 
+
 class TestSource(unittest.TestCase):
     """Test of the environ command"""
-    
+
     def test_010(self):
         # Test the environ command without any option
-        OK = 'KO'
-        
-        appli = 'appli-test'
+        OK = "KO"
+
+        appli = "appli-test"
+
+        file_env_name = "env_launch.sh"
 
-        file_env_name = 'env_launch.sh'
-        
         sat = Sat()
         sat.config(appli)
 
@@ -45,18 +46,18 @@ class TestSource(unittest.TestCase):
         sat.environ(appli)
 
         if os.path.exists(expected_file_path):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the environ command with option '--products'
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
-        
-        file_env_name = 'env_launch.sh'
-        
+        OK = "KO"
+
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
+
+        file_env_name = "env_launch.sh"
+
         sat = Sat()
         sat.config(appli)
 
@@ -65,49 +66,49 @@ class TestSource(unittest.TestCase):
         if os.path.exists(expected_file_path):
             os.remove(expected_file_path)
 
-        sat.environ(appli + ' --products ' + product_name)
+        sat.environ(appli + " --products " + product_name)
 
         if os.path.exists(expected_file_path):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')        
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the environ command with option --target
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        
-        file_env_name = 'env_launch.sh'
-        
+        OK = "KO"
+
+        appli = "appli-test"
+
+        file_env_name = "env_launch.sh"
+
         sat = Sat()
         sat.config(appli)
 
-        expected_file_path = os.path.join('.', file_env_name)
-        expected_file_path2 = os.path.join('.', 'env_build.sh')
+        expected_file_path = os.path.join(".", file_env_name)
+        expected_file_path2 = os.path.join(".", "env_build.sh")
 
         if os.path.exists(expected_file_path):
             os.remove(expected_file_path)
 
-        sat.environ(appli + ' --target .')
+        sat.environ(appli + " --target .")
 
         if os.path.exists(expected_file_path):
-            OK = 'OK'
+            OK = "OK"
 
         if os.path.exists(expected_file_path):
             os.remove(expected_file_path)
             os.remove(expected_file_path2)
 
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK') 
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the environ command with option --prefix
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        prefix = 'TEST'
-        file_env_name = prefix + '_launch.sh'
-        
+        OK = "KO"
+
+        appli = "appli-test"
+        prefix = "TEST"
+        file_env_name = prefix + "_launch.sh"
+
         sat = Sat()
         sat.config(appli)
 
@@ -116,20 +117,20 @@ class TestSource(unittest.TestCase):
         if os.path.exists(expected_file_path):
             os.remove(expected_file_path)
 
-        sat.environ(appli + ' --prefix ' + prefix)
+        sat.environ(appli + " --prefix " + prefix)
 
         if os.path.exists(expected_file_path):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK') 
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_050(self):
         # Test the environ command with option --shell
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        shell = 'bat'
-        file_env_name = 'env_launch.bat'
-        
+        OK = "KO"
+
+        appli = "appli-test"
+        shell = "bat"
+        file_env_name = "env_launch.bat"
+
         sat = Sat()
         sat.config(appli)
 
@@ -138,13 +139,14 @@ class TestSource(unittest.TestCase):
         if os.path.exists(expected_file_path):
             os.remove(expected_file_path)
 
-        sat.environ(appli + ' --shell ' + shell)
+        sat.environ(appli + " --shell " + shell)
 
         if os.path.exists(expected_file_path):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK') 
+            OK = "OK"
+        self.assertEqual(OK, "OK")
+
 
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index fc231913313207f03ef4ddecdd1a3f9b64d56449..d255fd08eaa8c9f62fb711f93e47f2b65ac14ee6 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -23,100 +23,106 @@ import unittest
 
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test the job command"""
 
     def test_010(self):
         # Test the job command
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        sat.job("--jobs_config .test --name Job 1" )
+        sat.job("--jobs_config .test --name Job 1")
 
         ff = open(tmp_file, "r")
         log_files = ff.readlines()
         ff.close()
         os.remove(tmp_file)
-        log_config = [line.replace("\n", "") for line in log_files if 'config.xml' in line]
-        
+        log_config = [
+            line.replace("\n", "") for line in log_files if "config.xml" in line
+        ]
+
         text = open(log_config[0], "r").read()
 
         if "nb_proc" in text:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
-
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the job command with a failing command
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        res = sat.job("--jobs_config .test --name Job 4" )
+        res = sat.job("--jobs_config .test --name Job 4")
 
         if res == 1:
-            OK = 'OK'         
+            OK = "OK"
         # pyunit method to compare 2 str
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the job command with a wrong file configuration
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        res = sat.job("--jobs_config NOTEXIST --name Job 4" )
+        res = sat.job("--jobs_config NOTEXIST --name Job 4")
 
         if res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the job command without --jobs_config option
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        res = sat.job("--name Job 4" )
+        res = sat.job("--name Job 4")
 
         if res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_050(self):
         # Test the job command with a job name that does not exist
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        res = sat.job("--jobs_config .test --name NOTEXIST" )
+        res = sat.job("--jobs_config .test --name NOTEXIST")
 
         if res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_060(self):
-        # Test the sat -h job     
+        # Test the sat -h job
         OK = "KO"
 
         import job
-        
-        if "Executes the commands of the job defined in the jobs configuration file" in job.description():
+
+        if (
+            "Executes the commands of the job defined in the jobs configuration file"
+            in job.description()
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 1b22378c73b5246f1b40e35cbc872d52f2cd8356..aa2f1ad5fc9ff73ed193a7011cac872e441337cd 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -24,102 +24,103 @@ import unittest
 from src.salomeTools import Sat
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
-    "Test the jobs command"""
+    "Test the jobs command" ""
 
     def test_010(self):
         # Test the jobs command
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the jobs command
-        sat.jobs("--name .test --publish" )
+        sat.jobs("--name .test --publish")
 
         ff = open(tmp_file, "r")
         log_files = ff.readlines()
         ff.close()
         os.remove(tmp_file)
-        log_jobs = [line.replace("\n", "") for line in log_files if 'jobs.xml' in line]
-        
+        log_jobs = [line.replace("\n", "") for line in log_files if "jobs.xml" in line]
+
         text = open(log_jobs[0], "r").read()
-        
+
         expected_res = [
-        "Establishing connection with all the machines",
-        "Executing the jobs",
-        "Results for job"
+            "Establishing connection with all the machines",
+            "Executing the jobs",
+            "Results for job",
         ]
-        
+
         res = 0
         for exp_res in expected_res:
             if exp_res not in text:
                 res += 1
-        
+
         if res == 0:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the jobs command with option --only_jobs
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the jobs command
-        sat.jobs("--name .test --publish --only_jobs Job 1" )
+        sat.jobs("--name .test --publish --only_jobs Job 1")
 
         ff = open(tmp_file, "r")
         log_files = ff.readlines()
         ff.close()
         os.remove(tmp_file)
-        log_jobs = [line.replace("\n", "") for line in log_files if 'jobs.xml' in line]
-        
+        log_jobs = [line.replace("\n", "") for line in log_files if "jobs.xml" in line]
+
         text = open(log_jobs[0], "r").read()
-        
+
         expected_res = [
-        "Establishing connection with all the machines",
-        "Executing the jobs",
-        "Results for job"
+            "Establishing connection with all the machines",
+            "Executing the jobs",
+            "Results for job",
         ]
-        
+
         res = 0
         for exp_res in expected_res:
             if exp_res not in text:
                 res += 1
-        
+
         if res == 0:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the jobs command without --name option
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
         res = sat.jobs()
 
         if res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
-        
+            OK = "OK"
+        self.assertEqual(OK, "OK")
+
     def test_040(self):
         # Test the jobs command with a wrong file configuration
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        res = sat.jobs("--name NOTEXIST" )
+        res = sat.jobs("--name NOTEXIST")
 
         if res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_050(self):
         # Test the display of the right value of 'sat jobs --list'
@@ -130,7 +131,7 @@ class TestCase(unittest.TestCase):
 
         # The command to test
         sat = Sat()
-        sat.jobs('--list')
+        sat.jobs("--list")
 
         # stop output redirection
         my_out.end_redirection()
@@ -144,16 +145,20 @@ class TestCase(unittest.TestCase):
         self.assertEqual(OK, "OK")
 
     def test_060(self):
-        # Test the sat -h jobs       
+        # Test the sat -h jobs
         OK = "KO"
 
         import jobs
-        
-        if "The jobs command launches maintenances that are described in the dedicated jobs configuration file." in jobs.description():
+
+        if (
+            "The jobs command launches maintenances that are described in the dedicated jobs configuration file."
+            in jobs.description()
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index a9c8be45cd5790f2615bc1d3f9bd2af05abbfd63..2a45cd0a281d66113805037b31489f5160a1970d 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -30,21 +30,24 @@ from unittestpy.tools import check_proc_existence_and_kill_multi
 
 sleep_time = 2
 
+
 class TestCase(unittest.TestCase):
     """Test of log command: launch of browser"""
-           
+
     def test_010(self):
         # Test the write of xml log when invoking a command
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-        sat.config('appli-test -v USER.browser')
-        
+        sat.config("appli-test -v USER.browser")
+
         # get log file path
         logDir = sat.cfg.USER.log_dir
-        logPath = os.path.join(logDir, sat.cfg.VARS.datehour + "_" + sat.cfg.VARS.command + ".xml")
-        
+        logPath = os.path.join(
+            logDir, sat.cfg.VARS.datehour + "_" + sat.cfg.VARS.command + ".xml"
+        )
+
         if os.path.exists(logPath):
             OK = "OK"
         self.assertEqual(OK, "OK")
@@ -52,16 +55,15 @@ class TestCase(unittest.TestCase):
     def test_020(self):
         # Test the terminal option without application
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-        
+
         one = u"1"
         sys.stdin = io.StringIO(one)
-        
-        
+
         try:
-            sat.log('-t')
+            sat.log("-t")
             OK = "OK"
             sys.stdin = sys.__stdin__
         except:
@@ -71,17 +73,17 @@ class TestCase(unittest.TestCase):
     def test_030(self):
         # Test the terminal option with application
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-              
-        sat.config('appli-test -v VARS.python')
-        
+
+        sat.config("appli-test -v VARS.python")
+
         one = u"1"
         sys.stdin = io.StringIO(one)
-        
+
         try:
-            sat.log('appli-test -t --last')
+            sat.log("appli-test -t --last")
             OK = "OK"
             sys.stdin = sys.__stdin__
         except:
@@ -91,17 +93,17 @@ class TestCase(unittest.TestCase):
     def test_040(self):
         # Test the terminal option with 0 as input
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-              
-        sat.config('appli-test -v VARS.python')
-        
+
+        sat.config("appli-test -v VARS.python")
+
         zero = u"0\n1"
         sys.stdin = io.StringIO(zero)
-        
+
         try:
-            sat.log('--terminal')
+            sat.log("--terminal")
             OK = "OK"
         finally:
             sys.stdin = sys.__stdin__
@@ -110,19 +112,19 @@ class TestCase(unittest.TestCase):
     def test_050(self):
         # Test the terminal option with input bigger than the number of logs
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-              
-        sat.config('appli-test -v VARS.python')
-        
+
+        sat.config("appli-test -v VARS.python")
+
         nb_logs = len(os.listdir(sat.cfg.USER.log_dir))
-        
+
         nb_logs_u = unicode(str(nb_logs) + "\n1")
         sys.stdin = io.StringIO(nb_logs_u)
-        
+
         try:
-            sat.log('--terminal')
+            sat.log("--terminal")
             OK = "OK"
         finally:
             sys.stdin = sys.__stdin__
@@ -131,17 +133,17 @@ class TestCase(unittest.TestCase):
     def test_060(self):
         # Test the terminal option with input return
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-              
-        sat.config('appli-test -v VARS.python')
-        
+
+        sat.config("appli-test -v VARS.python")
+
         ret = unicode("\n0")
         sys.stdin = io.StringIO(ret)
-        
+
         try:
-            sat.log('--terminal')
+            sat.log("--terminal")
             OK = "OK"
         finally:
             sys.stdin = sys.__stdin__
@@ -150,17 +152,17 @@ class TestCase(unittest.TestCase):
     def test_070(self):
         # Test the terminal option with input not int
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-              
-        sat.config('appli-test -v VARS.python')
-        
+
+        sat.config("appli-test -v VARS.python")
+
         ret = unicode("blabla\n0")
         sys.stdin = io.StringIO(ret)
-        
+
         try:
-            sat.log('--terminal')
+            sat.log("--terminal")
             OK = "OK"
         finally:
             sys.stdin = sys.__stdin__
@@ -169,75 +171,78 @@ class TestCase(unittest.TestCase):
     def test_080(self):
         # Test the terminal option and option last
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-        
+
         try:
-            sat.log('--terminal --last')
+            sat.log("--terminal --last")
             OK = "OK"
         finally:
             sys.stdin = sys.__stdin__
-        
+
         # pyunit method to compare 2 str
         self.assertEqual(OK, "OK")
-    
+
     def test_090(self):
         # Test the option --last
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat("-oUSER.browser='konqueror'")
-              
-        sat.config('appli-test -v VARS.python')
-        
-        
+
+        sat.config("appli-test -v VARS.python")
+
         time.sleep(sleep_time)
-        cmd_log = threading.Thread(target=sat.log, args=('appli-test --last',))
+        cmd_log = threading.Thread(target=sat.log, args=("appli-test --last",))
         cmd_log.start()
-        
+
         time.sleep(sleep_time)
 
         browser = sat.cfg.USER.browser
         pid = check_proc_existence_and_kill_multi(browser + ".*" + "xml", 10)
-        
+
         if pid:
             OK = "OK"
         self.assertEqual(OK, "OK")
-    
+
     def test_100(self):
         # Test the option --clean
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-               
-        sat.config('-v VARS.user')
-        
+
+        sat.config("-v VARS.user")
+
         nb_logs_t0 = len(os.listdir(sat.cfg.USER.log_dir))
 
-        sat.log('--clean 1')
-        
+        sat.log("--clean 1")
+
         nb_logs_t1 = len(os.listdir(sat.cfg.USER.log_dir))
-        
-        if nb_logs_t1-nb_logs_t0 == 0:
+
+        if nb_logs_t1 - nb_logs_t0 == 0:
             OK = "OK"
         self.assertEqual(OK, "OK")
 
     def test_120(self):
         # Test the option --clean with big number of files to clean
         OK = "KO"
-        
+
         # launch the command that will write a log
         sat = Sat()
-               
-        sat.config('-v VARS.user')
-        
+
+        sat.config("-v VARS.user")
+
         nb_logs_t0 = len(os.listdir(sat.cfg.USER.log_dir))
-        
+
         if os.path.exists(sat.cfg.USER.log_dir + "_save"):
             shutil.rmtree(sat.cfg.USER.log_dir + "_save")
-        print("TODO: risky !!!copytree!!!", sat.cfg.USER.log_dir, sat.cfg.USER.log_dir + "_save")
+        print(
+            "TODO: risky !!!copytree!!!",
+            sat.cfg.USER.log_dir,
+            sat.cfg.USER.log_dir + "_save",
+        )
         """
         shutil.copytree(sat.cfg.USER.log_dir,sat.cfg.USER.log_dir + "_save")
         
@@ -252,7 +257,7 @@ class TestCase(unittest.TestCase):
             OK = "OK"
         """
         self.assertEqual(OK, "OK")
-    
+
     """
     def test_130(self):
         # Test the option --full
@@ -283,12 +288,13 @@ class TestCase(unittest.TestCase):
         OK = "KO"
 
         import log
-        
+
         if "Gives access to the logs produced" in log.description():
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 38a61841058e585282c4d282d8af9e1627a7867c..76b7b5185f0e90535b6b8b2f4d871ba0902c3b3a 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -28,6 +28,7 @@ from unittestpy.tools import check_proc_existence_and_kill_multi
 
 sleep_time = 2
 
+
 class TestCase(unittest.TestCase):
     """Test of log command: launch of browser"""
 
@@ -37,11 +38,11 @@ class TestCase(unittest.TestCase):
 
         sat = Sat("-oUSER.browser='konqueror'")
         time.sleep(sleep_time)
-        cmd_log = threading.Thread(target=sat.log, args=('',))
+        cmd_log = threading.Thread(target=sat.log, args=("",))
         cmd_log.start()
 
         time.sleep(sleep_time)
-        
+
         sat.config("")
         browser = sat.cfg.USER.browser
         pid = check_proc_existence_and_kill_multi(browser + ".*" + "hat\.xml", 10)
@@ -50,7 +51,8 @@ class TestCase(unittest.TestCase):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 08bb54f1a3d6bd5f963207e1c9fc97a7e7ab4179..3203654a4127382e04ea87aedac1051e6c8e708a 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -25,49 +25,52 @@ from src.salomeTools import Sat
 import src.product
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
     """Test of the clean command"""
 
     def test_010(self):
         # Test the clean command with no arguments (nothing to clean)
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
 
         sat = Sat()
 
         # output redirection
         my_out = outRedirection()
-        
+
         sat.clean(appli)
-        
+
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
+
         if "Nothing to suppress" in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the clean of sources
         OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
         product_name = "PRODUCT_GIT"
 
-        sat = Sat()      
-        
+        sat = Sat()
+
         # Make sure the sources exist
         sat.prepare(appli + " -p " + product_name)
-        
+
         # Call the command
         sat.clean(appli + " -p " + product_name + " --sources", batch=True)
-           
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+
         if not os.path.exists(expected_src_dir):
             OK = "OK"
         self.assertEqual(OK, "OK")
@@ -76,20 +79,22 @@ class TestCase(unittest.TestCase):
         # Test the clean of build
         OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
         product_name = "PRODUCT_GIT"
 
-        sat = Sat()      
-        
+        sat = Sat()
+
         # Make sure the build exists
         sat.prepare(appli + " -p " + product_name)
         sat.configure(appli + " -p " + product_name)
-        
+
         # Call the command
         sat.clean(appli + " -p " + product_name + " --build", batch=True)
-           
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        
+
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+
         if not os.path.exists(expected_build_dir):
             OK = "OK"
         self.assertEqual(OK, "OK")
@@ -98,20 +103,22 @@ class TestCase(unittest.TestCase):
         # Test the clean of install
         OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
         product_name = "PRODUCT_GIT"
 
-        sat = Sat()      
-        
+        sat = Sat()
+
         # Make sure the build exists
         sat.prepare(appli + " -p " + product_name)
         sat.configure(appli + " -p " + product_name)
-        
+
         # Call the command
         sat.clean(appli + " -p " + product_name + " --install", batch=True)
-           
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        
+
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+
         if not os.path.exists(expected_install_dir):
             OK = "OK"
         self.assertEqual(OK, "OK")
@@ -120,23 +127,33 @@ class TestCase(unittest.TestCase):
         # Test the clean of all (build, src, install)
         OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
         product_name = "PRODUCT_GIT"
 
-        sat = Sat()      
-        
+        sat = Sat()
+
         # Make sure the build exists
         sat.prepare(appli + " -p " + product_name)
         sat.compile(appli + " -p " + product_name)
-        
+
         # Call the command
         sat.clean(appli + " -p " + product_name + " --all", batch=True)
-           
-        expected_install_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
-        expected_build_dir = src.product.get_product_config(sat.cfg, product_name).build_dir
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        
-        if not os.path.exists(expected_install_dir) and not os.path.exists(expected_build_dir) and not os.path.exists(expected_src_dir):
+
+        expected_install_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
+        expected_build_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).build_dir
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+
+        if (
+            not os.path.exists(expected_install_dir)
+            and not os.path.exists(expected_build_dir)
+            and not os.path.exists(expected_src_dir)
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
@@ -144,37 +161,44 @@ class TestCase(unittest.TestCase):
         # Test the clean with sources_without_dev option
         OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
         product_name = "PRODUCT_GIT"
         product_name2 = "PRODUCT_DEV"
 
-        sat = Sat()      
-        
+        sat = Sat()
+
         # Make sure the build exists
         sat.prepare(appli + " -p " + product_name + "," + product_name2)
-        
+
         # Call the command
         sat.clean(appli + " -p " + product_name + " --sources_without_dev", batch=True)
-           
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_src_dir2 = src.product.get_product_config(sat.cfg, product_name2).source_dir
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_src_dir2 = src.product.get_product_config(
+            sat.cfg, product_name2
+        ).source_dir
+
         if not os.path.exists(expected_src_dir) and os.path.exists(expected_src_dir2):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
-
     def test_070(self):
         # Test the sat -h clean
         OK = "KO"
 
         import clean
-        
-        if "The clean command suppress the source, build, or install" in clean.description():
+
+        if (
+            "The clean command suppress the source, build, or install"
+            in clean.description()
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 1dc24d43dfb29f6fb0de06bf8ba56f246bb7af64..af7a375ee9ccbaba8148aff3f368d5835779216c 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -26,142 +26,151 @@ from src.salomeTools import Sat
 import src.product
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
     """Test of the patch command"""
 
     def test_010(self):
         # Test the patch command with a product in dev mode
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_DEV'
+        appli = "appli-test"
+        product_name = "PRODUCT_DEV"
 
         sat = Sat("-oUSER.output_level=2")
-               
+
         sat.config(appli)
-        
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'my_test_file.txt')
-        expected_text = 'HELLO WORLD\n'
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "my_test_file.txt")
+        expected_text = "HELLO WORLD\n"
+
         if os.path.exists(expected_src_dir):
             shutil.rmtree(expected_src_dir)
-        
-        sat.source(appli + ' --product ' + product_name)
-        
-        f = open(expected_file_path, 'r')
+
+        sat.source(appli + " --product " + product_name)
+
+        f = open(expected_file_path, "r")
         text = f.readlines()[0]
-        OK1 = 'KO'
+        OK1 = "KO"
         if text == expected_text:
-            OK1 = 'OK'
-       
-        sat.patch(appli + ' --product ' + product_name)
-        
-        new_expected_text = 'HELLO WORLD MODIFIED\n'
-        f = open(expected_file_path, 'r')
+            OK1 = "OK"
+
+        sat.patch(appli + " --product " + product_name)
+
+        new_expected_text = "HELLO WORLD MODIFIED\n"
+        f = open(expected_file_path, "r")
         text = f.readlines()[0]
-        
-        OK2 = 'KO'
+
+        OK2 = "KO"
         if text == new_expected_text:
-            OK2 = 'OK'         
+            OK2 = "OK"
 
-        if (OK1, OK2)==('OK', 'OK'):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+        if (OK1, OK2) == ("OK", "OK"):
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the patch command with a product with no sources found
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_DEV'
+        appli = "appli-test"
+        product_name = "PRODUCT_DEV"
 
-        sat = Sat('')
+        sat = Sat("")
         sat.config(appli)
-        
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+
         if os.path.exists(expected_src_dir):
             shutil.rmtree(expected_src_dir)
-               
+
         # output redirection
         my_out = outRedirection()
-        
-        sat.patch(appli + ' --product ' + product_name)
-        
+
+        sat.patch(appli + " --product " + product_name)
+
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
-        if "No sources found for the " + product_name +" product" in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+
+        if "No sources found for the " + product_name + " product" in res:
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the patch command with a product without patch
-        OK = 'KO'
+        OK = "KO"
+
+        appli = "appli-test"
+        product_name = "PRODUCT_ARCHIVE"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_ARCHIVE'
+        sat = Sat("-v4")
+
+        sat.source(appli + " --product " + product_name)
 
-        sat = Sat('-v4')
-                      
-        sat.source(appli + ' --product ' + product_name)
-               
         # output redirection
         my_out = outRedirection()
-        
-        sat.patch(appli + ' --product ' + product_name)
-        
+
+        sat.patch(appli + " --product " + product_name)
+
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
-        if "No patch for the " + product_name +" product" in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+
+        if "No patch for the " + product_name + " product" in res:
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the patch command with a product whose patch is not valid
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_DEV'
+        appli = "appli-test"
+        product_name = "PRODUCT_DEV"
 
         sat = Sat("-oPRODUCTS.PRODUCT_DEV.default.patches=['/']")
-                      
-        sat.source(appli + ' --product ' + product_name)
-               
+
+        sat.source(appli + " --product " + product_name)
+
         # output redirection
         my_out = outRedirection()
-        
-        sat.patch(appli + ' --product ' + product_name)
-        
+
+        sat.patch(appli + " --product " + product_name)
+
         # stop output redirection
         my_out.end_redirection()
 
         # get results
         res = my_out.read_results()
-        
+
         if "Not a valid patch" in res:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_050(self):
         # Test the sat -h patch
         OK = "KO"
 
         import patch
-        
-        if "The patch command apply the patches on the sources of" in patch.description():
+
+        if (
+            "The patch command apply the patches on the sources of"
+            in patch.description()
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 334054772909d6c934b4013c23abcdbc871cb46c..92f5e04a8ac05616b47117b5c6188b4c9572ee82 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -25,87 +25,93 @@ import unittest
 import src
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test of the prepare command"""
 
     def test_010(self):
         # Test the prepare command with a product in dev mode
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_DEV'
+        appli = "appli-test"
+        product_name = "PRODUCT_DEV"
 
         sat = Sat()
-               
+
         sat.config(appli)
-        
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'my_test_file.txt')
-        expected_text = 'HELLO WORLD\n'
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "my_test_file.txt")
+        expected_text = "HELLO WORLD\n"
+
         if os.path.exists(expected_src_dir):
             shutil.rmtree(expected_src_dir)
-        
-        sat.prepare(appli + ' --product ' + product_name)
-        
-        f = open(expected_file_path, 'r')
+
+        sat.prepare(appli + " --product " + product_name)
+
+        f = open(expected_file_path, "r")
         text = f.readlines()[0]
         if text == expected_text:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the prepare command with all products
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
-        product_name = 'PRODUCT_DEV'
+        appli = "appli-test"
+        product_name = "PRODUCT_DEV"
 
         sat = Sat()
         sat.config(appli)
-        
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'my_test_file.txt')
-        expected_text = 'HELLO WORLD\n'
-        
+
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "my_test_file.txt")
+        expected_text = "HELLO WORLD\n"
+
         if os.path.exists(expected_src_dir):
             shutil.rmtree(expected_src_dir)
-        
+
         sat.prepare(appli)
-        
-        f = open(expected_file_path, 'r')
+
+        f = open(expected_file_path, "r")
         text = f.readlines()[0]
         if text == expected_text:
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the prepare command with the --force and --force_patch options
-        OK = 'KO'
+        OK = "KO"
 
-        appli = 'appli-test'
+        appli = "appli-test"
 
         sat = Sat()
         sat.config(appli)
-       
+
         try:
             sat.prepare(appli + " --force --force_patch")
-            OK = 'OK'
+            OK = "OK"
         except:
             pass
-        self.assertEqual(OK, 'OK')
+        self.assertEqual(OK, "OK")
 
     def test_040(self):
         # Test the sat -h prepare
         OK = "KO"
 
         import prepare
-        
+
         if "The prepare command gets the sources" in prepare.description():
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index f7a053d47717a953eeba5748fc751fb8ed141fd6..ac09fe3f3718d2440315ce0bc249cc4f27365cde 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -25,59 +25,66 @@ from src.salomeTools import Sat
 import src.product
 from unittestpy.tools import outRedirection
 
+
 class TestCase(unittest.TestCase):
     """Test of the source command"""
-    
+
     def test_010(self):
         # Test the source command with archive product
-        appli = 'appli-test'
-        product_name = 'PRODUCT_ARCHIVE'
+        appli = "appli-test"
+        product_name = "PRODUCT_ARCHIVE"
 
         sat = Sat()
-        sat.source(appli + ' --product ' + product_name)
+        sat.source(appli + " --product " + product_name)
 
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'my_test_file.txt')
-        expected_text = 'HELLO WORLD\n'
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "my_test_file.txt")
+        expected_text = "HELLO WORLD\n"
 
-        f = open(expected_file_path, 'r')
+        f = open(expected_file_path, "r")
         text = f.read()
         self.assertEqual(text, expected_text)
-        
+
     def test_020(self):
         # Test the source command with git product
-        appli = 'appli-test'
-        product_name = 'PRODUCT_GIT'
+        appli = "appli-test"
+        product_name = "PRODUCT_GIT"
 
         sat = Sat()
-        sat.source(appli + ' --product ' + product_name)
+        sat.source(appli + " --product " + product_name)
 
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'my_test_file.txt')
-        expected_text = 'HELLO WORLD\n'
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "my_test_file.txt")
+        expected_text = "HELLO WORLD\n"
 
-        f = open(expected_file_path, 'r')
+        f = open(expected_file_path, "r")
         text = f.read()
         self.assertEqual(text, expected_text)
 
     def test_030(self):
         # Test the source command with cvs product
-        appli = 'appli-test'
-        product_name = 'PRODUCT_CVS'
+        appli = "appli-test"
+        product_name = "PRODUCT_CVS"
 
         sat = Sat()
-        sat.source(appli + ' --product ' + product_name)
+        sat.source(appli + " --product " + product_name)
 
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).source_dir
-        expected_file_path = os.path.join(expected_src_dir, 'README.FIRST.txt')
-        expected_text = 'Copyright (C) 2007-2012  CEA/DEN, EDF R&D, OPEN CASCADE\n'
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).source_dir
+        expected_file_path = os.path.join(expected_src_dir, "README.FIRST.txt")
+        expected_text = "Copyright (C) 2007-2012  CEA/DEN, EDF R&D, OPEN CASCADE\n"
 
-        f = open(expected_file_path, 'r')
+        f = open(expected_file_path, "r")
         text = f.readlines()[0]
 
         # pyunit method to compare 2 str
         self.assertEqual(text, expected_text)
-    
+
     """
     def test_040(self):
         # Test the source command with svn product
@@ -105,34 +112,38 @@ class TestCase(unittest.TestCase):
 
     def test_050(self):
         # Test the source command with native product
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        product_name = 'PRODUCT_NATIVE'
+        OK = "KO"
+
+        appli = "appli-test"
+        product_name = "PRODUCT_NATIVE"
 
         sat = Sat()
-        sat.source(appli + ' --product ' + product_name)
+        sat.source(appli + " --product " + product_name)
 
-        expected_src_dir = os.path.join(sat.cfg.APPLICATION.workdir, 'SOURCES', product_name)
+        expected_src_dir = os.path.join(
+            sat.cfg.APPLICATION.workdir, "SOURCES", product_name
+        )
         if not os.path.exists(expected_src_dir):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_060(self):
         # Test the source command with fixed product
-        OK = 'KO'
-        
-        appli = 'appli-test'
-        product_name = 'PRODUCT_FIXED'
+        OK = "KO"
+
+        appli = "appli-test"
+        product_name = "PRODUCT_FIXED"
 
         sat = Sat()
-        sat.source(appli + ' --product ' + product_name)
+        sat.source(appli + " --product " + product_name)
 
-        expected_src_dir = src.product.get_product_config(sat.cfg, product_name).install_dir
+        expected_src_dir = src.product.get_product_config(
+            sat.cfg, product_name
+        ).install_dir
 
         if os.path.exists(expected_src_dir):
-            OK = 'OK'
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     """
     def test_070(self):
@@ -164,12 +175,13 @@ class TestCase(unittest.TestCase):
         OK = "KO"
 
         import source
-        
+
         if "gets the sources of the application" in source.description():
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 80f579e2a50a15d0841208b43278ead2d4a07069..65521be254a62ecc196237d9505704e44339a953 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -23,19 +23,20 @@ import unittest
 
 from src.salomeTools import Sat
 
+
 class TestCase(unittest.TestCase):
     """Test of the shell command"""
 
     def test_010(self):
         # Test the shell command with the --command option
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         sat.config()
         sat_way = sat.cfg.VARS.salometoolsway
-        
+
         # Execute the shell command
         sat.shell("--command ls " + sat_way)
 
@@ -44,22 +45,22 @@ class TestCase(unittest.TestCase):
         ff.close()
         os.remove(tmp_file)
         log_files = [line.replace("\n", "") for line in log_files]
-        
+
         text = open(log_files[2], "r").read()
 
         if "salomeTools.py" in text:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the shell command with the --command option with a failing command
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
 
         sat = Sat("-l " + tmp_file)
-        
+
         sat.config()
-        
+
         # Execute the shell command
         res = sat.shell("--command i_fail")
 
@@ -68,24 +69,25 @@ class TestCase(unittest.TestCase):
         ff.close()
         os.remove(tmp_file)
         log_files = [line.replace("\n", "") for line in log_files]
-        
+
         text = open(log_files[2], "r").read()
 
         if "i_fail" in text and res == 1:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the sat -h shell
         OK = "KO"
 
         import shell
-        
+
         if "Executes the shell command passed as argument" in shell.description():
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 136df62bf14c3911e891eb4cb371b6918c168495..8ea743fb2c2abfa61a81fd7599582eaa4f1ce5fe 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 #  Copyright (C) 2010-2018  CEA/DEN
 #
@@ -23,66 +23,75 @@ import unittest
 
 from src.salomeTools import Sat
 
+
 class TestTest(unittest.TestCase):
     """Test of the test command"""
 
     def test_010(self):
         # Test the test command
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
         application = "SALOME-7.8.0"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        sat.test(application + " --grid GEOM --session light" )
+        sat.test(application + " --grid GEOM --session light")
 
         ff = open(tmp_file, "r")
         log_files = ff.readlines()
         ff.close()
         os.remove(tmp_file)
-        log_testboard = [line.replace("\n", "") for line in log_files if 'testboard.xml' in line]
-        
+        log_testboard = [
+            line.replace("\n", "") for line in log_files if "testboard.xml" in line
+        ]
+
         text = open(log_testboard[0], "r").read()
 
         if '<session name="light">' in text:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_020(self):
         # Test the test command with PY type
-        OK = 'KO'
+        OK = "KO"
         tmp_file = "/tmp/test.txt"
         application = "SALOME-7.8.0"
 
         sat = Sat("-l " + tmp_file)
-        
+
         # Execute the job command
-        sat.test(application + " --grid MED --session PY_test_withKernel" )
+        sat.test(application + " --grid MED --session PY_test_withKernel")
 
         ff = open(tmp_file, "r")
         log_files = ff.readlines()
         ff.close()
         os.remove(tmp_file)
-        log_testboard = [line.replace("\n", "") for line in log_files if 'testboard.xml' in line]
-        
+        log_testboard = [
+            line.replace("\n", "") for line in log_files if "testboard.xml" in line
+        ]
+
         text = open(log_testboard[0], "r").read()
 
         if '<session name="PY_test_withKernel">' in text:
-            OK = 'OK'         
-        self.assertEqual(OK, 'OK')
+            OK = "OK"
+        self.assertEqual(OK, "OK")
 
     def test_030(self):
         # Test the sat -h test
         OK = "KO"
 
         import test
-        
-        if "The test command runs a test base on a SALOME installation" in test.description():
+
+        if (
+            "The test command runs a test base on a SALOME installation"
+            in test.description()
+        ):
             OK = "OK"
         self.assertEqual(OK, "OK")
 
+
 # test launch
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
     pass
index 17ef2cd45a129f1c507f7788e18fe2fa0f5baffa..012f36b0f5bca9bbd8c0f22423063122c4515d5b 100644 (file)
@@ -109,8 +109,10 @@ from xml.sax import saxutils
 #   >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
 #   >>>
 
+
 class OutputRedirector(object):
-    """ Wrapper to redirect stdout or stderr """
+    """Wrapper to redirect stdout or stderr"""
+
     def __init__(self, fp):
         self.fp = fp
 
@@ -123,14 +125,15 @@ class OutputRedirector(object):
     def flush(self):
         self.fp.flush()
 
+
 stdout_redirector = OutputRedirector(sys.stdout)
 stderr_redirector = OutputRedirector(sys.stderr)
 
 
-
 # ----------------------------------------------------------------------
 # Template
 
+
 class Template_mixin(object):
     """
     Define an HTML template for report customization and generation.
@@ -172,13 +175,13 @@ class Template_mixin(object):
     """
 
     STATUS = {
-    0: 'pass',
-    1: 'fail',
-    2: 'error',
+        0: "pass",
+        1: "fail",
+        2: "error",
     }
 
-    DEFAULT_TITLE = 'Unit Test Report'
-    DEFAULT_DESCRIPTION = ''
+    DEFAULT_TITLE = "Unit Test Report"
+    DEFAULT_DESCRIPTION = ""
 
     # ------------------------------------------------------------------------
     # HTML Template
@@ -297,7 +300,6 @@ function showOutput(id, name) {
 """
     # variables: (title, generator, stylesheet, heading, report, ending)
 
-
     # ------------------------------------------------------------------------
     # Stylesheet
     #
@@ -390,8 +392,6 @@ a.popup_link:hover {
 </style>
 """
 
-
-
     # ------------------------------------------------------------------------
     # Heading
     #
@@ -402,12 +402,10 @@ a.popup_link:hover {
 <p class='description'>%(description)s</p>
 </div>
 
-""" # variables: (title, parameters, description)
+"""  # variables: (title, parameters, description)
 
     HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
-""" # variables: (name, value)
-
-
+"""  # variables: (name, value)
 
     # ------------------------------------------------------------------------
     # Report
@@ -446,7 +444,7 @@ a.popup_link:hover {
     <td>&nbsp;</td>
 </tr>
 </table>
-""" # variables: (test_list, count, Pass, fail, error)
+"""  # variables: (test_list, count, Pass, fail, error)
 
     REPORT_CLASS_TMPL = r"""
 <tr class='%(style)s'>
@@ -457,8 +455,7 @@ a.popup_link:hover {
     <td>%(error)s</td>
     <td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
 </tr>
-""" # variables: (style, desc, count, Pass, fail, error, cid)
-
+"""  # variables: (style, desc, count, Pass, fail, error, cid)
 
     REPORT_TEST_WITH_OUTPUT_TMPL = r"""
 <tr id='%(tid)s' class='%(Class)s'>
@@ -482,22 +479,18 @@ a.popup_link:hover {
 
     </td>
 </tr>
-""" # variables: (tid, Class, style, desc, status)
-
+"""  # variables: (tid, Class, style, desc, status)
 
     REPORT_TEST_NO_OUTPUT_TMPL = r"""
 <tr id='%(tid)s' class='%(Class)s'>
     <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
     <td colspan='5' align='center'>%(status)s</td>
 </tr>
-""" # variables: (tid, Class, style, desc, status)
-
+"""  # variables: (tid, Class, style, desc, status)
 
     REPORT_TEST_OUTPUT_TMPL = r"""
 %(id)s: %(output)s
-""" # variables: (id, output)
-
-
+"""  # variables: (id, output)
 
     # ------------------------------------------------------------------------
     # ENDING
@@ -505,11 +498,13 @@ a.popup_link:hover {
 
     ENDING_TMPL = """<div id='ending'>&nbsp;</div>"""
 
+
 # -------------------- The end of the Template class -------------------
 
 
 TestResult = unittest.TestResult
 
+
 class _TestResult(TestResult):
     # note: _TestResult is a pure representation of results.
     # It lacks the output and reporting ability compared to unittest._TextTestResult.
@@ -532,7 +527,6 @@ class _TestResult(TestResult):
         # )
         self.result = []
 
-
     def startTest(self, test):
         TestResult.startTest(self, test)
         # just one buffer for both stdout and stderr
@@ -544,7 +538,6 @@ class _TestResult(TestResult):
         sys.stdout = stdout_redirector
         sys.stderr = stderr_redirector
 
-
     def complete_output(self):
         """
         Disconnect output redirection and return buffer.
@@ -557,25 +550,23 @@ class _TestResult(TestResult):
             self.stderr0 = None
         return self.outputBuffer.getvalue()
 
-
     def stopTest(self, test):
         # Usually one of addSuccess, addError or addFailure would have been called.
         # But there are some paths in unittest that would bypass this.
         # We must disconnect stdout in stopTest(), which is guaranteed to be called.
         self.complete_output()
 
-
     def addSuccess(self, test):
         self.success_count += 1
         TestResult.addSuccess(self, test)
         output = self.complete_output()
-        self.result.append((0, test, output, ''))
+        self.result.append((0, test, output, ""))
         if self.verbosity > 1:
-            sys.stderr.write('ok ')
+            sys.stderr.write("ok ")
             sys.stderr.write(str(test))
-            sys.stderr.write('\n')
+            sys.stderr.write("\n")
         else:
-            sys.stderr.write('.')
+            sys.stderr.write(".")
 
     def addError(self, test, err):
         self.error_count += 1
@@ -584,11 +575,11 @@ class _TestResult(TestResult):
         output = self.complete_output()
         self.result.append((2, test, output, _exc_str))
         if self.verbosity > 1:
-            sys.stderr.write('E  ')
+            sys.stderr.write("E  ")
             sys.stderr.write(str(test))
-            sys.stderr.write('\n')
+            sys.stderr.write("\n")
         else:
-            sys.stderr.write('E')
+            sys.stderr.write("E")
 
     def addFailure(self, test, err):
         self.failure_count += 1
@@ -597,16 +588,16 @@ class _TestResult(TestResult):
         output = self.complete_output()
         self.result.append((1, test, output, _exc_str))
         if self.verbosity > 1:
-            sys.stderr.write('F  ')
+            sys.stderr.write("F  ")
             sys.stderr.write(str(test))
-            sys.stderr.write('\n')
+            sys.stderr.write("\n")
         else:
-            sys.stderr.write('F')
+            sys.stderr.write("F")
 
 
 class HTMLTestRunner(Template_mixin):
-    """
-    """
+    """ """
+
     def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
         self.stream = stream
         self.verbosity = verbosity
@@ -621,32 +612,29 @@ class HTMLTestRunner(Template_mixin):
 
         self.startTime = datetime.datetime.now()
 
-
     def run(self, test):
         "Run the given test case or test suite."
         result = _TestResult(self.verbosity)
         test(result)
         self.stopTime = datetime.datetime.now()
         self.generateReport(test, result)
-        print >>sys.stderr, '\nTime Elapsed: %s' % (self.stopTime-self.startTime)
+        print >>sys.stderr, "\nTime Elapsed: %s" % (self.stopTime - self.startTime)
         return result
 
-
     def sortResult(self, result_list):
         # unittest does not seem to run in any particular order.
         # Here at least we want to group them together by class.
         rmap = {}
         classes = []
-        for n,t,o,e in result_list:
+        for n, t, o, e in result_list:
             cls = t.__class__
             if cls not in rmap:
                 rmap[cls] = []
                 classes.append(cls)
-            rmap[cls].append((n,t,o,e))
+            rmap[cls].append((n, t, o, e))
         r = [(cls, rmap[cls]) for cls in classes]
         return r
 
-
     def getReportAttributes(self, result):
         """
         Return report attributes as a list of (name, value).
@@ -655,68 +643,70 @@ class HTMLTestRunner(Template_mixin):
         startTime = str(self.startTime)[:19]
         duration = str(self.stopTime - self.startTime)
         status = []
-        if result.success_count: status.append('Pass %s'    % result.success_count)
-        if result.failure_count: status.append('Failure %s' % result.failure_count)
-        if result.error_count:   status.append('Error %s'   % result.error_count  )
+        if result.success_count:
+            status.append("Pass %s" % result.success_count)
+        if result.failure_count:
+            status.append("Failure %s" % result.failure_count)
+        if result.error_count:
+            status.append("Error %s" % result.error_count)
         if status:
-            status = ' '.join(status)
+            status = " ".join(status)
         else:
-            status = 'none'
+            status = "none"
         return [
-            ('Start Time', startTime),
-            ('Duration', duration),
-            ('Status', status),
+            ("Start Time", startTime),
+            ("Duration", duration),
+            ("Status", status),
         ]
 
-
     def generateReport(self, test, result):
         report_attrs = self.getReportAttributes(result)
-        generator = 'HTMLTestRunner %s' % __version__
+        generator = "HTMLTestRunner %s" % __version__
         stylesheet = self._generate_stylesheet()
         heading = self._generate_heading(report_attrs)
         report = self._generate_report(result)
         ending = self._generate_ending()
         output = self.HTML_TMPL % dict(
-            title = saxutils.escape(self.title),
-            generator = generator,
-            stylesheet = stylesheet,
-            heading = heading,
-            report = report,
-            ending = ending,
+            title=saxutils.escape(self.title),
+            generator=generator,
+            stylesheet=stylesheet,
+            heading=heading,
+            report=report,
+            ending=ending,
         )
-        self.stream.write(output.encode('utf8'))
-
+        self.stream.write(output.encode("utf8"))
 
     def _generate_stylesheet(self):
         return self.STYLESHEET_TMPL
 
-
     def _generate_heading(self, report_attrs):
         a_lines = []
         for name, value in report_attrs:
             line = self.HEADING_ATTRIBUTE_TMPL % dict(
-                    name = saxutils.escape(name),
-                    value = saxutils.escape(value),
-                )
+                name=saxutils.escape(name),
+                value=saxutils.escape(value),
+            )
             a_lines.append(line)
         heading = self.HEADING_TMPL % dict(
-            title = saxutils.escape(self.title),
-            parameters = ''.join(a_lines),
-            description = saxutils.escape(self.description),
+            title=saxutils.escape(self.title),
+            parameters="".join(a_lines),
+            description=saxutils.escape(self.description),
         )
         return heading
 
-
     def _generate_report(self, result):
         rows = []
         sortedResult = self.sortResult(result.result)
         for cid, (cls, cls_results) in enumerate(sortedResult):
             # subtotal for a class
             np = nf = ne = 0
-            for n,t,o,e in cls_results:
-                if n == 0: np += 1
-                elif n == 1: nf += 1
-                else: ne += 1
+            for n, t, o, e in cls_results:
+                if n == 0:
+                    np += 1
+                elif n == 1:
+                    nf += 1
+                else:
+                    ne += 1
 
             # format class description
             if cls.__module__ == "__main__":
@@ -724,67 +714,70 @@ class HTMLTestRunner(Template_mixin):
             else:
                 name = "%s.%s" % (cls.__module__, cls.__name__)
             doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
-            desc = doc and '%s: %s' % (name, doc) or name
+            desc = doc and "%s: %s" % (name, doc) or name
 
             row = self.REPORT_CLASS_TMPL % dict(
-                style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
-                desc = desc,
-                count = np+nf+ne,
-                Pass = np,
-                fail = nf,
-                error = ne,
-                cid = 'c%s' % (cid+1),
+                style=ne > 0 and "errorClass" or nf > 0 and "failClass" or "passClass",
+                desc=desc,
+                count=np + nf + ne,
+                Pass=np,
+                fail=nf,
+                error=ne,
+                cid="c%s" % (cid + 1),
             )
             rows.append(row)
 
-            for tid, (n,t,o,e) in enumerate(cls_results):
+            for tid, (n, t, o, e) in enumerate(cls_results):
                 self._generate_report_test(rows, cid, tid, n, t, o, e)
 
         report = self.REPORT_TMPL % dict(
-            test_list = ''.join(rows),
-            count = str(result.success_count+result.failure_count+result.error_count),
-            Pass = str(result.success_count),
-            fail = str(result.failure_count),
-            error = str(result.error_count),
+            test_list="".join(rows),
+            count=str(result.success_count + result.failure_count + result.error_count),
+            Pass=str(result.success_count),
+            fail=str(result.failure_count),
+            error=str(result.error_count),
         )
         return report
 
-
     def _generate_report_test(self, rows, cid, tid, n, t, o, e):
         # e.g. 'pt1.1', 'ft1.1', etc
         has_output = bool(o or e)
-        tid = (n == 0 and 'p' or 'f') + 't%s.%s' % (cid+1,tid+1)
-        name = t.id().split('.')[-1]
+        tid = (n == 0 and "p" or "f") + "t%s.%s" % (cid + 1, tid + 1)
+        name = t.id().split(".")[-1]
         doc = t.shortDescription() or ""
-        desc = doc and ('%s: %s' % (name, doc)) or name
-        tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
+        desc = doc and ("%s: %s" % (name, doc)) or name
+        tmpl = (
+            has_output
+            and self.REPORT_TEST_WITH_OUTPUT_TMPL
+            or self.REPORT_TEST_NO_OUTPUT_TMPL
+        )
 
         # o and e should be byte strings because they are collected from stdout and stderr?
-        if isinstance(o,str):
+        if isinstance(o, str):
             # TODO: some problem with 'string_escape': it escapes \n and messes up formatting
             # uo = unicode(o.encode('string_escape'))
-            uo = o.decode('latin-1')
+            uo = o.decode("latin-1")
         else:
             uo = o
-        if isinstance(e,str):
+        if isinstance(e, str):
             # TODO: some problem with 'string_escape': it escapes \n and messes up formatting
             # ue = unicode(e.encode('string_escape'))
-            ue = e.decode('latin-1')
+            ue = e.decode("latin-1")
         else:
             ue = e
 
         script = self.REPORT_TEST_OUTPUT_TMPL % dict(
-            id = tid,
-            output = saxutils.escape(uo+ue),
+            id=tid,
+            output=saxutils.escape(uo + ue),
         )
 
         row = tmpl % dict(
-            tid = tid,
-            Class = (n == 0 and 'hiddenRow' or 'none'),
-            style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none'),
-            desc = desc,
-            script = script,
-            status = self.STATUS[n],
+            tid=tid,
+            Class=(n == 0 and "hiddenRow" or "none"),
+            style=n == 2 and "errorCase" or (n == 1 and "failCase" or "none"),
+            desc=desc,
+            script=script,
+            status=self.STATUS[n],
         )
         rows.append(row)
         if not has_output:
@@ -806,6 +799,7 @@ class TestProgram(unittest.TestProgram):
     A variation of the unittest.TestProgram. Please refer to the base
     class for command line parameters.
     """
+
     def runTests(self):
         # Pick HTMLTestRunner as the default test runner.
         # base class's testRunner parameter is not useful because it means
@@ -814,6 +808,7 @@ class TestProgram(unittest.TestProgram):
             self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
         unittest.TestProgram.runTests(self)
 
+
 main = TestProgram
 
 ##############################################################################