module Engines
{
  // Serialized CORBA object reference (IOR) carried as a raw byte sequence.
  typedef sequence<octet> IORType;
  // Plain list of strings (used below to return the registered keys).
  typedef sequence<string> NSListOfStrings;
  // Naming service embedded in the Engines module (name resolution by path).
  interface EmbeddedNamingService
  {
    void Destroy_Name(in string Path);
    // Resolve a path to the stored object reference, returned as raw IOR bytes.
    IORType Resolve(in string Path);
    IORType ResolveFirst(in string Path);
    // All keys currently registered in this naming service.
    NSListOfStrings keys();
  };
};
interface LogManager
{
ContainerPerfLog declareContainer(in string contInNS, in string logfile);
+ void clear();
ListOfContainerPerfLog listOfContainerLogs();
SALOME::vectorOfByte getAllStruct( in boolean clearMemory );
void putStructInFileAtomic(in boolean clearMemory, in string fileName);
#include <iostream>
#include <stdexcept>
+#include <chrono>
+#include <iomanip>
+#include <ctime>
+
enum class VerbosityMode { undefined, nolog, withlog };
static VerbosityMode isActivated = VerbosityMode::undefined;
{
return VerbosityLevel() >= VerbosityLevelType::error_level;
}
+
+ void AppendTimeClock(std::ostream& os)
+ {
+ auto now = std::chrono::system_clock::now();
+ auto duration = now.time_since_epoch();
+ auto timestamp = std::chrono::system_clock::to_time_t(now);
+ std::tm *local_time = std::localtime(×tamp);
+ auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
+ os << std::setfill('0') << std::setw(2) << local_time->tm_hour << ":"
+ << std::setw(2) << local_time->tm_min << ":"
+ << std::setw(2) << local_time->tm_sec << "." << std::setw(3) << millis % 1000 << " - ";
+ }
}
bool BASICS_EXPORT IsInfoLevel();
bool BASICS_EXPORT IsWarningLevel();
bool BASICS_EXPORT IsErrorLevel();
+ void BASICS_EXPORT AppendTimeClock(std::ostream& os);
VerbosityLevelType BASICS_EXPORT VerbosityLevel();
std::string BASICS_EXPORT VerbosityLevelStr();
}
# shouldn't be needed after python 3.8
# see https://bugs.python.org/issue32573
argv = ['']
+ logging.debug("Instanciation of {} PID = {}".format(containerName,os.getpid()))
self._orb = CORBA.ORB_init(argv, CORBA.ORB_ID)
self._poa = self._orb.resolve_initial_references("RootPOA")
self._containerName = containerName
@property
def logm(self):
+ logging.debug("Logm PID = {}".format(os.getpid()))
import salome
if self._logm is None:
salome.salome_init()
return 1,"".join(l)
def create_pyscriptnode(self,nodeName,code):
+ logging.debug("create_pyscriptnode of {} PID = {}".format(nodeName,os.getpid()))
try:
logscript = None
if getSSLMode():
return 1,"".join(l)
def positionVerbosityOfLogger(self):
+ logging.debug("positionVerbosityOfLogger PID = {}".format(os.getpid()))
if VerbosityActivated():
import salome_utils
salome_utils.positionVerbosityOfLoggerRegardingState()
return self._timeIntervalInMs
def setLogFileName(self, logFileName):
+ logging.debug("setLogFileName {} PID = {}".format(logFileName,os.getpid()))
if getSSLMode():
self._log = self.logm.declareContainer( self._containerName, logFileName )
remain = m%1024
m//=1024
return "{} {}".format(m,UNITS[3])
+
+ @classmethod
+ def SpeedRepr(cls,memInBytePerS):
+ return "{}/s".format( cls.MemRepr(memInBytePerS) )
    def __init__(self):
        # Sampling period (ms) of the CPU/mem monitoring; set via the
        # measureTimeResolution property.
        self._measure_time_resolution_ms = None
        # HDD counter attached to the output serialization phase
        # (see outputHDDMem / outputWriteHDDSize).
        self._output_hdd_mem = None
        # Positions of this execution's slice in the container log
        # (presumably byte offsets into the log file — TODO confirm).
        self._start_pos_log = None
        self._stop_pos_log = None
        # Free-form user entries, appended through the freestyle setter.
        self._freestyle_log = []
+
    @property
    def freestyle(self):
        # List of free-form entries recorded so far.
        return self._freestyle_log

    @freestyle.setter
    def freestyle(self, value):
        # NOTE: assignment APPENDS to the list instead of replacing it; this
        # behavior is relied upon by addFreestyleAndFlush, which records one
        # entry per "instance.freestyle = value" statement.
        self._freestyle_log.append( value )
@property
def measureTimeResolution(self):
@property
def CPUMemDuringExecStr(self):
- cpu = self._cpu_mem_during_exec[::2]
- mem_rss = self._cpu_mem_during_exec[1::2]
- return [(a,ScriptExecInfo.MemRepr(b)) for a,b in self._cpu_mem_during_exec]
+ return [(a,ScriptExecInfo.MemRepr(b)) for a,b in self._cpu_mem_during_exec.data]
@property
def inputMem(self):
def outputMemStr(self):
return ScriptExecInfo.MemRepr( self.outputMem )
    def inputReadHDDSize(self):
        # Bytes read from disk while fetching the input objects.
        return self.inputHDDMem.getSizeOfFileRead()

    def inputReadHDDSizeRepr(self):
        # Human-readable form of inputReadHDDSize.
        return ScriptExecInfo.MemRepr( self.inputReadHDDSize() )

    def inputReadHDDSpeed(self):
        # Average read throughput (bytes/s) over the whole input phase.
        return self.inputReadHDDSize() / ( self.endInputTime - self.startInputTime ).total_seconds()

    def inputReadHDDSpeedRepr(self):
        return ScriptExecInfo.SpeedRepr( self.inputReadHDDSpeed() )

    def outputWriteHDDSize(self):
        # Bytes written to disk while dumping the output objects.
        # NOTE(review): calls getSizeOfFileRead() on the *output* counter —
        # presumably the counter API is symmetric; confirm the naming.
        return self.outputHDDMem.getSizeOfFileRead()

    def outputWriteHDDSizeRepr(self):
        return ScriptExecInfo.MemRepr( self.outputWriteHDDSize() )

    def outputWriteHDDSpeed(self):
        # Average write throughput (bytes/s) over the whole output phase.
        return self.outputWriteHDDSize() / ( self.endOutputTime - self.startOutputTime ).total_seconds()

    def outputWriteHDDSpeedRepr(self):
        return ScriptExecInfo.SpeedRepr( self.outputWriteHDDSpeed() )

@property
def inputHDDMem(self):
return self._input_hdd_mem
def endExecTime(self,value):
self._end_exec_time = value
+ @property
+ def execTime(self):
+ if ( self.endExecTime is not None ) and (self.startExecTime is not None):
+ return self.endExecTime - self.startExecTime
+ return None
+
+ @property
+ def fullExecTime(self):
+ return self.endOutputTime - self.startInputTime
+
@property
def startOutputTime(self):
return self._start_output_time
return ScriptExecInfo.GetRepresentationOfTimeDelta(self.endOutputTime,self.startOutputTime)
def __str__(self):
+ CPUMemDuringExecForStr = self.CPUMemDuringExecStr
+ if len( CPUMemDuringExecForStr ) > 30:
+ CPUMemDuringExecForStr = "{} ...".format( str(CPUMemDuringExecForStr[:30]) )
+ else:
+ CPUMemDuringExecForStr = str( CPUMemDuringExecForStr )
return """start exec time = {self.startExecTime}
end exec time = {self.endExecTime}
exec_time = {self.execTimeStr}
Measure time resolution = {self.measureTimeResolution} ms
-CPU and mem monitoring = {self.CPUMemDuringExecStr}
+CPU and mem monitoring = {CPUMemDuringExecForStr}
input unpickling and ev load from disk time = {self.inputTimeStr}
output serialization and ev write to disk time = {self.outputTimeStr}
input memory size before exec (MemoryPeak 2x) = {self.inputMemStr}
with open(self.father.father.logfile,"rb") as f:
cont = f.read()
return cont[self._eff.tracePosStart:self._eff.tracePosStop].decode()
    # ---------------------------------------------------------------------
    # All members below are thin forwarders: identification data comes from
    # the parent decorators (self.father...), measured data comes from the
    # wrapped ScriptExecInfo payload (self.get()).
    # ---------------------------------------------------------------------
    @property
    def nodeName(self):
        return self.father.get().nodeName
    @property
    def code(self):
        return self.father.code
    @property
    def ns_entry(self):
        return self.father.father.ns_entry
    @property
    def computingNode(self):
        return self.father.computingNode

    @property
    def freestyle(self):
        return self.get().freestyle

    @property
    def measureTimeResolution(self):
        return self.get().measureTimeResolution

    @property
    def CPUMemDuringExec(self):
        return self.get().CPUMemDuringExec

    @property
    def inputMem(self):
        return self.get().inputMem

    @property
    def outputMem(self):
        return self.get().outputMem

    @property
    def inputHDDMem(self):
        return self.get().inputHDDMem

    @property
    def outputHDDMem(self):
        return self.get().outputHDDMem

    def inputReadHDDSize(self):
        return self.get().inputReadHDDSize()

    def inputReadHDDSizeRepr(self):
        return self.get().inputReadHDDSizeRepr()

    def inputReadHDDSpeed(self):
        return self.get().inputReadHDDSpeed()

    def inputReadHDDSpeedRepr(self):
        return self.get().inputReadHDDSpeedRepr()

    def outputWriteHDDSize(self):
        return self.get().outputWriteHDDSize()

    def outputWriteHDDSizeRepr(self):
        return self.get().outputWriteHDDSizeRepr()

    def outputWriteHDDSpeed(self):
        return self.get().outputWriteHDDSpeed()

    def outputWriteHDDSpeedRepr(self):
        return self.get().outputWriteHDDSpeedRepr()

    @property
    def startInputTime(self):
        return self.get().startInputTime

    @property
    def endInputTime(self):
        return self.get().endInputTime

    @property
    def startExecTime(self):
        return self.get().startExecTime

    @property
    def endExecTime(self):
        return self.get().endExecTime

    @property
    def execTime(self):
        return self.get().execTime

    @property
    def fullExecTime(self):
        return self.get().fullExecTime

    @property
    def startOutputTime(self):
        return self.get().startOutputTime

    @property
    def endOutputTime(self):
        return self.get().endOutputTime
class ScriptInfoAbstract:
def __init__(self, scriptPtr):
return self._father
def get(self):
return self._eff
    @property
    def nodeName(self):
        # Forward to the wrapped script-info payload.
        return self.get().nodeName
    @property
    def code(self):
        return self.get().code
    @property
    def ns_entry(self):
        # Naming-service entry comes from the parent container log.
        return self.father.ns_entry
    @property
    def computingNode(self):
        return self.father.computingNode
def __getitem__(self,i):
return ScriptExecInfoDeco( self._eff[i], self )
def __len__(self):
return self._eff.__repr__()
class ContainerLogInfoAbstract:
-
+
+ @property
def log(self):
with open(self.logfile,"rb") as f:
cont = f.read()
def logfile(self):
return self._log_file
    @property
    def computingNode(self):
        # Node name extracted from the naming-service entry
        # (third component of the "/"-separated path).
        return ContainerLogInfoAbstract.ComputingNodeFromNSEntry( self.ns_entry )
+
def __len__(self):
return len( self._scripts )
return ScriptInfoDeco( self._scripts[i], self)
def __str__(self):
- return """NS entry = {self.ns_entry} LogFile = {self.logfile}""".format(**locals())
+ return """NS entry = {self.ns_entry} LogFile = {self.logfile}""".format(**locals())
+
+ @classmethod
+ def ComputingNodeFromNSEntry(cls, nsEntry):
+ return nsEntry.split("/")[2]
class ContainerLogInfoClt(ContainerLogInfoAbstract):
def __init__(self,contLogPtr):
class FakeObjVisitor:
    """No-op stand-in for an object visitor: silently accepts size/file
    notifications and reports no nested visitor."""
    def setHDDMem(self, v):
        pass

    def setFileName(self, fileName):
        pass

    def visitor(self):
        return None
if int(offset) != len(structData):
raise RuntimeError("Something went wrong during unserialization phase.")
return logManagerInst
+
def ListAllExecContainIn( listOfContainerLogInfo ):
    """
    For all ContainerLogInfo contained in listOfContainerLogInfo extract all ScriptExecInfo contained recursively
    in it. This method filters all "side" executions like those positioning environment for example.

    See also : ListAllExecFinishedContainIn

    Args:
    -----

    listOfContainerLogInfo (list<ContainerLogInfo>) : instance typically returned by salome.LogManagerLoadFromFile

    Returns
    -------

    list<ScriptExecInfoDeco> : all ScriptExecInfoDeco instance contained recursively in all input ContainerLogInfo instances

    """
    # Flatten the container -> script -> execution nesting in a single linear
    # pass (the previous sum(..., []) idiom was quadratic in the total size).
    allexecs = [myexec for cont in listOfContainerLogInfo for ps in cont for myexec in ps]
    # "Side" executions (e.g. environment setup) carry no payload: drop them.
    return [elt for elt in allexecs if elt.get() is not None]
+
def ListAllExecFinishedContainIn( listOfContainerLogInfo ):
    """
    For all ContainerLogInfo contained in listOfContainerLogInfo extract all ScriptExecInfo contained recursively
    in it. This method filters all "side" executions like those positioning environment for example and those not finished.

    See also : ListAllExecContainIn

    Args:
    -----

    listOfContainerLogInfo (list<ContainerLogInfo>) : instance typically returned by salome.LogManagerLoadFromFile

    Returns
    -------

    list<ScriptExecInfoDeco> : all ScriptExecInfoDeco instance contained recursively in all input ContainerLogInfo instances

    """
    beforeFiltering = ListAllExecContainIn( listOfContainerLogInfo )
    # Keep only executions whose exec phase actually completed (execTime computed).
    return [elt for elt in beforeFiltering if elt.get().execTime is not None]
+
def IsExecTimeHigherThan( execInstDeco, limitDuration ):
    """
    Tell whether execInstDeco carries a finished payload whose execution
    lasted longer than limitDuration.

    Example of Usage :

    [elt for elt in allexecs if IsExecTimeHigherThan(elt,datetime.timedelta(hours=1))]

    Args:
    -----

    execInstDeco (ScriptExecInfoDeco)
    limitDuration (datetime.timedelta) Ex (datetime.timedelta(hours=1))

    """
    payload = execInstDeco.get()
    if payload is None:
        return False
    return payload.execTime > limitDuration
+
def GetMaxTimeExec( listOfFinishedExecs ):
    """
    Returns the instance among listOfFinishedExecs that spends the max time to be executed

    Args:
    -----

    listOfFinishedExecs ( list<ScriptExecInfoDeco> ) : instance of ScriptExecInfoDeco that have finished to be executed
        Typically returned by ListAllExecFinishedContainIn

    Returns
    -------

    ScriptExecInfoDeco :
    """
    # max() returns the first maximal element, exactly like the previous
    # enumerate/index-tuple implementation, but without building a temp list.
    return max( listOfFinishedExecs, key = lambda elt : elt.execTime )
+
def GetMinTimeExec( listOfFinishedExecs ):
    """
    Returns the instance among listOfFinishedExecs that spends the min time to be executed

    Args:
    -----

    listOfFinishedExecs ( list<ScriptExecInfoDeco> ) : instance of ScriptExecInfoDeco that have finished to be executed
        Typically returned by ListAllExecFinishedContainIn

    Returns
    -------

    ScriptExecInfoDeco :
    """
    # min() returns the first minimal element, matching the previous
    # enumerate/index-tuple implementation.
    return min( listOfFinishedExecs, key = lambda elt : elt.execTime )
+
class DistriutionClass:
    """One bucket of the execution-time distribution: the executions whose
    execTime falls within [begin, end], kept sorted by increasing execTime.

    (The class name keeps its historical spelling: it is referenced elsewhere.)
    """
    def __init__(self, begin, end, listOfExecs):
        self._begin = begin
        self._end = end
        self._list_of_execs = sorted( listOfExecs, key = lambda elt : elt.execTime )

    def __getitem__(self, *args):
        return self._list_of_execs.__getitem__( *args )

    @property
    def begin(self):
        return self._begin

    @property
    def end(self):
        return self._end

    @property
    def listOfExecs(self):
        return self._list_of_execs

    @property
    def size(self):
        return len( self._list_of_execs )

    @property
    def reprGlobal(self):
        return f"[{self.begin}->{self.end}] : {self.size} execs"
+
class DistributionOfExecs:
    """
    Class in charge to distribute execution log instance into equi-classes
    """
    def __init__(self, listOfFinishedExecs, nbOfClasses):
        # The partition is computed once, at construction time.
        self._list_of_finished_execs = listOfFinishedExecs
        self._nb_of_classes = nbOfClasses
        self._classes = DistributionOfExecs.ComputeDistributionOfExecTime(self._list_of_finished_execs,self._nb_of_classes)

    def __getitem__(self, *args):
        return self._classes.__getitem__( *args )
    def reprOfExecs(self):
        # Textual summary: one line per class with its cumulative percentage.
        import numpy as np
        cs = np.cumsum( [elt.size for elt in self._classes] )
        ret = []
        for i,(elt,cum_nb) in enumerate( zip(self._classes,cs) ):
            ratio = (cum_nb / cs[-1])*100.0
            ret.append( f"- For class {i} - {elt.reprGlobal} ( {ratio:.1f}% )" )
        return "\n".join( ret )
    @classmethod
    def ComputeDistributionOfExecTime( cls, listOfFinishedExecs, nbOfClasses ):
        # Build nbOfClasses contiguous [begin, end] buckets spanning
        # [min(execTime), max(execTime)]. NOTE: boundaries are inclusive on
        # both sides, so an exec sitting exactly on an internal boundary is
        # counted in two adjacent classes.
        maxt = GetMaxTimeExec( listOfFinishedExecs )
        mint = GetMinTimeExec( listOfFinishedExecs )
        deltaTime = ( maxt.execTime - mint.execTime ) / nbOfClasses
        res = []
        for i in range( nbOfClasses ):
            begin = mint.execTime + i * deltaTime
            end = mint.execTime + (i+1) * deltaTime
            res.append( DistriutionClass(begin,end,[elt for elt in listOfFinishedExecs if elt.execTime >=begin and elt.execTime <= end]) )
        return res
MY_CONTAINER_ENTRY_IN_GLBS = "my_container"
+MY_PERFORMANCE_LOG_ENTRY_IN_GLBS = "my_log_4_this_session"
+
class Generic(SALOME__POA.GenericObj):
"""A Python implementation of the GenericObj CORBA IDL"""
def __init__(self,poa):
# default is 50 MB
SALOME_BIG_OBJ_ON_DISK_THRES_DFT = 50000000
# In-memory registry of per-proxy-file reference counts. Replaces the former
# scheme that stored a ctypes counter at the head of each proxy file
# (see IncrRefInFile / DecrRefInFile). Removed the stale deleted-line residue
# of that old implementation.
DicoForProxyFile = { }
def GetSizeOfBufferedReader(f):
"""
def GetObjectFromFile(fname, visitor = None):
    """Unpickle and return the object stored in file fname.

    Args:
        fname (str): path of a file written by DumpInFile.
        visitor: optional observer notified of the payload size and file name
                 before unpickling (expected to expose setHDDMem/setFileName).

    Returns:
        The unpickled object (no more in-file reference counter: residue of
        the old counter-header format removed).
    """
    with open(fname,"rb") as f:
        if visitor:
            visitor.setHDDMem( GetSizeOfBufferedReader(f) )
            visitor.setFileName( fname )
        obj = pickle.load(f)
    return obj
def DumpInFile(obj,fname):
    """Write the already-serialized byte string obj into file fname.

    Counterpart of GetObjectFromFile: obj is expected to be pickled bytes.
    (Residue of the old in-file counter header removed.)
    """
    with open(fname,"wb") as f:
        f.write( obj )
def IncrRefInFile(fname):
    """Increment the in-memory reference count of proxy file fname.

    A file absent from DicoForProxyFile is implicitly referenced once (its
    creation), so the first explicit share jumps the count straight to 2.
    (Old read/rewrite of an in-file ctypes counter removed; dead 'pass' dropped.)
    """
    if fname in DicoForProxyFile:
        DicoForProxyFile[fname] += 1
    else:
        DicoForProxyFile[fname] = 2
def DecrRefInFile(fname):
    """Decrement the reference count of proxy file fname and delete the file
    from disk once the last reference is gone.

    A file absent from DicoForProxyFile is treated as holding its single
    implicit reference (see IncrRefInFile), so it is removed immediately.
    (Old in-file ctypes counter logic removed; dead 'pass' dropped.)
    """
    if fname not in DicoForProxyFile:
        cnt = 1
    else:
        cnt = DicoForProxyFile[fname]
        DicoForProxyFile[fname] -= 1
        if cnt == 1:
            del DicoForProxyFile[fname]
    if cnt == 1:
        if os.path.exists(fname):
            os.unlink( fname )
def GetBigObjectOnDiskThreshold():
import os
DumpInFile( objSerialized, self._filename )
def get(self, visitor = None):
- obj, _ = GetObjectFromFile( self._filename, visitor )
+ obj = GetObjectFromFile( self._filename, visitor )
return obj
def __float__(self):
else:
return obj
class FileHolder:
    """Value object keeping track of a file name without owning the file."""
    def __init__(self, fileName):
        self._filename = fileName
    @property
    def filename(self):
        return self._filename

class FileDeleter(FileHolder):
    """FileHolder that owns the file: it is removed from disk when the
    instance is garbage collected. (Stray deleted-line residue of the old
    standalone class header removed.)"""
    def __init__(self, fileName):
        super().__init__( fileName )
    def __del__(self):
        import os
        if os.path.exists( self._filename ):
            os.unlink( self._filename )
class MonitoringInfo:
- def __init__(self, pyFileName, outFileName, pid):
+ def __init__(self, pyFileName, intervalInMs, outFileName, pid):
self._py_file_name = pyFileName
+ self._interval_in_ms = intervalInMs
self._out_file_name = outFileName
self._pid = pid
def pid(self):
return self._pid
+ @pid.setter
+ def pid(self, value):
+ self._pid = value
+
@property
def outFileName(self):
return self._out_file_name
+
+ @property
+ def intervalInMs(self):
+ return self._interval_in_ms
+
def FileSystemMonitoring(intervalInMs, dirNameToInspect, outFileName = None):
    """
    This method loops indefinitely every intervalInMs milliseconds to scan
    number of inodes and size of content recursively included into the in input directory.

    Args:
    ----

    intervalInMs (int) : sampling period in milliseconds
    dirNameToInspect (str) : directory to be monitored
    outFileName (str) : name of file inside the results will be written. If None a new file is generated

    See also CPUMemoryMonitoring
    """
    # (Removed stale deleted-line residue of LaunchTimeCPUMonitoring that was
    #  embedded in this docstring.)
    global orb
    import os
    dirNameToInspect2 = os.path.abspath( os.path.expanduser(dirNameToInspect) )
    import tempfile
    import logging
    import KernelBasis
    # outFileNameSave stores the content of outFileName during phase of dumping
    with tempfile.NamedTemporaryFile(prefix="fs_monitor_",suffix=".txt") as f:
        outFileNameSave = f.name
    with tempfile.NamedTemporaryFile(prefix="fs_monitor_",suffix=".py") as f:
        tempPyFile = f.name
    tempOutFile = outFileName
    if tempOutFile is None:
        tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
    with open(tempPyFile,"w") as f:
        f.write("""
import subprocess as sp
import re
import os
import time
import datetime
with open("{tempOutFile}","a") as f:
  f.write( "{{}}\\n".format( "{dirNameToInspect2}" ) )
  f.write( "{{}}\\n".format( "{intervalInMs}" ) )
  while(True):
    nbinodes = sp.check_output("{{}} | wc -l".format( " ".join(["find","{dirNameToInspect2}"]), ), shell = True).decode().strip()
    szOfDirStr = re.split("[\s]+",sp.check_output(["du","-sh","{dirNameToInspect2}"]).decode())[0]
    f.write( "{{}}\\n".format( str( datetime.datetime.now().timestamp() ) ) )
    f.write( "{{}}\\n".format( str( nbinodes ) ) )
    f.write( "{{}}\\n".format( str( szOfDirStr ) ) )
    f.flush()
    time.sleep( {intervalInMs} / 1000.0 )
""".format( **locals()))
    logging.debug( "File for FS monitoring dump file : {}".format(tempPyFile) )
    pyFileName = FileDeleter( tempPyFile )
    if outFileName is None:
        outFileName = FileDeleter( tempOutFile )
    else:
        outFileName = FileHolder(outFileName)
    return MonitoringInfo(pyFileName, intervalInMs, outFileName, None)
+
+def CPUMemoryMonitoring( intervalInMs, outFileName = None ):
"""
Launch a subprocess monitoring self process.
This monitoring subprocess is a python process lauching every intervalInMs ms evaluation of
- CPU usage and RSS memory.
+ CPU usage and RSS memory of the calling process.
Communication between subprocess and self is done by file.
+
+ Args:
+ ----
+ outFileName (str) : name of file inside the results will be written. If None a new file is generated
+
+ See also FileSystemMonitoring
"""
import KernelBasis
- def BuildPythonFileForCPUPercent( intervalInMs ):
+ def BuildPythonFileForCPUPercent( intervalInMs, outFileName):
import os
import tempfile
- with tempfile.NamedTemporaryFile(prefix="htop_",suffix=".py") as f:
+ with tempfile.NamedTemporaryFile(prefix="cpu_mem_monitor_",suffix=".py") as f:
tempPyFile = f.name
- tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
+ tempOutFile = outFileName
+ if tempOutFile is None:
+ tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
pid = os.getpid()
with open(tempPyFile,"w") as f:
f.write("""import psutil
process = psutil.Process( pid )
import time
with open("{}","a") as f:
+ f.write( "{{}}\\n".format( "{}" ) )
while True:
f.write( "{{}}\\n".format( str( process.cpu_percent() ) ) )
f.write( "{{}}\\n".format( str( process.memory_info().rss ) ) )
f.flush()
time.sleep( {} / 1000.0 )
-""".format(pid, tempOutFile, intervalInMs))
- return FileDeleter(tempPyFile), FileDeleter(tempOutFile)
- pyFileName, outFileName = BuildPythonFileForCPUPercent( intervalInMs )
- pid = KernelBasis.LaunchMonitoring(pyFileName.filename)
- return MonitoringInfo(pyFileName, outFileName, pid)
+""".format(pid, tempOutFile, intervalInMs, intervalInMs))
+ if outFileName is None:
+ autoOutFile = FileDeleter(tempOutFile)
+ else:
+ autoOutFile = FileHolder(tempOutFile)
+ return FileDeleter(tempPyFile),autoOutFile
+ pyFileName, outFileName = BuildPythonFileForCPUPercent( intervalInMs, outFileName )
+ return MonitoringInfo(pyFileName, intervalInMs, outFileName, None)
+
class GenericPythonMonitoringLauncherCtxMgr:
    """Context manager that starts a monitoring subprocess on enter and
    stops it on exit.

    Args:
    ----
    monitoringParams (MonitoringInfo)
    """
    def __init__(self, monitoringParams):
        self._monitoring_params = monitoringParams

    def __enter__(self):
        import KernelBasis
        # Launch the generated monitoring script and remember its pid so
        # that __exit__ can stop it.
        self._monitoring_params.pid = KernelBasis.LaunchMonitoring(self._monitoring_params.pyFileName.filename)
        return self._monitoring_params

    def __exit__(self, exctype, exc, tb):
        StopMonitoring( self._monitoring_params )
def StopMonitoring( monitoringInfo ):
"""
import KernelBasis
KernelBasis.StopMonitoring(monitoringInfo.pid)
class CPUMemInfo:
    """Monitoring result holding CPU/RSS samples taken every intervalInMs ms."""
    def __init__(self, intervalInMs, cpu, mem_rss):
        """
        Args:
        ----
        intervalInMs (int)
        cpu (list<float>) CPU usage
        mem_rss (list<int>) rss memory usage
        """
        self._interval_in_ms = intervalInMs
        self._data = [(a,b) for a,b in zip(cpu,mem_rss)]
    def __str__(self):
        # FIX: a stray "$" previously leaked into the rendered text.
        st = """Interval in ms : {self.intervalInMs}
Data : {self.data}
""".format( **locals() )
        return st
    @property
    def intervalInMs(self):
        return self._interval_in_ms
    @property
    def data(self):
        """
        list of pairs. First param of pair is cpu usage
        Second param of pair is memory usage
        """
        # FIX: docstring previously said "triplets" (copy-paste error).
        return self._data
+
def ReadCPUMemInfoInternal( fileName ):
    """Parse a CPU/memory monitoring dump file into a CPUMemInfo.

    File layout: the interval in ms on the first line, then alternating
    cpu / rss samples. A missing file yields an empty CPUMemInfo.
    """
    intervalInMs = 0
    cpu = []
    mem_rss = []
    if os.path.exists( fileName ):
        with open(fileName, "r") as f:
            rawLines = [ line.strip() for line in f.readlines() ]
        intervalInMs = int( rawLines[0] )
        samples = rawLines[1:]
        cpu = [ float(elt) for elt in samples[::2] ]
        mem_rss = [ int(elt) for elt in samples[1::2] ]
    return CPUMemInfo(intervalInMs,cpu,mem_rss)
+
def ReadCPUMemInfo( monitoringInfo ):
    """
    Retrieve CPU/Mem data of monitoring.

    Args:
    ----
    monitoringInfo (MonitoringInfo): monitoring description holding the dump file name

    Returns
    -------
    CPUMemInfo instance
    """
    # Removed stale deleted-line residue of the old KernelBasis-based parser;
    # parsing is delegated to ReadCPUMemInfoInternal.
    return ReadCPUMemInfoInternal( monitoringInfo.outFileName.filename )
+
class InodeSizeInfo:
    """Monitoring result holding timestamped (#inodes, directory size) samples."""
    def __init__(self, dirNameMonitored, intervalInMs, timeStamps, nbInodes, volumeOfDir):
        """
        Args:
        ----
        dirNameMonitored (str)
        intervalInMs (int)
        timeStamps (list<datetimestruct>)
        nbInodes (list<int>)
        volumeOfDir (list<str>)
        """
        self._dir_name_monitored = dirNameMonitored
        self._interval_in_ms = intervalInMs
        self._data = [(t,a,b) for t,a,b in zip(timeStamps,nbInodes,volumeOfDir)]
    def __str__(self):
        # FIX: stray "$" characters previously leaked into the rendered text.
        st = """Filename monitored : {self.dirNameMonitored}
Interval in ms : {self.intervalInMs}
Data : {self.data}
""".format( **locals() )
        return st
    @property
    def dirNameMonitored(self):
        return self._dir_name_monitored
    @property
    def intervalInMs(self):
        return self._interval_in_ms
    @property
    def data(self):
        """
        list of triplets. First param of triplet is datetimestruct
        Second param of triplet is #inodes.
        Third param of triplet is size.
        """
        return self._data
+
def ReadInodeSizeInfoInternal( fileName ):
    """Parse a filesystem monitoring dump file into an InodeSizeInfo.

    File layout: monitored directory, then interval in ms, followed by
    repeating (timestamp, #inodes, size) triplets.
    """
    import datetime
    # (unused "import os" removed)
    with open(fileName, "r") as f:
        coarseData = [ elt.strip() for elt in f.readlines() ]
    dirNameMonitored = coarseData[0] ; intervalInMs = int( coarseData[1] ) ; coarseData = coarseData[2:]
    tss = [ datetime.datetime.fromtimestamp( float(elt) ) for elt in coarseData[::3] ]
    nbInodes = [int(elt) for elt in coarseData[1::3]]
    volumeOfDir = coarseData[2::3]
    return InodeSizeInfo(dirNameMonitored,intervalInMs,tss,nbInodes,volumeOfDir)
+
def ReadInodeSizeInfo( monitoringInfo ):
    """
    Retrieve nb of inodes and size of monitoring

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring

    Returns
    -------
    InodeSizeInfo
    """
    dumpFile = monitoringInfo.outFileName.filename
    return ReadInodeSizeInfoInternal( dumpFile )
class SeqByteReceiver:
# 2GB limit to trigger split into chunks
self._remote_handle = handleToCentralizedInst
self._current_instance = ScriptExecInfo()
    def addFreestyleAndFlush(self, value):
        # Record a user "freestyle" entry on the current execution payload
        # (the freestyle setter appends) and immediately push the session to
        # the master side so the entry survives a later crash.
        self._current_instance.freestyle = value
        self.finalizeAndPushToMaster()
+
def addInfoOnLevel2(self, key, value):
setattr(self._current_instance,key,value)
def executeFirst(self,argsin):
""" Same than first part of self.execute to reduce memory peak."""
+ def ArgInMananger(self,argsin):
+ argsInPy = SeqByteReceiver( argsin )
+ data = argsInPy.data()
+ self.addInfoOnLevel2("inputMem",len(data))
+ _,kws=pickle.loads(data)
+ return kws
try:
self.beginOfCurrentExecutionSession()
- data = None
self.addTimeInfoOnLevel2("startInputTime")
- if True: # to force call of SeqByteReceiver's destructor
- argsInPy = SeqByteReceiver( argsin )
- data = argsInPy.data()
- self.addInfoOnLevel2("inputMem",len(data))
- _,kws=pickle.loads(data)
+ # to force call of SeqByteReceiver's destructor
+ kws = ArgInMananger(self,argsin)
vis = InOutputObjVisitor()
for elt in kws:
# fetch real data if necessary
self.addTimeInfoOnLevel2("startExecTime")
##
self.addInfoOnLevel2("measureTimeResolution",self.my_container_py.monitoringtimeresms())
- monitoringParams = LaunchTimeCPUMonitoring( self.my_container_py.monitoringtimeresms() )
- exec(self.ccode, self.context)
- StopMonitoring( monitoringParams )
- cpumeminfo = ReadCPUMemInfo( monitoringParams )
+ with GenericPythonMonitoringLauncherCtxMgr( CPUMemoryMonitoring( self.my_container_py.monitoringtimeresms() ) ) as monitoringParams:
+ exec(self.ccode, self.context)
+ cpumeminfo = ReadCPUMemInfo( monitoringParams )
##
self.addInfoOnLevel2("CPUMemDuringExec",cpumeminfo)
del monitoringParams
    def beginOfCurrentExecutionSession(self):
        # Open a fresh log session and expose it to the executed script via
        # the MY_PERFORMANCE_LOG_ENTRY_IN_GLBS global of its context.
        self._current_execution_session = LogOfCurrentExecutionSession( self._log_script.addExecutionSession() )
        self.context[MY_PERFORMANCE_LOG_ENTRY_IN_GLBS] = self._current_execution_session
def endOfCurrentExecutionSession(self):
self._current_execution_session.finalizeAndPushToMaster()
type(logm).NaiveFetch = LogManagerNaiveFetch
type(logm).Fetch = LogManagerFetch
type(logm).DumpInFile = LogManagerDumpInFile
- type(logm).LoadFromFile = LogManagerLoadFromFile
type(logm).LaunchMonitoringDumpFile = LogManagerLaunchMonitoringDumpFile
type(logm).GetLatestMonitoringDumpFile = LogManagerGetLatestMonitoringDumpFile
+ type(logm).DumpIORInFile = LogManagerDumpIORInFile
#
import KernelLogger
naming_service.Register(KernelLogger.myLogger(),"/Logger")
with open(fileName,"wb") as f:
f.write( self.getAllStruct( clearMemory ) )
-def LogManagerLoadFromFile(self,fileName):
- from SALOME_ContainerHelper import unserializeLogManager
- with open(fileName,"rb") as f:
- data = f.read()
- return unserializeLogManager( data )
class LogManagerLaunchMonitoringFileCtxMgr:
def __init__(self, intervalInMs, outFileName):
logging.debug( "File for monitoring dump file : {}".format(tempPyFile) )
pyFileName = SALOME_PyNode.FileDeleter( tempPyFile )
pid = KernelBasis.LaunchMonitoring( tempPyFile )
- return SALOME_PyNode.MonitoringInfo(pyFileName,None,pid)
+ return SALOME_PyNode.MonitoringInfo(pyFileName,intervalInMs,None,pid)
+
def LogManagerDumpIORInFile(self, iorFileName):
    # Serialize the CORBA reference of the module-global LogManager servant
    # into iorFileName so an external process can rebind to it
    # (see LogManagerLoadFromIORFile).
    global logm
    with open(iorFileName,"w") as f:
        f.write( orb.object_to_string( logm ) )
+
def LogManagerLoadFromFile(fileName):
    """Rebuild a LogManager snapshot from a binary dump file."""
    from SALOME_ContainerHelper import unserializeLogManager
    with open(fileName,"rb") as f:
        rawData = f.read()
    return unserializeLogManager( rawData )
+
def LogManagerLoadFromIORFile( iorFile ):
    # Connect to a remote LogManager whose IOR was dumped into iorFile
    # (see LogManagerDumpIORInFile), ask it to dump its state into a temp
    # file, then unserialize that dump locally.
    global orb
    def LoadAndWrite(logm,tempFileName):
        import SALOME_PyNode
        # Remote side writes its state; FileDeleter removes the temp file
        # once the snapshot has been read back.
        logm.putStructInFileAtomic( False, tempFileName )
        tempFileAuto = SALOME_PyNode.FileDeleter( tempFileName )
        ret = LogManagerLoadFromFile( tempFileAuto.filename )
        return ret
    with open(iorFile,"r") as f:
        ior = f.read()
    import Engines
    import tempfile
    salome_init_without_session()
    logm = orb.string_to_object( ior )
    # NamedTemporaryFile is used only to reserve a unique name in $HOME
    # (shared filesystem assumed — TODO confirm); the file itself is
    # recreated by the remote dump.
    with tempfile.NamedTemporaryFile(dir=os.path.expanduser("~")) as f:
        tempFileName = f.name
    return LoadAndWrite( logm, tempFileName )
def LogManagerGetLatestMonitoringDumpFile(self):
import shutil
#include <sstream>
static Engines::LogManager_var LogManagerInstanceSingleton;
+static SALOME::ExternalServerLauncher_var ExternalServerLauncherSingleton;
std::string RetrieveInternalInstanceOfLocalCppResourcesManager()
{
/*!
 * Return the stringified IOR of the process-wide ExternalServerLauncher.
 * The servant is created once and cached in ExternalServerLauncherSingleton;
 * later calls only re-stringify the cached reference.
 * (Removed mixed -/+ diff residue that duplicated the non-cached code path.)
 */
std::string GetExternalServerInstance()
{
  CORBA::ORB_ptr orb = KERNEL::getORB();
  if( CORBA::is_nil(ExternalServerLauncherSingleton) )
  {
    CORBA::Object_var obj = orb->resolve_initial_references("RootPOA");
    PortableServer::POA_var root_poa = PortableServer::POA::_narrow(obj);
    //
    PortableServer::POA_var safePOA = root_poa->find_POA("SingleThreadPOA",true);
    //
    SALOME_CPythonHelper *cPyh(SALOME_CPythonHelper::Singleton());
    SALOME_Fake_NamingService *ns = new SALOME_Fake_NamingService;
    SALOME_ExternalServerLauncher *esm(new SALOME_ExternalServerLauncher(cPyh,orb,safePOA,ns));
    esm->_remove_ref(); // POA holds the remaining reference after activation
    //
    CORBA::Object_var esmPtr = safePOA->servant_to_reference(esm);
    ExternalServerLauncherSingleton = SALOME::ExternalServerLauncher::_narrow(esmPtr);
  }
  //
  CORBA::String_var ior = orb->object_to_string(ExternalServerLauncherSingleton);
  return std::string(ior.in());
}
#include <Basics_Utils.hxx>
#endif
+#include "utilities.h"
+
#include <sstream>
#include <fstream>
#include <algorithm>
return ret._retn();
}
+SALOME_ContainerScriptPerfLog::~SALOME_ContainerScriptPerfLog()
+{
+ for(auto execSession : _sessions)
+ {
+ PortableServer::ServantBase *serv = getPOA()->reference_to_servant(execSession);
+ PortableServer::ObjectId_var oid = getPOA()->reference_to_id(execSession);
+ getPOA()->deactivate_object(oid);
+ }
+ _sessions.clear();
+}
+
PortableServer::POA_var SALOME_ContainerScriptExecPerfLog::getPOA()
{
return father()->getPOA();
visitor.leaveContainerScriptPerfLog( *this );
}
+
Engines::ContainerScriptExecPerfLog_ptr SALOME_ContainerScriptPerfLog::addExecutionSession()
{
SALOME_ContainerScriptExecPerfLog *execution = new SALOME_ContainerScriptExecPerfLog(this);
/////
+SALOME_ContainerPerfLog::~SALOME_ContainerPerfLog()
+{
+  // Deactivate every per-script CORBA object registered on the POA so that
+  // the POA drops its reference and the servant can be reclaimed.
+  // NB: do NOT call reference_to_servant() here: it returns the servant with
+  // an incremented refcount and, left unreleased, would leak the servant.
+  for(auto script : _scripts)
+    {
+      PortableServer::ObjectId_var oid = getPOA()->reference_to_id(script);
+      getPOA()->deactivate_object(oid);
+    }
+  _scripts.clear();
+}
+
PortableServer::POA_var SALOME_ContainerPerfLog::getPOA()
{
return father()->getPOA();
}
}
+//! Destructor : deactivate and forget all container perf-log objects (delegates to clear()).
+SALOME_LogManager::~SALOME_LogManager()
+{
+  this->clear();
+}
+
+/*!
+ * Deactivate every ContainerPerfLog CORBA object registered on the POA and
+ * forget them. Exposed through the Engines::LogManager IDL so clients can
+ * reset the log manager between runs.
+ */
+void SALOME_LogManager::clear()
+{
+  for(auto cont : _containers)
+    {
+      // Only the ObjectId is needed to deactivate the object. Calling
+      // reference_to_servant() here (as a previous revision did) returns the
+      // servant with an incremented refcount and would leak it if unreleased.
+      PortableServer::ObjectId_var oid = getPOA()->reference_to_id(cont);
+      getPOA()->deactivate_object(oid);
+    }
+  _containers.clear();
+}
+
Engines::ContainerPerfLog_ptr SALOME_LogManager::declareContainer(const char *contInNS, const char *logfile)
{
SALOME_ContainerPerfLog *cont = new SALOME_ContainerPerfLog(this,contInNS,logfile);
{
public:
SALOME_ContainerScriptPerfLog(SALOME_ContainerPerfLog *father, const std::string& name, const std::string& code):_father(father),_name(name),_code(code) { }
+ ~SALOME_ContainerScriptPerfLog();
SALOME_ContainerPerfLog *father() const { return _father; }
void setPyObj(PyObject *obj) { _pyScriptLog.set(obj); }
PyObject *pyObj() { return _pyScriptLog.get(); }
{
public:
SALOME_ContainerPerfLog(SALOME_LogManager *father, const std::string& nameInNS, const std::string& logFile):_father(father),_name_in_ns(nameInNS),_log_file(logFile) { }
+ ~SALOME_ContainerPerfLog();
SALOME_LogManager *father() const { return _father; }
void setPyObj(PyObject *obj) { _pyContLog.set(obj); }
PyObject *pyObj() { return _pyContLog.get(); }
SALOME_LogManager(CORBA::ORB_ptr orb, PortableServer::POA_var poa, SALOME_NamingService_Abstract *ns = nullptr);
PortableServer::POA_var getPOA() { return _poa; }
PyObject *pyHelper() const { return _pyLogManager; }
- virtual ~SALOME_LogManager() = default;
+ ~SALOME_LogManager();
+ void clear() override;
Engines::ContainerPerfLog_ptr declareContainer(const char *contInNS, const char *logfile) override;
Engines::ListOfContainerPerfLog *listOfContainerLogs() override;
SALOME::vectorOfByte *getAllStruct(bool clearMemory) override;
import pickle
import tempfile
import logging
+from datetime import datetime
def flush():
import sys
"""
hostname = "localhost"
cp = pylauncher.GetRequestForGiveContainer(hostname,"container_test")
+ salome.logm.clear()
#PROXY_THRES = "-1"
PROXY_THRES = "1"
- with tempfile.TemporaryDirectory() as tmpdirnameMonitoring:
- monitoringFile = os.path.join( str( tmpdirnameMonitoring ), "zeHtop.pckl" )
- monitoringFileTwo = os.path.join( str( tmpdirnameMonitoring ), "zeHtopTwo.pckl" )
- logging.debug("Monitoring file : {}".format(monitoringFile))
- with tempfile.TemporaryDirectory() as tmpdirname:
- with salome.LogManagerLaunchMonitoringFileCtxMgr(250,monitoringFile) as monitoringParams:
- pyFileContainingCodeOfMonitoring = monitoringParams.pyFileName.filename
- logging.debug("Python file containing code of monitoring : {}".format(pyFileContainingCodeOfMonitoring))
- val_for_big_obj = str( tmpdirname )
- os.environ["SALOME_FILE_BIG_OBJ_DIR"] = val_for_big_obj
- # Override environement for all containers launched
- salome.cm.SetOverrideEnvForContainersSimple(env = [("SALOME_FILE_BIG_OBJ_DIR",val_for_big_obj),("SALOME_BIG_OBJ_ON_DISK_THRES",PROXY_THRES)])
- salome.cm.SetDeltaTimeBetweenCPUMemMeasureInMilliSecond( 250 )
- cont = salome.cm.GiveContainer(cp)
- logging.debug("{} {}".format(40*"*",cont.getPID()))
- script_st = """
+ with SALOME_PyNode.GenericPythonMonitoringLauncherCtxMgr( SALOME_PyNode.FileSystemMonitoring(1000,os.path.dirname( salome.__file__ )) ) as monitoringParamsForFileMaster:
+ with SALOME_PyNode.GenericPythonMonitoringLauncherCtxMgr( SALOME_PyNode.CPUMemoryMonitoring(1000) ) as monitoringParamsMaster:
+ with tempfile.TemporaryDirectory() as tmpdirnameMonitoring:
+ monitoringFile = os.path.join( str( tmpdirnameMonitoring ), "zeHtop.pckl" )
+ monitoringFileTwo = os.path.join( str( tmpdirnameMonitoring ), "zeHtopTwo.pckl" )
+ iorLogm = os.path.join( str( tmpdirnameMonitoring ), "logm.ior" )
+ with open(iorLogm,"w") as f:
+ f.write( salome.orb.object_to_string(salome.logm) )
+ logging.debug("Monitoring file : {}".format(monitoringFile))
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ with salome.LogManagerLaunchMonitoringFileCtxMgr(250,monitoringFile) as monitoringParams:
+ pyFileContainingCodeOfMonitoring = monitoringParams.pyFileName.filename
+ logging.debug("Python file containing code of monitoring : {}".format(pyFileContainingCodeOfMonitoring))
+ val_for_big_obj = str( tmpdirname )
+ os.environ["SALOME_FILE_BIG_OBJ_DIR"] = val_for_big_obj
+ # Override environement for all containers launched
+ salome.cm.SetOverrideEnvForContainersSimple(env = [("SALOME_FILE_BIG_OBJ_DIR",val_for_big_obj),("SALOME_BIG_OBJ_ON_DISK_THRES",PROXY_THRES)])
+ salome.cm.SetDeltaTimeBetweenCPUMemMeasureInMilliSecond( 250 )
+ cont = salome.cm.GiveContainer(cp)
+ logging.debug("{} {}".format(40*"*",cont.getPID()))
+ script_st = """
import logging
import sys
import KernelBasis
+from datetime import datetime
cst = KernelBasis.GetTimeAdjustmentCst()
logging.debug("constant = {}".format(cst))
nbcore = 3
+my_log_4_this_session.addFreestyleAndFlush( ("a",datetime.now()) ) # exemple of custom tracking
print("coucou {} {}".format(len(zeinput0),len(zeinput1)))
logging.debug("debug or not debug")
ob = [ [ bytes(3000000) ] ]
print("Time ellapse spent : {} s".format(ts))
sys.stderr.write("fake error message\\n")
"""
- poa = salome.orb.resolve_initial_references("RootPOA")
- zeinput0 = [ bytes(100000000) ]
- if SALOME_PyNode.GetBigObjectOnDiskThreshold() != -1:
- zeinput0 = SALOME_PyNode.ProxyfyPickeled( zeinput0 )
- zeinput0.unlinkOnDestructor()
- obj = SALOME_PyNode.SenderByte_i(poa,pickle.dumps( (["zeinput0"],{"zeinput0": [zeinput0], "zeinput1": [ [zeinput0], [zeinput0] ] }) ))
- id_o = poa.activate_object(obj)
- refPtr = poa.id_to_reference(id_o)
- pyscript2 = cont.createPyScriptNode("testScript2",script_st)
- pyscript2.executeFirst(refPtr)
- ret2 = pyscript2.executeSecond(["ob","ob2"])# generate a DeprecationWarning: PY_SSIZE_T_CLEAN will be required for '#' formats on debian11 ?
- ret3, fileNamesProxyOut = unProxyfy( ret2 )
- logging.getLogger().debug("test logging 1")
- logging.debug("test logging 2")
- logging.debug( salome.orb.object_to_string( salome.logm ) )
- a = salome.logm.NaiveFetch()
- logging.debug(a)
- logging.debug(a[0][1][0])
- logging.debug( a[0][1][0].get()._input_hdd_mem._data[0]._data[0]._hdd_mem ) # important
- logging.debug( a[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
- fileNameProxyIn = a[0][1][0].get()._input_hdd_mem._data[0]._data[0]._file_name
- logging.debug( fileNameProxyIn )
- del zeinput0
- del ret3
- import gc ; gc.collect()
- if fileNameProxyIn is not None:
- if os.path.exists(fileNameProxyIn):
- raise RuntimeError("Oooops 2")
- for fileNameProxyOut in fileNamesProxyOut:
- if fileNameProxyOut is not None:
- if os.path.exists(fileNameProxyOut):
- raise RuntimeError("Oooops 3")
- # execution #2 inside last
- script_st2 = """
+ poa = salome.orb.resolve_initial_references("RootPOA")
+ zeinput0 = [ bytes(100000000) ]
+ if SALOME_PyNode.GetBigObjectOnDiskThreshold() != -1:
+ zeinput0 = SALOME_PyNode.ProxyfyPickeled( zeinput0 )
+ zeinput0.unlinkOnDestructor()
+ obj = SALOME_PyNode.SenderByte_i(poa,pickle.dumps( (["zeinput0"],{"zeinput0": [zeinput0], "zeinput1": [ [zeinput0], [zeinput0] ] }) ))
+ id_o = poa.activate_object(obj)
+ refPtr = poa.id_to_reference(id_o)
+ pyscript2 = cont.createPyScriptNode("testScript2",script_st)
+ pyscript2.executeFirst(refPtr)
+ ret2 = pyscript2.executeSecond(["ob","ob2"])# generate a DeprecationWarning: PY_SSIZE_T_CLEAN will be required for '#' formats on debian11 ?
+ ret3, fileNamesProxyOut = unProxyfy( ret2 )
+ logging.getLogger().debug("test logging 1")
+ logging.debug("test logging 2")
+ logging.debug( salome.orb.object_to_string( salome.logm ) )
+ a = salome.logm.NaiveFetch()
+ logging.debug(a)
+ fs = a[0][1][0].get().freestyle
+ self.assertEqual(len(fs),1)
+ self.assertEqual(fs[0][0],"a")
+ self.assertTrue( isinstance( fs[0][1], datetime ) )
+ logging.debug(a[0][1][0])
+ logging.debug( a[0][1][0].get()._input_hdd_mem._data[0]._data[0]._hdd_mem ) # important
+ logging.debug( a[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
+ fileNameProxyIn = a[0][1][0].get()._input_hdd_mem._data[0]._data[0]._file_name
+ logging.debug( fileNameProxyIn )
+ del zeinput0
+ del ret3
+ import gc ; gc.collect()
+ if fileNameProxyIn is not None:
+ if os.path.exists(fileNameProxyIn):
+ raise RuntimeError("Oooops 2")
+ for fileNameProxyOut in fileNamesProxyOut:
+ if fileNameProxyOut is not None:
+ if os.path.exists(fileNameProxyOut):
+ raise RuntimeError("Oooops 3")
+ # execution #2 inside last
+ script_st2 = """
import logging
b = 7+a
logging.debug("Execution 2")
import time
time.sleep(1)
"""
- obj2 = SALOME_PyNode.SenderByte_i(poa,pickle.dumps((["a"],{"a":3})))
- id2_o = poa.activate_object(obj2)
- refPtr2 = poa.id_to_reference(id2_o)
- pyscript2.assignNewCompiledCode(script_st2)
- pyscript2.executeFirst(refPtr2)
- ret2_0 = pyscript2.executeSecond(["b"])
- ret2_1, fileNamesProxyOut2 = unProxyfy( ret2_0 )
- logging.debug( fileNamesProxyOut2 )
- a = salome.logm.NaiveFetch()
- del ret2_1
+ obj2 = SALOME_PyNode.SenderByte_i(poa,pickle.dumps((["a"],{"a":3})))
+ id2_o = poa.activate_object(obj2)
+ refPtr2 = poa.id_to_reference(id2_o)
+ pyscript2.assignNewCompiledCode(script_st2)
+ pyscript2.executeFirst(refPtr2)
+ ret2_0 = pyscript2.executeSecond(["b"])
+ ret2_1, fileNamesProxyOut2 = unProxyfy( ret2_0 )
+ logging.debug( fileNamesProxyOut2 )
+ a = salome.logm.NaiveFetch()
+ del ret2_1
+ import gc ; gc.collect()
+ for fileNameProxyOut in fileNamesProxyOut2:
+ if fileNameProxyOut is not None:
+ if os.path.exists(fileNameProxyOut):
+ raise RuntimeError("Oooops 3")
+ #
+ fname = os.path.join(str( tmpdirname ),"perf.log")
+ salome.logm.DumpInFile( fname )
+ logManagerInst0 = salome.LogManagerLoadFromFile( fname )
+ logging.debug( logManagerInst0[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
+ logManagerInst = salome.logm.Fetch(True)
+ logManagerInst2 = salome.logm.Fetch(True)
+ logging.debug( salome.LogManagerLoadFromIORFile( iorLogm )[0][1][0].get() )
+ salome.logm.putStructInFileAtomic(False,monitoringFileTwo)
+ logging.debug( salome.LogManagerLoadFromFile(monitoringFileTwo)[0][1][0].get() )
+ logging.debug( logManagerInst[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
+ self.assertTrue( logManagerInst2[0][1][0].get() is None )
+ self.assertTrue( logManagerInst[0][1][1].get()._output_hdd_mem._data[0]._file_name == fileNamesProxyOut2[0] )
+ logging.debug( logManagerInst[0][1][1].log() )
+ # 2 files because a backup file is stored in case of unexpected kill during
+ self.assertEqual( len( glob.glob("{}*".format(monitoringFile) ) ) , 2 )
+ # leaving MonitoringFile Manager -> backup file is killed
+ pass
+ #self.assertEqual(monitoringFileSafe, monitoringFile)
+ self.assertEqual( len( glob.glob("{}*".format(monitoringFile) ) ) , 1 )
+ logging.debug( salome.LogManagerLoadFromFile(monitoringFile)[0][1][0].get() )
+ del monitoringParams
import gc ; gc.collect()
- for fileNameProxyOut in fileNamesProxyOut2:
- if fileNameProxyOut is not None:
- if os.path.exists(fileNameProxyOut):
- raise RuntimeError("Oooops 3")
- #
- fname = os.path.join(str( tmpdirname ),"perf.log")
- salome.logm.DumpInFile( fname )
- logManagerInst0 = salome.logm.LoadFromFile( fname )
- logging.debug( logManagerInst0[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
- logManagerInst = salome.logm.Fetch(True)
- logManagerInst2 = salome.logm.Fetch(True)
- salome.logm.putStructInFileAtomic(False,monitoringFileTwo)
- logging.debug( salome.logm.LoadFromFile(monitoringFileTwo)[0][1][0].get() )
- logging.debug( logManagerInst[0][1][0].get()._input_hdd_mem._data[1]._data[0]._data[0]._hdd_mem ) # important
- self.assertTrue( logManagerInst2[0][1][0].get() is None )
- self.assertTrue( logManagerInst[0][1][1].get()._output_hdd_mem._data[0]._file_name == fileNamesProxyOut2[0] )
- logging.debug( logManagerInst[0][1][1].log() )
- # 2 files because a backup file is stored in case of unexpected kill during
- self.assertEqual( len( glob.glob("{}*".format(monitoringFile) ) ) , 2 )
- # leaving MonitoringFile Manager -> backup file is killed
- pass
- #self.assertEqual(monitoringFileSafe, monitoringFile)
- self.assertEqual( len( glob.glob("{}*".format(monitoringFile) ) ) , 1 )
- logging.debug( salome.logm.LoadFromFile(monitoringFile)[0][1][0].get() )
- del monitoringParams
- import gc ; gc.collect()
- self.assertFalse( os.path.exists(pyFileContainingCodeOfMonitoring) )
- cont.Shutdown()
+ self.assertFalse( os.path.exists(pyFileContainingCodeOfMonitoring) )
+ cont.Shutdown()
+ fnToTest0 = monitoringParamsMaster.outFileName.filename
+ cpumeminfo = SALOME_PyNode.ReadCPUMemInfo( monitoringParamsMaster )
+ self.assertTrue( isinstance(monitoringParamsMaster.outFileName,SALOME_PyNode.FileDeleter) )
+ del monitoringParamsMaster
+ import gc
+ gc.collect()
+ self.assertTrue( os.path.exists( fnToTest0 ) )
+ dirInfo = SALOME_PyNode.ReadInodeSizeInfo( monitoringParamsForFileMaster )
+ self.assertTrue( isinstance(monitoringParamsForFileMaster.outFileName,SALOME_PyNode.FileDeleter) )
+ self.assertFalse( os.path.exists( fnToTest0 ) )
+ logging.debug( cpumeminfo )
+ logging.debug( dirInfo )
+
+ def testPerfLogManager1(self):
+ """
+ [EDF29150] : Similar than testPerfLogManager1 but with out fileName precised for CPU/Mem monitoring and FS monitoring
+ """
+ hostname = "localhost"
+ cp = pylauncher.GetRequestForGiveContainer(hostname,"container_test_three")
+ salome.logm.clear()
+ #PROXY_THRES = "-1"
+ PROXY_THRES = "1"
+ with tempfile.TemporaryDirectory() as tmpdirnameMonitoring:
+ fsMonitoringFile = os.path.join( str( tmpdirnameMonitoring ), "zeFS.txt" )
+ cpuMemMonitoringFile = os.path.join( str( tmpdirnameMonitoring ), "zeCPUMem.txt" )
+ with SALOME_PyNode.GenericPythonMonitoringLauncherCtxMgr( SALOME_PyNode.FileSystemMonitoring(1000,os.path.dirname( salome.__file__ ),fsMonitoringFile) ) as monitoringParamsForFileMaster:
+ with SALOME_PyNode.GenericPythonMonitoringLauncherCtxMgr( SALOME_PyNode.CPUMemoryMonitoring(1000,cpuMemMonitoringFile) ) as monitoringParamsMaster:
+ monitoringFile = os.path.join( str( tmpdirnameMonitoring ), "zeHtop.pckl" )
+ monitoringFileTwo = os.path.join( str( tmpdirnameMonitoring ), "zeHtopTwo.pckl" )
+ iorLogm = os.path.join( str( tmpdirnameMonitoring ), "logm.ior" )
+ with open(iorLogm,"w") as f:
+ f.write( salome.orb.object_to_string(salome.logm) )
+ logging.debug("Monitoring file : {}".format(monitoringFile))
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ with salome.LogManagerLaunchMonitoringFileCtxMgr(250,monitoringFile) as monitoringParams:
+ pyFileContainingCodeOfMonitoring = monitoringParams.pyFileName.filename
+ logging.debug("Python file containing code of monitoring : {}".format(pyFileContainingCodeOfMonitoring))
+ val_for_big_obj = str( tmpdirname )
+ os.environ["SALOME_FILE_BIG_OBJ_DIR"] = val_for_big_obj
+ # Override environement for all containers launched
+ salome.cm.SetOverrideEnvForContainersSimple(env = [("SALOME_FILE_BIG_OBJ_DIR",val_for_big_obj),("SALOME_BIG_OBJ_ON_DISK_THRES",PROXY_THRES)])
+ salome.cm.SetDeltaTimeBetweenCPUMemMeasureInMilliSecond( 250 )
+ cont = salome.cm.GiveContainer(cp)
+ logging.debug("{} {}".format(40*"*",cont.getPID()))
+ script_st = """
+import logging
+import sys
+import KernelBasis
+from datetime import datetime
+cst = KernelBasis.GetTimeAdjustmentCst()
+logging.debug("constant = {}".format(cst))
+nbcore = 3
+my_log_4_this_session.addFreestyleAndFlush( ("a",datetime.now()) ) # exemple of custom tracking
+print("coucou {} {}".format(len(zeinput0),len(zeinput1)))
+logging.debug("debug or not debug")
+ob = [ [ bytes(3000000) ] ]
+pihm, ts = KernelBasis.HeatMarcel(1 * nbcore * cst,nbcore)
+print("Time ellapse spent : {} s".format(ts))
+ob2 = [ [ bytes(100000) ] ]
+pihm, ts = KernelBasis.HeatMarcel(1 * nbcore * cst,nbcore)
+print("Time ellapse spent : {} s".format(ts))
+sys.stderr.write("fake error message\\n")
+"""
+ poa = salome.orb.resolve_initial_references("RootPOA")
+ zeinput0 = [ bytes(100000000) ]
+ if SALOME_PyNode.GetBigObjectOnDiskThreshold() != -1:
+ zeinput0 = SALOME_PyNode.ProxyfyPickeled( zeinput0 )
+ zeinput0.unlinkOnDestructor()
+ obj = SALOME_PyNode.SenderByte_i(poa,pickle.dumps( (["zeinput0"],{"zeinput0": [zeinput0], "zeinput1": [ [zeinput0], [zeinput0] ] }) ))
+ id_o = poa.activate_object(obj)
+ refPtr = poa.id_to_reference(id_o)
+ pyscript2 = cont.createPyScriptNode("testScript2",script_st)
+ pyscript2.executeFirst(refPtr)
+ ret2 = pyscript2.executeSecond(["ob","ob2"])# generate a DeprecationWarning: PY_SSIZE_T_CLEAN will be required for '#' formats on debian11 ?
+ ret3, fileNamesProxyOut = unProxyfy( ret2 )
+ logging.getLogger().debug("test logging 1")
+ logging.debug("test logging 2")
+ logging.debug( salome.orb.object_to_string( salome.logm ) )
+ a = salome.logm.NaiveFetch()
+ logging.debug(a)
+ cont.Shutdown()
+ cpumeminfo = SALOME_PyNode.ReadCPUMemInfo( monitoringParamsMaster )
+ self.assertTrue( isinstance(monitoringParamsMaster.outFileName,SALOME_PyNode.FileHolder) )
+ dirInfo = SALOME_PyNode.ReadInodeSizeInfo( monitoringParamsForFileMaster )
+ self.assertTrue( isinstance(monitoringParamsForFileMaster.outFileName,SALOME_PyNode.FileHolder) )
+ self.assertTrue( os.path.exists( fsMonitoringFile ) )
+ self.assertTrue( os.path.exists( cpuMemMonitoringFile ) )
+ logging.debug( cpumeminfo )
+ logging.debug( dirInfo )
+ self.assertFalse( os.path.exists( fsMonitoringFile ) )
+ self.assertFalse( os.path.exists( cpuMemMonitoringFile ) )
+
+    def testEasyNamingService(self):
+        """
+        [EDF29150] : Check the embedded naming service client API : dump the NS IOR
+        into a file, rebuild a client from it inside a remote container and list its keys.
+        """
+        hostname = "localhost"
+        cp = pylauncher.GetRequestForGiveContainer(hostname,"container_test_two")
+        salome.logm.clear()
+        PROXY_THRES = "1"
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            ior_ns_file = os.path.join(tmpdirname,"ns.ior")
+            val_for_big_obj = str( tmpdirname )
+            salome.cm.SetOverrideEnvForContainersSimple(env = [("SALOME_FILE_BIG_OBJ_DIR",val_for_big_obj),("SALOME_BIG_OBJ_ON_DISK_THRES",PROXY_THRES)])
+            salome.cm.SetDeltaTimeBetweenCPUMemMeasureInMilliSecond( 250 )
+            # persist the naming service IOR so the remote script can reach it
+            salome.naming_service.DumpIORInFile( ior_ns_file )
+            cont = salome.cm.GiveContainer(cp)
+            script_st = """
+from SALOME_Embedded_NamingService_ClientPy import SALOME_Embedded_NamingService_ClientPy
+ior_ns_file = "{ior_ns_file}"
+ns = SALOME_Embedded_NamingService_ClientPy.BuildFromIORFile( ior_ns_file )
+ret = ns.repr()
+""".format(**locals())
+            # NOTE(review): GiveContainer is called twice with the same request —
+            # presumably idempotent (returns the same container); confirm and drop one.
+            cont = salome.cm.GiveContainer(cp)
+            pyscript2 = cont.createPyScriptNode("testScript3",script_st)
+            retCoarse = pickle.loads( pyscript2.execute( ["ret"], pickle.dumps( ([],{} ) ) ) )
+            ret = retCoarse[0]
+            # the remote repr() must come back as a list of path strings
+            self.assertTrue( isinstance(ret,list) and isinstance(ret[0],str) )
+            cont.Shutdown()
+        pass
if __name__ == '__main__':
from salome_utils import positionVerbosityOfLoggerRegardingState,setVerboseLevel,setVerbose
salome.standalone()
salome.salome_init()
+ setVerbose(True)
setVerboseLevel(logging.DEBUG)
positionVerbosityOfLoggerRegardingState()
unittest.main()
orb=CORBA.ORB_init([''])
return orb.string_to_object(ret)
NamingService.RefOfNS = classmethod(NamingService_RefOfNS)
+def NamingService_DumpIORInFile(cls,iorFileName):
+    """Write the stringified IOR of the naming service into text file *iorFileName*.
+
+    The file can later be consumed by remote clients (see
+    SALOME_Embedded_NamingService_ClientPy.BuildFromIORFile).
+    """
+    with open( iorFileName, "w" ) as f:
+        f.write( cls.IOROfNS() )
+# exposed as a classmethod on NamingService, following the file's monkey-patching style
+NamingService.DumpIORInFile = classmethod(NamingService_DumpIORInFile)
def NamingService_KillContainersInFile(cls,logFileName):
import Engines
import CORBA
CORBA::Object_var obj = ns.ResolveFirst(Path);
return ObjectToIOR(obj);
}
+
+/*!
+ * Return the list of entries currently registered in the embedded (fake)
+ * naming service, as an IDL sequence of strings owned by the caller.
+ */
+Engines::NSListOfStrings *SALOME_Embedded_NamingService::keys()
+{
+  SALOME_Fake_NamingService ns;
+  std::vector<std::string> ks = ns.repr();
+  Engines::NSListOfStrings *ret = new Engines::NSListOfStrings;
+  // sequence length() and operator[] take CORBA::ULong : cast explicitly
+  // instead of narrowing implicitly from std::size_t on 64-bit platforms.
+  CORBA::ULong sz( static_cast<CORBA::ULong>( ks.size() ) );
+  ret->length( sz );
+  for(CORBA::ULong i = 0 ; i < sz ; ++i)
+    (*ret)[i] = CORBA::string_dup( ks[i].c_str() );
+  return ret;
+}
void Destroy_Name(const char *Path) override;
Engines::IORType *Resolve(const char *Path) override;
Engines::IORType *ResolveFirst(const char *Path) override;
+ Engines::NSListOfStrings *keys() override;
};
#-------------------------------------------------------------------------
def __init__(self, serv):
- self._orb=CORBA.ORB_init([''], CORBA.ORB_ID)
+ self._orb = CORBA.ORB_init([''], CORBA.ORB_ID)
self._obj = serv
#-------------------------------------------------------------------------
def Resolve(self, Path):
ret = self._obj.Resolve(Path)
return self._orb.string_to_object(ret.decode())
+
+    def keys(self):
+        """Return the entries registered in the remote embedded naming service (list of str)."""
+        return self._obj.keys()
+
+    def repr(self):
+        """Alias of keys(), mirroring the repr() of the C++ SALOME_Fake_NamingService."""
+        return self.keys()
+
+def SALOME_Embedded_NamingService_ClientPy_BuildFromIORFile(cls, iorNSFile):
+    """Build a naming-service client from the IOR stored in text file *iorNSFile*.
+
+    :param iorNSFile: path of a file containing the stringified IOR of the
+                      remote embedded naming service servant.
+    :return: a SALOME_Embedded_NamingService_ClientPy wrapping that servant.
+    """
+    # NOTE(review): Engines looks unused but presumably registers the IDL stubs
+    # needed for string_to_object to narrow properly — confirm before removing.
+    import Engines
+    orb = CORBA.ORB_init([''], CORBA.ORB_ID)
+    with open(iorNSFile,"r") as f:
+        ior = f.read()
+    serv = orb.string_to_object( ior )
+    return SALOME_Embedded_NamingService_ClientPy(serv)
+
+SALOME_Embedded_NamingService_ClientPy.BuildFromIORFile = classmethod(SALOME_Embedded_NamingService_ClientPy_BuildFromIORFile)
* thread waken up is not guaranteed (no fifo or priority rules in Linux Kernel)
*/
-#define MESS_INIT(deb) std::ostringstream os; os<<deb
+#define MESS_INIT(deb) std::ostringstream os; SALOME::AppendTimeClock(os); os << deb
#define MESS_BEGIN(deb) MESS_INIT(deb)<<__FILE__ <<" ["<<__LINE__<<"] : "
#define MESS_END std::endl; LocalTraceBufferPool::instance()->insert(NORMAL_MESS, os.str().c_str());
#define MESS_ABORT std::endl; LocalTraceBufferPool::instance()->insert(ABORT_MESS, os.str().c_str());
// --- Some macros are always defined (without _DEBUG_): for use with release version
-#define INFOS(msg) {MESS_BEGIN("- Trace ") << msg << MESS_END}
-#define BRIEF_INFOS(msg) {std::cout << msg << std::endl;}
+#define INFOS(msg) {MESS_BEGIN("Infos - ") << msg << MESS_END}
#define PYSCRIPT(msg) {MESS_INIT("---PYSCRIPT--- ") << msg << MESS_END}
#define INTERRUPTION(msg) {MESS_BEGIN("- INTERRUPTION: ")<< msg << MESS_ABORT}