+ def __getitem__(self, i):
+ return self.get()[i]
+
+ def __len__(self):
+ return len(self.get())
+
class BigObjectOnDiskSequence(BigObjectOnDiskBase):
    """Disk-backed proxy of a pickled sequence whose length is known up front.

    Indexing does not deserialize the whole sequence: it returns a lazy
    per-element proxy (BigObjectOnDiskListElement) instead.
    """

    def __init__(self, length, fileName, objSerialized):
        super().__init__(fileName, objSerialized)
        # cached so __len__ never has to touch the backing file
        self._length = length

    def __getitem__(self, i):
        # lazy: nothing is read from disk until the element proxy is resolved
        return BigObjectOnDiskListElement(i, self._length, self.getFileName())

    def __len__(self):
        return self._length
+
class BigObjectOnDiskList(BigObjectOnDiskSequence):
    """Disk-backed proxy of a pickled ``list``.

    The subclass only tags the proxied object's concrete type so it can be
    distinguished from tuples at unproxy time; all behavior comes from
    BigObjectOnDiskSequence, whose constructor is inherited unchanged.
    """
+
class BigObjectOnDiskTuple(BigObjectOnDiskSequence):
    """Disk-backed proxy of a pickled ``tuple``.

    The subclass only tags the proxied object's concrete type so it can be
    distinguished from lists at unproxy time; all behavior comes from
    BigObjectOnDiskSequence, whose constructor is inherited unchanged.
    """
+
def ProxyfyPickeled( obj, pickleObjInit = None, visitor = None ):
    """
    Return a disk-backed proxy instance for the pickled form of *obj*.

    Args:
    ----
    obj (picklable type) : object to be proxified
    pickleObjInit (bytes) : Optional. Pickled form of *obj* if already computed; generated here otherwise.
    visitor (InOutputObjVisitor) : Optional. Records the pickle size and backing file name.

    Returns
    -------
    BigObjectOnDiskBase: proxy instance
    """
    pickleObj = pickle.dumps( obj, pickle.HIGHEST_PROTOCOL ) if pickleObjInit is None else pickleObjInit
    fileName = GetBigObjectFileName()
    if visitor:
        visitor.setHDDMem( len(pickleObj) )
        visitor.setFileName( fileName.getFileName() )
    # lists and tuples get sequence proxies so elements can be fetched lazily
    if isinstance( obj, list ):
        return BigObjectOnDiskList( len(obj), fileName, pickleObj )
    if isinstance( obj, tuple ):
        return BigObjectOnDiskTuple( len(obj), fileName, pickleObj )
    return BigObjectOnDisk( fileName, pickleObj )
+
def SpoolPickleObject( obj, visitor = None ):
    """Pickle *obj*, substituting a disk proxy when it exceeds the size threshold.

    Returns the pickled bytes of either the object itself (small case) or of
    its BigObjectOnDisk* proxy (big case).
    """
    import pickle
    with InOutputObjVisitorCM(visitor) as v:
        rawPickle = pickle.dumps( obj, pickle.HIGHEST_PROTOCOL )
        if not ActivateProxyMecanismOrNot( len(rawPickle) ):
            return rawPickle
        proxyObj = ProxyfyPickeled( obj, rawPickle, v.visitor() )
        return pickle.dumps( proxyObj, pickle.HIGHEST_PROTOCOL )
+
+from SALOME_ContainerHelper import InOutputObjVisitorCM, InOutputObjVisitor
+
def UnProxyObjectSimple( obj, visitor = None ):
    """
    Method to be called in Remote mode. Alters the obj _status attribute,
    because the slave process does not participate in the reference counting.

    Args:
    ----
    obj : object possibly containing BigObjectOnDiskBase proxies
    visitor (InOutputObjVisitor): A visitor to keep track of amount of memory on chip and those on HDD
    """
    with InOutputObjVisitorCM(visitor) as v:
        logging.debug( "UnProxyObjectSimple {}".format(type(obj)) )
        if isinstance( obj, BigObjectOnDiskBase ):
            obj.doNotTouchFile()
            return obj.get( v )
        if isinstance( obj, list ):
            # recurse: a list may hold proxies at any nesting depth
            return [ UnProxyObjectSimple( elt, v.visitor() ) for elt in obj ]
        return obj
+
def UnProxyObjectSimpleLocal( obj ):
    """
    Method to be called in Local mode. Does not alter the PyObj counter.
    """
    if isinstance( obj, BigObjectOnDiskBase ):
        return obj.get()
    if isinstance( obj, list ):
        # recurse: a list may hold proxies at any nesting depth
        return [ UnProxyObjectSimpleLocal( elt ) for elt in obj ]
    return obj
+
class FileHolder:
    """Value object wrapping a file name; takes no ownership of the file."""

    def __init__(self, fileName):
        self._filename = fileName

    @property
    def filename(self):
        """str: the wrapped file name."""
        return self._filename
+
class FileDeleter(FileHolder):
    """FileHolder owning its file: the file is removed when the holder dies."""

    def __init__(self, fileName):
        super().__init__( fileName )

    def __del__(self):
        # local import: module globals may already be torn down at interpreter exit
        import os
        if os.path.exists( self._filename ):
            os.unlink( self._filename )
+
class MonitoringInfo:
    """Bundle describing a monitoring session: script, period, output, pid.

    ``pid`` is None until the monitoring subprocess is actually launched,
    then set via the ``pid`` setter.
    """

    def __init__(self, pyFileName, intervalInMs, outFileName, pid):
        self._py_file_name = pyFileName
        self._interval_in_ms = intervalInMs
        self._out_file_name = outFileName
        self._pid = pid

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, value):
        self._pid = value

    @property
    def pyFileName(self):
        return self._py_file_name

    @property
    def outFileName(self):
        return self._out_file_name

    @property
    def intervalInMs(self):
        return self._interval_in_ms
+
def FileSystemMonitoring(intervalInMs, dirNameToInspect, outFileName = None):
    """
    This method loops indefinitely every intervalInMs milliseconds to scan
    number of inodes and size of content recursively included into the in input directory.

    Args:
    ----
    intervalInMs (int) : polling period in milliseconds
    dirNameToInspect (str) : directory to watch (recursively)
    outFileName (str) : name of file inside the results will be written. If None a new file is generated

    Returns
    -------
    MonitoringInfo : pid is None until the monitoring subprocess is actually launched

    See also CPUMemoryMonitoring
    """
    import os
    import tempfile
    import logging
    dirNameToInspect2 = os.path.abspath( os.path.expanduser(dirNameToInspect) )
    # NamedTemporaryFile is used only to obtain a unique name: the temporary
    # file is deleted on close, then the monitoring script is written to it.
    with tempfile.NamedTemporaryFile(prefix="fs_monitor_",suffix=".py") as f:
        tempPyFile = f.name
    tempOutFile = outFileName
    if tempOutFile is None:
        tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
    with open(tempPyFile,"w") as f:
        # Template substituted via locals(): {tempOutFile}, {dirNameToInspect2},
        # {intervalInMs}. "{{}}" are literal braces in the generated script.
        f.write("""
import subprocess as sp
import re
import os
import time
import datetime
with open("{tempOutFile}","a") as f:
    f.write( "{{}}\\n".format( "{dirNameToInspect2}" ) )
    f.write( "{{}}\\n".format( "{intervalInMs}" ) )
    while(True):
        nbinodes = -1
        try:
            nbinodes = sp.check_output("{{}} | wc -l".format( " ".join(["find","{dirNameToInspect2}"]), ), shell = True).decode().strip()
        except:
            pass
        szOfDirStr = "fail"
        try:
            st = sp.check_output(["du","-sh","{dirNameToInspect2}"]).decode()
            szOfDirStr = re.split("[\\s]+",st)[0]
        except:
            pass
        f.write( "{{}}\\n".format( str( datetime.datetime.now().timestamp() ) ) )
        f.write( "{{}}\\n".format( str( nbinodes ) ) )
        f.write( "{{}}\\n".format( str( szOfDirStr ) ) )
        f.flush()
        time.sleep( {intervalInMs} / 1000.0 )
""".format( **locals()))
    logging.debug( "File for FS monitoring dump file : {}".format(tempPyFile) )
    # the generated script is owned: deleted when the MonitoringInfo dies
    pyFileName = FileDeleter( tempPyFile )
    if outFileName is None:
        # auto-generated output file is owned too
        outFileName = FileDeleter( tempOutFile )
    else:
        outFileName = FileHolder(outFileName)
    return MonitoringInfo(pyFileName, intervalInMs, outFileName, None)
+
def CPUMemoryMonitoring( intervalInMs, outFileName = None ):
    """
    Launch a subprocess monitoring self process.
    This monitoring subprocess is a python process launching every intervalInMs ms an evaluation of
    CPU usage and RSS memory of the calling process.
    Communication between subprocess and self is done by file.

    Args:
    ----
    intervalInMs (int) : sampling period in milliseconds
    outFileName (str) : name of file inside the results will be written. If None a new file is generated

    See also FileSystemMonitoring
    """
    import KernelBasis  # NOTE(review): appears unused in this function — confirm before removing
    def BuildPythonFileForCPUPercent( intervalInMs, outFileName):
        # Generate the monitoring script; return (script FileDeleter, output holder).
        import os
        import tempfile
        # NamedTemporaryFile is used only to obtain a unique name: the temporary
        # file is deleted on close, then the script is written to that name below.
        with tempfile.NamedTemporaryFile(prefix="cpu_mem_monitor_",suffix=".py") as f:
            tempPyFile = f.name
        tempOutFile = outFileName
        if tempOutFile is None:
            tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
        pid = os.getpid()
        with open(tempPyFile,"w") as f:
            # Template placeholders ({}): pid, output file name, interval written
            # as the output header line, interval used as the sleep period.
            # "{{}}" are literal braces in the generated script.
            f.write("""import psutil
pid = {}
process = psutil.Process( pid )
def getChargeOf( p ):
    a,b = p.cpu_percent(), p.memory_info().rss
    try:
        for c in p.children():
            a += c.cpu_percent(interval=0.01) ; b += c.memory_info().rss
    except:
        pass
    return a,b
import time
with open("{}","a") as f:
    f.write( "{{}}\\n".format( "{}" ) )
    while True:
        cpu,mem_rss = getChargeOf( process )
        f.write( "{{}}\\n".format( str( cpu ) ) )
        f.write( "{{}}\\n".format( str( mem_rss ) ) )
        f.flush()
        time.sleep( {} / 1000.0 )
""".format(pid, tempOutFile, intervalInMs, intervalInMs))
        if outFileName is None:
            # auto-generated output file is owned: deleted with its holder
            autoOutFile = FileDeleter(tempOutFile)
        else:
            autoOutFile = FileHolder(tempOutFile)
        return FileDeleter(tempPyFile),autoOutFile
    pyFileName, outFileName = BuildPythonFileForCPUPercent( intervalInMs, outFileName )
    # pid stays None until the subprocess is actually launched
    return MonitoringInfo(pyFileName, intervalInMs, outFileName, None)
+
class GenericPythonMonitoringLauncherCtxMgr:
    """Context manager running a monitoring script for the lifetime of a ``with`` block.

    On entry the script is launched through KernelBasis and its pid is stored
    into the MonitoringInfo; on exit the monitoring subprocess is killed.
    """

    def __init__(self, monitoringParams):
        """
        Args:
        ----
        monitoringParams (MonitoringInfo)
        """
        self._monitoring_params = monitoringParams

    def __enter__(self):
        import KernelBasis
        scriptPath = self._monitoring_params.pyFileName.filename
        self._monitoring_params.pid = KernelBasis.LaunchMonitoring( scriptPath )
        return self._monitoring_params

    def __exit__(self, exctype, exc, tb):
        import gc
        StopMonitoring( self._monitoring_params )
        del self._monitoring_params
        gc.collect() # force destruction of objects even in raise context
+
def StopMonitoring( monitoringInfo ):
    """
    Kill monitoring subprocess.

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring
    """
    import KernelBasis
    pidToStop = monitoringInfo.pid
    KernelBasis.StopMonitoring( pidToStop )
+
class CPUMemInfo:
    """Result of a CPU/RSS monitoring session (see ReadCPUMemInfo)."""

    def __init__(self, intervalInMs, cpu, mem_rss):
        """
        Args:
        ----
        intervalInMs (int) : sampling period of the monitoring
        cpu (list<float>) : CPU usage samples
        mem_rss (list<int>) : rss memory usage samples
        """
        self._interval_in_ms = intervalInMs
        # zip truncates to the shortest list, dropping any unmatched trailing sample
        self._data = list(zip(cpu, mem_rss))

    def __str__(self):
        # NOTE(review): the "$" looks like a template leftover; kept to preserve output
        return """Interval in ms : {self.intervalInMs}
Data : ${self.data}
""".format( self=self )

    @property
    def intervalInMs(self):
        """int : sampling period in milliseconds."""
        return self._interval_in_ms

    @property
    def data(self):
        """
        list of pairs. First element of each pair is cpu usage,
        second element is rss memory usage.
        """
        return self._data
+
def ReadCPUMemInfoInternal( fileName ):
    """Parse the dump file produced by the CPUMemoryMonitoring script.

    File layout: first line is the interval in ms, then alternating
    cpu / rss lines. Best effort: a missing or partially written file
    yields whatever could be parsed (possibly an empty CPUMemInfo).
    """
    import os
    intervalInMs = 0
    cpu = [] ; mem_rss = []
    if os.path.exists( fileName ):
        try:
            with open(fileName, "r") as f:
                coarseData = [ elt.strip() for elt in f.readlines() ]
            intervalInMs = int( coarseData[0] )
            samples = coarseData[1:]
            cpu = [ float(elt) for elt in samples[::2] ]
            mem_rss = [ int(elt) for elt in samples[1::2] ]
        except Exception:
            # narrowed from bare except: do not swallow KeyboardInterrupt/SystemExit
            pass
    return CPUMemInfo(intervalInMs,cpu,mem_rss)
+
def ReadCPUMemInfo( monitoringInfo ):
    """
    Retrieve CPU/Mem data of monitoring.

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring

    Returns
    -------
    CPUMemInfo instance
    """
    dumpFile = monitoringInfo.outFileName.filename
    return ReadCPUMemInfoInternal( dumpFile )
+
class InodeSizeInfo:
    """Result of a filesystem monitoring session (see ReadInodeSizeInfo)."""

    def __init__(self, dirNameMonitored, intervalInMs, timeStamps, nbInodes, volumeOfDir):
        """
        Args:
        ----
        dirNameMonitored (str)
        intervalInMs (int)
        timeStamps (list<datetimestruct>)
        nbInodes (list<int>)
        volumeOfDir (list<str>)
        """
        self._dir_name_monitored = dirNameMonitored
        self._interval_in_ms = intervalInMs
        # zip truncates to the shortest input list
        self._data = list(zip(timeStamps, nbInodes, volumeOfDir))

    def __str__(self):
        # NOTE(review): the "$" signs look like template leftovers; kept to preserve output
        return """Filename monitored : {self.dirNameMonitored}
Interval in ms : ${self.intervalInMs}
Data : ${self.data}
""".format( self=self )

    @property
    def dirNameMonitored(self):
        return self._dir_name_monitored

    @property
    def intervalInMs(self):
        return self._interval_in_ms

    @property
    def data(self):
        """
        list of triplets. First param of triplet is datetimestruct
        Second param of triplet is #inodes.
        Third param of triplet is size.
        """
        return self._data
+
def ReadInodeSizeInfoInternal( fileName ):
    """Parse the dump file produced by the FileSystemMonitoring script.

    File layout: monitored directory name, interval in ms, then repeating
    triplets of (timestamp, #inodes, size string). Unlike
    ReadCPUMemInfoInternal there is no best-effort guard: a missing or
    malformed file raises.
    """
    import datetime
    with open(fileName, "r") as f:
        coarseData = [ elt.strip() for elt in f.readlines() ]
    dirNameMonitored = coarseData[0]
    intervalInMs = int( coarseData[1] )
    samples = coarseData[2:]
    tss = [ datetime.datetime.fromtimestamp( float(elt) ) for elt in samples[::3] ]
    nbInodes = [ int(elt) for elt in samples[1::3] ]
    volumeOfDir = samples[2::3]
    return InodeSizeInfo(dirNameMonitored,intervalInMs,tss,nbInodes,volumeOfDir)
+
def ReadInodeSizeInfo( monitoringInfo ):
    """
    Retrieve nb of inodes and size of monitoring.

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring

    Returns
    -------
    InodeSizeInfo
    """
    dumpFile = monitoringInfo.outFileName.filename
    return ReadInodeSizeInfoInternal( dumpFile )
+