1 # -*- coding: iso-8859-1 -*-
2 # Copyright (C) 2007-2023 CEA, EDF, OPEN CASCADE
4 # This library is free software; you can redistribute it and/or
5 # modify it under the terms of the GNU Lesser General Public
6 # License as published by the Free Software Foundation; either
7 # version 2.1 of the License, or (at your option) any later version.
9 # This library is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 # Lesser General Public License for more details.
14 # You should have received a copy of the GNU Lesser General Public
15 # License along with this library; if not, write to the Free Software
16 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # File : SALOME_PyNode.py
22 # Author : Christian CAREMOLI, EDF
35 from SALOME_ContainerHelper import ScriptExecInfo
# Name under which the hosting container is published in each node's
# execution context (the globals of the executed user script).
MY_CONTAINER_ENTRY_IN_GLBS = "my_container"

# Name under which the per-session performance log is published in the
# execution context.
MY_PERFORMANCE_LOG_ENTRY_IN_GLBS = "my_log_4_this_session"
class Generic(SALOME__POA.GenericObj):
    """A Python implementation of the GenericObj CORBA IDL"""
    def __init__(self,poa):
        # NOTE(review): body elided in this excerpt -- presumably stores the
        # POA and initializes the reference counter used by the lines below.
        # -- Register()/UnRegister() bodies (def lines elided in excerpt) --
        #print("Register called : %d"%self.cnt)
        #print("UnRegister called : %d"%self.cnt)
        # once the reference count drops, deactivate the servant in the POA
        oid=self.poa.servant_to_id(self)
        self.poa.deactivate_object(oid)
        # -- Destroy() (def line elided) : obsolete alias of UnRegister() --
        print("WARNING SALOME::GenericObj::Destroy() function is obsolete! Use UnRegister() instead.")
        # -- __del__ (def line elided) --
        #print("Destuctor called")
class PyNode_i (Engines__POA.PyNode,Generic):
    """The implementation of the PyNode CORBA IDL"""
    def __init__(self, nodeName,code,poa,my_container):
        """Initialize the node : compilation in the local context"""
        Generic.__init__(self,poa)
        self.nodeName=nodeName
        # keep the C++ container reference (not the Python wrapper)
        self.my_container=my_container._container
        # register the source in linecache so tracebacks show the user code
        linecache.cache[nodeName]=0,None,code.split('\n'),nodeName
        ccode=compile(code,nodeName,'exec')
        # NOTE(review): creation of self.context is elided in this excerpt
        self.context[MY_CONTAINER_ENTRY_IN_GLBS] = self.my_container
        exec(ccode, self.context)

    def getContainer(self):
        # CORBA accessor : container hosting this node
        return self.my_container

    def defineNewCustomVar(self,varName,valueOfVar):
        # inject a pickled value as a named variable of the execution context
        self.context[varName] = pickle.loads(valueOfVar)

    def executeAnotherPieceOfCode(self,code):
        """Called for initialization of container lodging self."""
        # NOTE(review): enclosing try/except lines are elided in this
        # excerpt ; the raise below presumably sits in the except branch.
        ccode=compile(code,self.nodeName,'exec')
        exec(ccode, self.context)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"","PyScriptNode (%s) : code to be executed \"%s\"" %(self.nodeName,code),0))

    def execute(self,funcName,argsin):
        """Execute the function funcName found in local context with pickled args (argsin)"""
        # NOTE(review): try/except lines and the success-path return are
        # elided in this excerpt ; the raise below is the error path.
        argsin,kws=pickle.loads(argsin)
        func=self.context[funcName]
        argsout=func(*argsin,**kws)
        argsout=pickle.dumps(argsout,-1)
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyNode: %s, function: %s" % (self.nodeName,funcName),0))
class SenderByte_i(SALOME__POA.SenderByte,Generic):
    """CORBA servant exposing an in-memory bytes payload chunk by chunk."""
    def __init__(self,poa,bytesToSend):
        Generic.__init__(self,poa)
        # payload kept alive until the client has fetched it
        self.bytesToSend = bytesToSend
        # -- getSize() (def line elided in this excerpt) --
        return len(self.bytesToSend)

    def sendPart(self,n1,n2):
        # return the [n1:n2) slice of the payload
        return self.bytesToSend[n1:n2]
# Env var giving the directory where big objects are dumped to disk.
SALOME_FILE_BIG_OBJ_DIR = "SALOME_FILE_BIG_OBJ_DIR"

# Env var overriding the size threshold (bytes) that triggers proxying.
SALOME_BIG_OBJ_ON_DISK_THRES_VAR = "SALOME_BIG_OBJ_ON_DISK_THRES"

# Default threshold : 50 MB.
SALOME_BIG_OBJ_ON_DISK_THRES_DFT = 50000000

# fname -> reference counter of proxy files shared across clients.
DicoForProxyFile = { }
def GetSizeOfBufferedReader(f):
    """
    This method returns in bytes size of a file openned.

    Args:
    ----
    f (io.IOBase): buffered reader returned by open

    """
    # measure by seeking to EOF, then restore the saved position
    # NOTE(review): capture of the current position (``pos``), of the size,
    # and the final return are elided in this excerpt.
    f.seek(0,io.SEEK_END)
    f.seek(pos,io.SEEK_SET)
def GetObjectFromFile(fname, visitor = None):
    """Unpickle and return the object dumped in file *fname*.

    NOTE(review): the visitor None-check, the unpickling call and the
    return are elided in this excerpt.
    """
    with open(fname,"rb") as f:
        # report the on-disk footprint to the accounting visitor
        visitor.setHDDMem( GetSizeOfBufferedReader(f) )
        visitor.setFileName( fname )
def DumpInFile(obj,fname):
    """Write the already-pickled bytes *obj* into file *fname*.

    NOTE(review): the write call itself is elided in this excerpt.
    """
    with open(fname,"wb") as f:
def IncrRefInFile(fname):
    """Increment the shared reference counter of proxy file *fname*."""
    if fname in DicoForProxyFile:
        DicoForProxyFile[fname] += 1
    # NOTE(review): an `else:` line is elided here -- first explicit
    # reference presumably starts the counter at 2 (creation counts as 1).
        DicoForProxyFile[fname] = 2
def DecrRefInFile(fname):
    """Decrement the refcount of proxy file *fname* ; remove the file from
    disk once the last reference is gone.

    NOTE(review): several lines are elided in this excerpt (the early
    return for unknown files, the branch header guarding the deletion,
    and the unlink call itself).
    """
    if fname not in DicoForProxyFile:
    cnt = DicoForProxyFile[fname]
    DicoForProxyFile[fname] -= 1
    del DicoForProxyFile[fname]
    if os.path.exists(fname):
def GetBigObjectOnDiskThreshold():
    """Return the size threshold (bytes) above which objects get proxied."""
    # environment override takes precedence over the built-in default
    if SALOME_BIG_OBJ_ON_DISK_THRES_VAR in os.environ:
        return int( os.environ[SALOME_BIG_OBJ_ON_DISK_THRES_VAR] )
    # NOTE(review): an `else:` line is elided here in this excerpt
    return SALOME_BIG_OBJ_ON_DISK_THRES_DFT
def ActivateProxyMecanismOrNot( sizeInByte ):
    """Tell whether a payload of *sizeInByte* bytes must be proxied on disk.

    NOTE(review): lines between the threshold fetch and the comparison are
    elided in this excerpt (possibly a special 'disabled' case) -- confirm
    against the full source before relying on the bare comparison below.
    """
    thres = GetBigObjectOnDiskThreshold()
    return sizeInByte > thres
def GetBigObjectDirectory():
    """Return the dump directory for big objects, read from the
    SALOME_FILE_BIG_OBJ_DIR env var with ``~`` and env vars expanded.

    Raises RuntimeError when the variable is not set.
    NOTE(review): one line after the def is elided in this excerpt.
    """
    if SALOME_FILE_BIG_OBJ_DIR not in os.environ:
        raise RuntimeError("An object of size higher than limit detected and no directory specified to dump it in file !")
    return os.path.expanduser( os.path.expandvars( os.environ[SALOME_FILE_BIG_OBJ_DIR] ) )
def GetBigObjectFileName():
    """
    Return a filename in the most secure manner (see tempfile documentation)
    """
    # the temporary file is created then closed/deleted ; only its unique
    # name inside the big-object directory is kept
    with tempfile.NamedTemporaryFile(dir=GetBigObjectDirectory(),prefix="mem_",suffix=".pckl") as f:
    # NOTE(review): `return f.name` is elided in this excerpt
class BigObjectOnDiskBase:
    """Base of proxies standing for a big pickled object dumped on disk.

    NOTE(review): several method ``def`` lines are elided in this excerpt ;
    orphan statements below are annotated with their presumed method.
    """
    def __init__(self, fileName, objSerialized):
        """
        :param fileName: the file used to dump into.
        :param objSerialized: the object in pickeled form
        :type objSerialized: bytes
        """
        self._filename = fileName
        # attribute _destroy is here to tell client side or server side
        # only client side can be with _destroy set to True. server side due to risk of concurrency
        # so pickled form of self must be done with this attribute set to False.
        self._destroy = False
        self.__dumpIntoFile(objSerialized)

    def getDestroyStatus(self):
        # -- incrRef() (def and branch lines elided in this excerpt) --
        IncrRefInFile( self._filename )
        # should never happen !
        # NOTE(review): the exception is instantiated but never raised --
        # a `raise` keyword is probably missing here.
        RuntimeError("Invalid call to incrRef !")
        # -- decrRef() (def and branch lines elided) --
        DecrRefInFile( self._filename )
        # should never happen !
        # NOTE(review): same here -- `raise` is probably missing.
        RuntimeError("Invalid call to decrRef !")

    def unlinkOnDestructor(self):
        # (body elided in this excerpt -- presumably sets self._destroy = True)

    def doNotTouchFile(self):
        """
        Method called slave side. The life cycle management of file is client side not slave side.
        """
        self._destroy = False
        # -- __del__ (def line elided) : drop our reference on the dump file --
        DecrRefInFile( self._filename )

    def getFileName(self):
        return self._filename

    def __dumpIntoFile(self, objSerialized):
        DumpInFile( objSerialized, self._filename )

    def get(self, visitor = None):
        # unpickle the dumped object back from disk
        obj = GetObjectFromFile( self._filename, visitor )
        # -- __float__ (def line elided) --
        return float( self.get() )
        # -- __int__ (def line elided) --
        return int( self.get() )
        # -- __str__ (def line and obj retrieval elided) --
        if isinstance(obj,str):
        raise RuntimeError("Not a string")
class BigObjectOnDisk(BigObjectOnDiskBase):
    """On-disk proxy of a whole (non-sequence) pickled object."""

    def __init__(self, fileName, objSerialized):
        # delegate the dump entirely to the base class
        super().__init__(fileName, objSerialized)
class BigObjectOnDiskListElement(BigObjectOnDiskBase):
    """Lazy proxy of a single element of a sequence dumped on disk."""
    def __init__(self, pos, length, fileName):
        # deliberately bypasses the base __init__ : the dump file already
        # exists, so nothing is written here
        self._filename = fileName
        self._destroy = False
        # NOTE(review): the line storing ``pos`` (presumably
        # ``self._pos = pos``) is elided in this excerpt.
        self._length = length

    def get(self, visitor = None):
        # load the whole dumped sequence, then pick our single element
        fullObj = BigObjectOnDiskBase.get(self, visitor)
        return fullObj[ self._pos ]

    def __getitem__(self, i):
        # NOTE(review): body elided in this excerpt ; the return below most
        # likely belongs to an elided __len__ definition.
        return len(self.get())
class BigObjectOnDiskSequence(BigObjectOnDiskBase):
    """On-disk proxy of a pickled sequence with lazy per-item access."""

    def __init__(self, length, fileName, objSerialized):
        # dump the full pickled sequence, then remember its length so
        # element proxies can be handed out without reloading the file
        super().__init__(fileName, objSerialized)
        self._length = length

    def __getitem__(self, i):
        # hand out a lazy proxy pointing at element *i* of the same file
        return BigObjectOnDiskListElement(i, self._length, self.getFileName())
class BigObjectOnDiskList(BigObjectOnDiskSequence):
    """On-disk proxy specialisation for pickled ``list`` objects."""

    def __init__(self, length, fileName, objSerialized):
        # nothing list-specific beyond the generic sequence behaviour
        super().__init__(length, fileName, objSerialized)
class BigObjectOnDiskTuple(BigObjectOnDiskSequence):
    """On-disk proxy specialisation for pickled ``tuple`` objects."""

    def __init__(self, length, fileName, objSerialized):
        # nothing tuple-specific beyond the generic sequence behaviour
        super().__init__(length, fileName, objSerialized)
def ProxyfyPickeled( obj, pickleObjInit = None, visitor = None ):
    """
    This method return a proxy instance of pickled form of object given in input.

    Args:
    ----
    obj (pickelable type) : object to be proxified
    pickleObjInit (bytes) : Optionnal. Original pickeled form of object to be proxyfied if already computed. If not this method generate it

    Returns
    -------
    BigObjectOnDiskBase: proxy instance
    """
    pickleObj = pickleObjInit
    if pickleObj is None:
        pickleObj = pickle.dumps( obj , pickle.HIGHEST_PROTOCOL )
    fileName = GetBigObjectFileName()
    # NOTE(review): the visitor None-check line is elided in this excerpt
    visitor.setHDDMem( len(pickleObj) )
    visitor.setFileName(fileName)
    # choose the proxy flavour matching the object's container type
    if isinstance( obj, list):
        proxyObj = BigObjectOnDiskList( len(obj), fileName, pickleObj )
    elif isinstance( obj, tuple):
        proxyObj = BigObjectOnDiskTuple( len(obj), fileName , pickleObj )
    # NOTE(review): the `else:` line and the final `return proxyObj` are
    # elided in this excerpt.
        proxyObj = BigObjectOnDisk( fileName , pickleObj )
def SpoolPickleObject( obj, visitor = None ):
    """Pickle *obj* ; when the payload exceeds the big-object threshold,
    dump it on disk and pickle a proxy instead.

    NOTE(review): both return statements (small-payload and proxy cases)
    are elided in this excerpt.
    """
    with InOutputObjVisitorCM(visitor) as v:
        pickleObjInit = pickle.dumps( obj , pickle.HIGHEST_PROTOCOL )
        # small payload : (elided) returned directly as-is
        if not ActivateProxyMecanismOrNot( len(pickleObjInit) ):
        # big payload : dump on disk and ship a pickled proxy instead
        proxyObj = ProxyfyPickeled( obj, pickleObjInit, v.visitor() )
        pickleProxy = pickle.dumps( proxyObj , pickle.HIGHEST_PROTOCOL )
360 from SALOME_ContainerHelper import InOutputObjVisitorCM, InOutputObjVisitor
def UnProxyObjectSimple( obj, visitor = None ):
    """
    Method to be called in Remote mode. Alterate the obj _status attribute.
    Because the slave process does not participate in the reference counting

    Args:
    ----
    visitor (InOutputObjVisitor): A visitor to keep track of amount of memory on chip and those on HDD
    """
    with InOutputObjVisitorCM(visitor) as v:
        logging.debug( "UnProxyObjectSimple {}".format(type(obj)) )
        # proxy instance : (branch body elided) fetch the real data back
        if isinstance(obj,BigObjectOnDiskBase):
        # lists are unproxied element-wise, recursively
        elif isinstance( obj, list):
            retObj.append( UnProxyObjectSimple(elt,v.visitor()) )
        # NOTE(review): loop header, retObj init and returns are elided in
        # this excerpt.
def UnProxyObjectSimpleLocal( obj ):
    """
    Method to be called in Local mode. Do not alterate the PyObj counter
    """
    # NOTE(review): branch bodies, loop header, retObj init and returns are
    # elided in this excerpt.
    if isinstance(obj,BigObjectOnDiskBase):
    elif isinstance( obj, list):
        retObj.append( UnProxyObjectSimpleLocal(elt) )
    def __init__(self, fileName):
        # path of the held file ; exposed read-only via the property below
        self._filename = fileName

    # -- filename property (decorator/def lines elided in this excerpt) --
        return self._filename
class FileDeleter(FileHolder):
    """FileHolder which removes the held file from disk at destruction."""
    def __init__(self, fileName):
        super().__init__( fileName )
        # -- __del__ (def line elided) : best-effort removal of the file --
        if os.path.exists( self._filename ):
            os.unlink( self._filename )
class MonitoringInfo:
    """Value object describing a launched monitoring subprocess : the
    python script file, its output file and (once started) its pid."""
    def __init__(self, pyFileName, outFileName, pid):
        self._py_file_name = pyFileName
        self._out_file_name = outFileName
        # NOTE(review): storage of ``pid`` is elided in this excerpt.

    # NOTE(review): property decorators are elided around the accessors below.
    def pyFileName(self):
        return self._py_file_name

    def pid(self, value):
        # presumably a pid setter storing ``value`` (body elided)

    def outFileName(self):
        return self._out_file_name
def FileSystemMonitoring(intervalInMs, dirNameToInspect, outFileName = None):
    """
    This method loops indefinitely every intervalInMs milliseconds to scan
    number of inodes and size of content recursively included into the in input directory.

    Args:
    ----
    outFileName (str) : name of file inside the results will be written. If None a new file is generated

    See also CPUMemoryMonitoring
    """
    dirNameToInspect2 = os.path.abspath( os.path.expanduser(dirNameToInspect) )
    # outFileNameSave stores the content of outFileName during phase of dumping
    with tempfile.NamedTemporaryFile(prefix="fs_monitor_",suffix=".txt") as f:
        outFileNameSave = f.name
    with tempfile.NamedTemporaryFile(prefix="fs_monitor_",suffix=".py") as f:
        # NOTE(review): capture of f.name into ``tempPyFile`` is elided here.
    tempOutFile = outFileName
    if tempOutFile is None:
        tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
    # Generate the python script executed by the monitoring subprocess.
    # NOTE(review): the f.write(""" opener and several template lines are
    # elided in this excerpt ; the lines below are the *content* of the
    # generated script (doubled braces are literal braces after .format).
    with open(tempPyFile,"w") as f:
import subprocess as sp
    with open("{tempOutFile}","a") as f:
      f.write( "{{}}\\n".format( "{dirNameToInspect2}" ) )
      nbinodes = sp.check_output("{{}} | wc -l".format( " ".join(["find","{dirNameToInspect2}"]), ), shell = True).decode().strip()
      szOfDirStr = re.split("[\s]+",sp.check_output(["du","-sh","{dirNameToInspect2}"]).decode())[0]
      f.write( "{{}}\\n".format( str( datetime.datetime.now().timestamp() ) ) )
      f.write( "{{}}\\n".format( str( nbinodes ) ) )
      f.write( "{{}}\\n".format( str( szOfDirStr ) ) )
    time.sleep( {intervalInMs} / 1000.0 )
""".format( **locals()))
    logging.debug( "File for FS monitoring dump file : {}".format(tempPyFile) )
    pyFileName = FileDeleter( tempPyFile )
    # caller-supplied output file is kept ; an auto-generated one is
    # deleted together with the MonitoringInfo
    if outFileName is None:
        outFileName = FileDeleter( tempOutFile )
    # NOTE(review): `else:` line elided in this excerpt
        outFileName = FileHolder(outFileName)
    return MonitoringInfo(pyFileName,outFileName,None)
def CPUMemoryMonitoring( intervalInMs, outFileName = None ):
    """
    Launch a subprocess monitoring self process.
    This monitoring subprocess is a python process lauching every intervalInMs ms evaluation of
    CPU usage and RSS memory of the calling process.
    Communication between subprocess and self is done by file.

    Args:
    ----
    outFileName (str) : name of file inside the results will be written. If None a new file is generated

    See also FileSystemMonitoring
    """
    def BuildPythonFileForCPUPercent( intervalInMs, outFileName):
        # NOTE(review): capture of the current pid and of f.name into
        # ``tempPyFile`` are elided in this excerpt.
        with tempfile.NamedTemporaryFile(prefix="cpu_mem_monitor_",suffix=".py") as f:
        tempOutFile = outFileName
        if tempOutFile is None:
            tempOutFile = "{}.txt".format( os.path.splitext( tempPyFile )[0] )
        # generate the psutil-based monitoring script ; elided template
        # lines include the pid substitution and the loop header
        with open(tempPyFile,"w") as f:
            f.write("""import psutil
process = psutil.Process( pid )
with open("{}","a") as f:
  f.write( "{{}}\\n".format( str( process.cpu_percent() ) ) )
  f.write( "{{}}\\n".format( str( process.memory_info().rss ) ) )
  time.sleep( {} / 1000.0 )
""".format(pid, tempOutFile, intervalInMs))
        if outFileName is None:
            autoOutFile = FileDeleter(tempOutFile)
        # NOTE(review): `else:` line elided in this excerpt
            autoOutFile = FileHolder(tempOutFile)
        return FileDeleter(tempPyFile),autoOutFile
    pyFileName, outFileName = BuildPythonFileForCPUPercent( intervalInMs, outFileName )
    return MonitoringInfo(pyFileName, outFileName, None)
class GenericPythonMonitoringLauncherCtxMgr:
    """Context manager launching a monitoring subprocess on entry and
    stopping it on exit."""
    def __init__(self, monitoringParams):
        """
        Args:
        ----
        monitoringParams (MonitoringInfo)
        """
        self._monitoring_params = monitoringParams

    # -- __enter__ (def line elided in this excerpt) --
        # start the subprocess and record its pid in the MonitoringInfo
        pid = KernelBasis.LaunchMonitoring(self._monitoring_params.pyFileName.filename)
        self._monitoring_params.pid = pid
        return self._monitoring_params

    def __exit__(self,exctype, exc, tb):
        StopMonitoring( self._monitoring_params )
def StopMonitoring( monitoringInfo ):
    """
    Kill monitoring subprocess.

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring
    """
    KernelBasis.StopMonitoring(monitoringInfo.pid)
def ReadCPUMemInfoInternal( fileName ):
    """Parse the monitoring dump *fileName* into (cpu, rss) pairs."""
    ret = KernelBasis.ReadFloatsInFile( fileName )
    # even entries are CPU percentages, odd entries are RSS values
    # NOTE(review): the line building ``cpu`` (presumably ret[::2]) is
    # elided in this excerpt.
    mem_rss = [ int(elt) for elt in ret[1::2]]
    return [(a,b) for a,b in zip(cpu,mem_rss)]
def ReadCPUMemInfo( monitoringInfo ):
    """
    Retrieve CPU/Mem data of monitoring.

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring

    Returns
    -------
    list<float,str> : list of pairs. First param of pair is CPU usage. Second param of pair is rss memory usage
    """
    return ReadCPUMemInfoInternal( monitoringInfo.outFileName.filename )
    def __init__(self, dirNameMonitored, timeStamps, nbInodes, volumeOfDir):
        """
        Args:
        ----
        timeStamps (list<datetimestruct>)
        volumeOfDir (list<str>)
        """
        self._dir_name_monitored = dirNameMonitored
        # zip truncates to the shortest of the three series
        self._data = [(t,a,b) for t,a,b in zip(timeStamps,nbInodes,volumeOfDir)]

    # -- __str__ (def line and template continuation elided in excerpt) --
        st = """Filename monitored : {self.dirNameMonitored}
""".format( **locals() )

    def dirNameMonitored(self):
        # NOTE(review): @property decorator line is elided in this excerpt.
        return self._dir_name_monitored

    # -- data property (decorator/def lines elided in this excerpt) --
        """
        list of triplets. First param of triplet is datetimestruct
        Second param of triplet is #inodes.
        Thirst param of triplet is size.
        """
def ReadInodeSizeInfoInternal( fileName ):
    """Parse a filesystem-monitoring dump file into an InodeSizeInfo.

    Expected layout : first line is the monitored directory name, followed
    by repeating triplets (timestamp, #inodes, human-readable size).
    """
    with open(fileName, "r") as fd:
        lines = [ln.strip() for ln in fd]
    # split off the header line ; the rest is a flat list of triplets
    dirNameMonitored, records = lines[0], lines[1:]
    timeStamps = [ datetime.datetime.fromtimestamp( float(v) ) for v in records[0::3] ]
    inodeCounts = [ int(v) for v in records[1::3] ]
    dirSizes = records[2::3]
    return InodeSizeInfo(dirNameMonitored, timeStamps, inodeCounts, dirSizes)
def ReadInodeSizeInfo( monitoringInfo ):
    """
    Retrieve nb of inodes and size of monitoring

    Args:
    ----
    monitoringInfo (MonitoringInfo): info returned by LaunchMonitoring
    """
    return ReadInodeSizeInfoInternal( monitoringInfo.outFileName.filename )
class SeqByteReceiver:
    """Client-side helper fetching the bytes payload of a remote SenderByte,
    in one shot when small enough or chunkwise to bound the memory peak."""
    # 2GB limit to trigger split into chunks
    CHUNK_SIZE = 2000000000
    def __init__(self,sender):
        # NOTE(review): storage of ``sender`` into self._obj is elided.
        # -- __del__ (def line elided) : release the remote CORBA object --
        self._obj.UnRegister()

    # -- data() (def line elided in this excerpt) --
        size = self._obj.getSize()
        if size <= SeqByteReceiver.CHUNK_SIZE:
            return self.fetchOneShot( size )
        # NOTE(review): `else:` line elided in this excerpt
            return self.fetchByChunks( size )

    def fetchOneShot(self,size):
        # single CORBA call : fine below CHUNK_SIZE
        return self._obj.sendPart(0,size)

    def fetchByChunks(self,size):
        """
        To avoid memory peak parts over 2GB are sent using EFF_CHUNK_SIZE size.
        """
        data_for_split_case = bytes(0)
        EFF_CHUNK_SIZE = SeqByteReceiver.CHUNK_SIZE // 8
        iStart = 0 ; iEnd = EFF_CHUNK_SIZE
        # loop terminates when the window collapses (iStart == iEnd) at EOF
        while iStart!=iEnd and iEnd <= size:
            part = self._obj.sendPart(iStart,iEnd)
            data_for_split_case = bytes(0).join( [data_for_split_case,part] )
            iStart = iEnd; iEnd = min(iStart + EFF_CHUNK_SIZE,size)
        return data_for_split_case
class LogOfCurrentExecutionSession:
    """Node-side accumulator of execution-session metrics.

    Metrics are stored on a ScriptExecInfo instance and shipped, pickled,
    to the centralized master-side handle on each flush.
    """

    def __init__(self, handleToCentralizedInst):
        self._remote_handle = handleToCentralizedInst
        self._current_instance = ScriptExecInfo()

    def finalizeAndPushToMaster(self):
        # ship the pickled session record to the centralized log holder
        self._remote_handle.assign(pickle.dumps(self._current_instance))

    def addFreestyleAndFlush(self, value):
        # record user freestyle data, then synchronize the master right away
        self._current_instance.freestyle = value
        self.finalizeAndPushToMaster()

    def addInfoOnLevel2(self, key, value):
        # record one metric as an attribute of the current ScriptExecInfo
        setattr(self._current_instance, key, value)
class PyScriptNode_i (Engines__POA.PyScriptNode,Generic):
    """The implementation of the PyScriptNode CORBA IDL that executes a script"""
    def __init__(self, nodeName,code,poa,my_container,logscript):
        """Initialize the node : compilation in the local context"""
        Generic.__init__(self,poa)
        self.nodeName=nodeName
        # keep both the Python wrapper (for monitoring settings) and the
        # underlying C++ container reference
        self.my_container_py = my_container
        self.my_container=my_container._container
        # register source in linecache so tracebacks display the script
        linecache.cache[nodeName]=0,None,code.split('\n'),nodeName
        self.ccode=compile(code,nodeName,'exec')
        # NOTE(review): creation of self.context is elided in this excerpt
        self.context[MY_CONTAINER_ENTRY_IN_GLBS] = self.my_container
        self._log_script = logscript
        self._current_execution_session = None
        sys.stdout.flush() ; sys.stderr.flush() # flush to correctly capture log per execution session

    # -- __del__ (def line elided in this excerpt) --
        # force removal of self.context. Don t know why it s not done by default
        self.removeAllVarsInContext()

    def getContainer(self):
        return self.my_container

    def defineNewCustomVar(self,varName,valueOfVar):
        # inject a pickled value as a named variable of the execution context
        self.context[varName] = pickle.loads(valueOfVar)

    def executeAnotherPieceOfCode(self,code):
        """Called for initialization of container lodging self."""
        # NOTE(review): try/except lines elided ; the raise below presumably
        # sits in the except branch.
        ccode=compile(code,self.nodeName,'exec')
        exec(ccode, self.context)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"","PyScriptNode (%s) : code to be executed \"%s\"" %(self.nodeName,code),0))

    def assignNewCompiledCode(self,codeStr):
        # recompile the node's script in place
        # NOTE(review): try/except lines elided here as well.
        self.ccode=compile(codeStr,self.nodeName,'exec')
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"","PyScriptNode.assignNewCompiledCode (%s) : code to be executed \"%s\"" %(self.nodeName,codeStr),0))

    def executeSimple(self, key, val):
        """
        Same as execute method except that no pickelization mecanism is implied here. No output is expected
        """
        # NOTE(review): try/except lines elided ; error path below.
        self.context.update({ "env" : [(k,v) for k,v in zip(key,val)]})
        exec(self.ccode,self.context)
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        print("".join(l)) ; sys.stdout.flush() # print error also in logs of remote container
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode: %s" % (self.nodeName),0))

    def execute(self,outargsname,argsin):
        """Execute the script stored in attribute ccode with pickled args (argsin)"""
        # NOTE(review): try/except lines, argsout initialisation and the
        # success-path return are elided in this excerpt.
        argsname,kws=pickle.loads(argsin)
        self.context.update(kws)
        exec(self.ccode, self.context)
        for arg in outargsname:
            if arg not in self.context:
                raise KeyError("There is no variable %s in context" % arg)
            argsout.append(self.context[arg])
        argsout=pickle.dumps(tuple(argsout),-1)
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        print("".join(l)) ; sys.stdout.flush() # print error also in logs of remote container
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode: %s, outargsname: %s" % (self.nodeName,outargsname),0))

    def executeFirst(self,argsin):
        """ Same than first part of self.execute to reduce memory peak."""
        def ArgInMananger(self,argsin):
            # fetch the (possibly chunked) pickled inputs, record their
            # size, then unpickle ; only the kwargs part is used
            argsInPy = SeqByteReceiver( argsin )
            data = argsInPy.data()
            self.addInfoOnLevel2("inputMem",len(data))
            _,kws=pickle.loads(data)
            # NOTE(review): `return kws` elided in this excerpt
        # NOTE(review): try/except lines elided below ; raise is error path.
        self.beginOfCurrentExecutionSession()
        self.addTimeInfoOnLevel2("startInputTime")
        # to force call of SeqByteReceiver's destructor
        kws = ArgInMananger(self,argsin)
        vis = InOutputObjVisitor()
        # NOTE(review): `for elt in kws:` loop header elided
            # fetch real data if necessary
            kws[elt] = UnProxyObjectSimple( kws[elt],vis)
        self.addInfoOnLevel2("inputHDDMem",vis)
        self.context.update(kws)
        self.addTimeInfoOnLevel2("endInputTime")
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        print("".join(l)) ; sys.stdout.flush() # print error also in logs of remote container
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode:First %s" % (self.nodeName),0))

    def executeSecond(self,outargsname):
        """ Same than second part of self.execute to reduce memory peak."""
        # NOTE(review): try/except lines and several initialisations
        # (argsout, ret, outputMem) plus a loop header are elided below.
        self.addTimeInfoOnLevel2("startExecTime")
        self.addInfoOnLevel2("measureTimeResolution",self.my_container_py.monitoringtimeresms())
        # run the user script under CPU/memory monitoring
        with GenericPythonMonitoringLauncherCtxMgr( CPUMemoryMonitoring( self.my_container_py.monitoringtimeresms() ) ) as monitoringParams:
            exec(self.ccode, self.context)
            cpumeminfo = ReadCPUMemInfo( monitoringParams )
        self.addInfoOnLevel2("CPUMemDuringExec",cpumeminfo)
        self.addTimeInfoOnLevel2("endExecTime")
        self.addTimeInfoOnLevel2("startOutputTime")
        for arg in outargsname:
            if arg not in self.context:
                raise KeyError("There is no variable %s in context" % arg)
            argsout.append(self.context[arg])
        vis = InOutputObjVisitor()
        # NOTE(review): loop over outputs elided ; each output is spooled
        # (proxied on disk if big) and exposed through a SenderByte servant
            # the proxy mecanism is catched here
            argPickle = SpoolPickleObject( arg, vis )
            retArg = SenderByte_i( self.poa,argPickle )
            id_o = self.poa.activate_object(retArg)
            retObj = self.poa.id_to_reference(id_o)
            ret.append( retObj._narrow( SALOME.SenderByte ) )
            outputMem += len(argPickle)
        self.addInfoOnLevel2("outputMem",outputMem)
        self.addInfoOnLevel2("outputHDDMem",vis)
        self.addTimeInfoOnLevel2("endOutputTime")
        self.endOfCurrentExecutionSession()
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        print("".join(l)) ; sys.stdout.flush() # print error also in logs of remote container
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode:Second %s, outargsname: %s" % (self.nodeName,outargsname),0))

    def listAllVarsInContext(self):
        # user variables = everything but dunders and the container entry
        pat = re.compile("^__([a-z]+)__$")
        return [elt for elt in self.context if not pat.match(elt) and elt != MY_CONTAINER_ENTRY_IN_GLBS]

    def removeAllVarsInContext(self):
        for elt in self.listAllVarsInContext():
            del self.context[elt]

    def getValueOfVarInContext(self,varName):
        # NOTE(review): try/except lines elided in the three accessors below;
        # each raise is its except branch.
        return pickle.dumps(self.context[varName],-1)
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode: %s" %self.nodeName,0))

    def assignVarInContext(self, varName, value):
        # assigns into the first slot of the (list-like) context entry
        self.context[varName][0] = pickle.loads(value)
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode: %s" %self.nodeName,0))

    def callMethodOnVarInContext(self, varName, methodName, args):
        # call a method on the held object with unpickled args, return the
        # pickled result
        return pickle.dumps( getattr(self.context[varName][0],methodName)(*pickle.loads(args)),-1 )
        exc_typ,exc_val,exc_fr=sys.exc_info()
        l=traceback.format_exception(exc_typ,exc_val,exc_fr)
        raise SALOME.SALOME_Exception(SALOME.ExceptionStruct(SALOME.BAD_PARAM,"".join(l),"PyScriptNode: %s" %self.nodeName,0))

    def beginOfCurrentExecutionSession(self):
        # open a fresh log record and expose it to the user script
        self._current_execution_session = LogOfCurrentExecutionSession( self._log_script.addExecutionSession() )
        self.context[MY_PERFORMANCE_LOG_ENTRY_IN_GLBS] = self._current_execution_session

    def endOfCurrentExecutionSession(self):
        self._current_execution_session.finalizeAndPushToMaster()
        self._current_execution_session = None

    def addInfoOnLevel2(self, key, value):
        self._current_execution_session.addInfoOnLevel2(key, value)

    def addTimeInfoOnLevel2(self, key):
        # timestamp the given key in the current session log
        from datetime import datetime
        self._current_execution_session.addInfoOnLevel2(key,datetime.now())