SET(${PROJECT_NAME_UC}_MAJOR_VERSION 7)
SET(${PROJECT_NAME_UC}_MINOR_VERSION 4)
-SET(${PROJECT_NAME_UC}_PATCH_VERSION 0)
+SET(${PROJECT_NAME_UC}_PATCH_VERSION 1)
SET(${PROJECT_NAME_UC}_VERSION
${${PROJECT_NAME_UC}_MAJOR_VERSION}.${${PROJECT_NAME_UC}_MINOR_VERSION}.${${PROJECT_NAME_UC}_PATCH_VERSION})
-SET(${PROJECT_NAME_UC}_VERSION_DEV 0)
+SET(${PROJECT_NAME_UC}_VERSION_DEV 1)
# Find KERNEL (optional)
# ==============
IF(EXISTS ${GUI_ROOT_DIR})
LIST(APPEND CMAKE_MODULE_PATH "${GUI_ROOT_DIR}/adm_local/cmake_files")
FIND_PACKAGE(SalomeGUI)
- FULL_GUI(TRUE) # check whether GUI builded in full mode and with CORBA
+ SALOME_GUI_WITH_CORBA() # check whether GUI built with CORBA
+ SALOME_GUI_MODE(SALOME_USE_QXGRAPHVIEWER) # check whether GUI is built with the QxGraphViewer
ELSE(EXISTS ${GUI_ROOT_DIR})
MESSAGE(FATAL_ERROR "We absolutely need a Salome GUI, please define GUI_ROOT_DIR or turn option SALOME_BUILD_GUI to OFF !")
ENDIF(EXISTS ${GUI_ROOT_DIR})
# Ensure the command is run with the given PYTHONPATH
IF(WIN32 AND NOT CYGWIN)
SET(SPHINX_EXECUTABLE ${SPHINX_EXECUTABLE})
+ SET(SPHINX_APIDOC_EXECUTABLE ${SPHINX_APIDOC_EXECUTABLE})
ELSE()
SET(SPHINX_EXECUTABLE /usr/bin/env PYTHONPATH="${SPHINX_PYTHONPATH}:$$PYTHONPATH" ${SPHINX_EXECUTABLE})
+ SET(SPHINX_APIDOC_EXECUTABLE /usr/bin/env PYTHONPATH="${SPHINX_PYTHONPATH}:$$PYTHONPATH" ${SPHINX_APIDOC_EXECUTABLE})
ENDIF()
MARK_AS_ADVANCED(SPHINX_EXECUTABLE)
IF(SPHINX_FOUND)
SALOME_ACCUMULATE_ENVIRONMENT(PATH ${SPHINX_EXECUTABLE})
+ SALOME_ACCUMULATE_ENVIRONMENT(PATH ${SPHINX_APIDOC_EXECUTABLE})
SALOME_ACCUMULATE_ENVIRONMENT(PYTHONPATH ${SPHINX_PYTHONPATH})
ENDIF()
#
FIND_PROGRAM(SPHINX_EXECUTABLE sphinx-build PATH_SUFFIXES Scripts)
+FIND_PROGRAM(SPHINX_APIDOC_EXECUTABLE sphinx-apidoc PATH_SUFFIXES Scripts)
# Get root dir locally, going up two levels from the exec:
GET_FILENAME_COMPONENT(_tmp_ROOT_DIR "${SPHINX_EXECUTABLE}" PATH)
ENDIF()
# Handle the standard arguments of the find_package() command:
INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(Sphinx REQUIRED_VARS SPHINX_EXECUTABLE)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Sphinx REQUIRED_VARS SPHINX_EXECUTABLE SPHINX_APIDOC_EXECUTABLE)
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Children** - Fold/Unfold all direct Children Nodes of selected Node. |
+| | |See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Elementary** - Fold/Unfold all Elementary Nodes of selected Node |
+| | |recursively. See :ref:`shrink_expand_nodes`. |
+| | | |
| | |**Compute Links** - Recompute links, useful for large schemas, when automatic link |
| | |calculation have been deactivated, see :ref:`edition_toolbar`. |
| | | |
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Children** - Fold/Unfold all direct Children Nodes of selected Node. |
+| | |See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Elementary** - Fold/Unfold all Elementary Nodes of selected Node |
+| | |recursively. See :ref:`shrink_expand_nodes`. |
+| | | |
| | |**Compute Links** - Recompute links, useful for large schemas, when automatic link |
| | |calculation have been deactivated, see :ref:`edition_toolbar`. |
| | | |
| | |the screen |
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
+| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+--------------------------------+-------------+-------------------------------------------------------------------------------------+
Port and link objects.
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Children** - Fold/Unfold all direct Children Nodes of selected Node. |
+| | |See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Elementary** - Fold/Unfold all Elementary Nodes of selected Node |
+| | |recursively. See :ref:`shrink_expand_nodes`. |
+| | | |
| | |**Compute Links** - Recompute links, useful for large schemas, when automatic link |
| | |calculation have been deactivated, see :ref:`edition_toolbar`. |
| | | |
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Children** - Fold/Unfold all direct Children Nodes of selected Node. |
+| | |See :ref:`shrink_expand_nodes`. |
+| | | |
+| | |**Shrink/Expand Elementary** - Fold/Unfold all Elementary Nodes of selected Node |
+| | |recursively. See :ref:`shrink_expand_nodes`. |
+| | | |
| | |**Compute Links** - Recompute links, useful for large schemas, when automatic link |
| | |calculation have been deactivated, see :ref:`edition_toolbar`. |
| | | |
| | | |
| | |**Center on Node** - Center the 2D view on selected Node, without resizing. |
| | | |
+| | |**Shrink/Expand** - Fold/Unfold the selected Node. See :ref:`shrink_expand_nodes`. |
+| | | |
+--------------------------------+-------------+-------------------------------------------------------------------------------------+
.. centered::
The plugin can be a C++ plugin implemented in a dynamic library (.so file) or a Python plugin implemented in a Python module (.py).
It is possible to implement two kinds of algorithm : synchronous or asynchronous.
+The algorithm uses a pool of samples to be evaluated.
+When all the samples of the pool are evaluated, the algorithm stops.
+
Synchronous algorithm
--------------------------------------------------
In synchronous mode, the OptimizerLoop calls the algorithm to know what are the types of the input port (sample sent to the internal node),
- **getTCForIn**, this method must return the YACS type of the input port of the internal node
- **getTCForOut**, this method must return the YACS type of the output port of the internal node
+- **getTCForAlgoInit** (optional), this method returns the type of the "algoInit" port, string if undefined
+- **getTCForAlgoResult** (optional), this method returns the type of the "algoResult" port, string if undefined
- **initialize** (optional), this method is called during the algorithm initialization
- **start**, this method is called at the beginning of iterations
- **takeDecision**, this method is called at each iteration
- **finish** (optional), this method is called to finish the algorithm at the end of the iteration process
+- **getAlgoResult** (optional), this method returns the value of the "algoResult" port, "NULL" if undefined
In Python you need to implement another method:
''''''''''''''''''''
Here is a small example of a C++ synchronous algorithm:
-.. code-block:: cpp
-
- #include <cmath>
-
- #include "OptimizerAlg.hxx"
-
- using namespace YACS::ENGINE;
-
- extern "C"
- {
- OptimizerAlgBase * createOptimizerAlgSyncExample(Pool * pool);
- }
-
- class OptimizerAlgSyncExample : public OptimizerAlgSync
- {
- private:
- int _idTest;
- TypeCode *_tcIn;
- TypeCode *_tcOut;
- public:
- OptimizerAlgSyncExample(Pool *pool);
- virtual ~OptimizerAlgSyncExample();
- TypeCode *getTCForIn() const;
- TypeCode *getTCForOut() const;
- void start();
- void takeDecision();
- void initialize(const Any *input) throw(YACS::Exception);
- void finish();
- };
-
- OptimizerAlgSyncExample::OptimizerAlgSyncExample(Pool *pool)
- : OptimizerAlgSync(pool), _tcIn(0), _tcOut(0), _idTest(0)
- {
- _tcIn=new TypeCode(Double);
- _tcOut=new TypeCode(Int);
- }
-
- OptimizerAlgSyncExample::~OptimizerAlgSyncExample()
- {
- _tcIn->decrRef();
- _tcOut->decrRef();
- }
-
- //! Return the typecode of the expected input type
- TypeCode * OptimizerAlgSyncExample::getTCForIn() const
- {
- return _tcIn;
- }
-
- //! Return the typecode of the expected output type
- TypeCode * OptimizerAlgSyncExample::getTCForOut() const
- {
- return _tcOut;
- }
-
- //! Start to fill the pool with samples to evaluate
- void OptimizerAlgSyncExample::start()
- {
- _idTest=0;
- Any *val=AtomAny::New(1.2);
- _pool->pushInSample(4,val);
- val=AtomAny::New(3.4);
- _pool->pushInSample(9,val);
- }
-
- //! This method is called each time a sample has been evaluated.
- /*!
- * It can either add new samples to evaluate in the pool, do nothing (wait
- * for more samples), or empty the pool to finish the evaluation.
- */
- void OptimizerAlgSyncExample::takeDecision()
- {
- if(_idTest==1)
- {
- Any *val=AtomAny::New(5.6);
- _pool->pushInSample(16,val);
- val=AtomAny::New(7.8);
- _pool->pushInSample(25,val);
- val=AtomAny::New(9. );
- _pool->pushInSample(36,val);
- val=AtomAny::New(12.3);
- _pool->pushInSample(49,val);
- }
- else if(_idTest==4)
- {
- Any *val=AtomAny::New(45.6);
- _pool->pushInSample(64,val);
- val=AtomAny::New(78.9);
- _pool->pushInSample(81,val);
- }
- else
- {
- Any *tmp= _pool->getCurrentInSample();
- if(fabs(tmp->getDoubleValue()-45.6)<1.e-12)
- _pool->destroyAll();
- }
- _idTest++;
- }
-
- //! Optional method to initialize the algorithm.
- /*!
- * For now, the parameter input is always NULL. It might be used in the
- * future to initialize an algorithm with custom data.
- */
- void OptimizerAlgSyncExample::initialize(const Any *input)
- throw (YACS::Exception)
- {
- }
-
- /*!
- * Optional method called when the algorithm has finished, successfully or
- * not, to perform any necessary clean up.
- */
- void OptimizerAlgSyncExample::finish()
- {
- }
-
- //! Factory method to create the algorithm.
- OptimizerAlgBase * createOptimizerAlgSyncExample(Pool *pool)
- {
- return new OptimizerAlgSyncExample(pool);
- }
-
+.. literalinclude:: ../src/yacsloader/Test/OptimizerAlgSyncExample.cxx
+ :language: cpp
Here, the entry point in the dynamic library is the name of the factory function : createOptimizerAlgSyncExample
that returns an instance of the OptimizerAlgSyncExample class that implements the algorithm.
Python plugin example
''''''''''''''''''''''
-Here, the same example of a synchronous algorithm in Python::
-
- import SALOMERuntime
-
- class myalgosync(SALOMERuntime.OptimizerAlgSync):
- def __init__(self):
- SALOMERuntime.OptimizerAlgSync.__init__(self, None)
- r=SALOMERuntime.getSALOMERuntime()
- self.tin=r.getTypeCode("double")
- self.tout=r.getTypeCode("int")
-
- def setPool(self,pool):
- """Must be implemented to set the pool"""
- self.pool=pool
-
- def getTCForIn(self):
- """returns typecode of type expected as Input"""
- return self.tin
-
- def getTCForOut(self):
- """returns typecode of type expected as Output"""
- return self.tout
-
- def initialize(self,input):
- """Optional method called on initialization. Do nothing here"""
-
- def start(self):
- """Start to fill the pool with samples to evaluate."""
- self.iter=0
- self.pool.pushInSample(4,1.2)
- self.pool.pushInSample(9,3.4)
-
- def takeDecision(self):
- """ This method is called each time a sample has been evaluated. It can
- either add new samples to evaluate in the pool, do nothing (wait for
- more samples), or empty the pool to finish the evaluation.
- """
- currentId=self.pool.getCurrentId()
-
- if self.iter==1:
- self.pool.pushInSample(16,5.6)
- self.pool.pushInSample(25,7.8)
- self.pool.pushInSample(36,9.)
- self.pool.pushInSample(49,12.3)
- elif self.iter==4:
- self.pool.pushInSample(64,45.6)
- self.pool.pushInSample(81,78.9)
- else:
- val=self.pool.getCurrentInSample()
- if abs(val.getDoubleValue()-45.6) < 1.e-12:
- self.pool.destroyAll()
- self.iter=self.iter+1
-
- def finish(self):
- """Optional method called when the algorithm has finished, successfully
- or not, to perform any necessary clean up. Do nothing here"""
+Here, the same example of a synchronous algorithm in Python:
+
+.. literalinclude:: ../src/yacsloader/Test/algosyncexample.py
Here, the entry point in the Python module is directly the name of the class that implements the algorithm : myalgosync.
- **getTCForIn**, this method must return the YACS type of the input port of the internal node
- **getTCForOut**, this method must return the YACS type of the output port of the internal node
+- **getTCForAlgoInit** (optional), this method returns the type of the "algoInit" port, string if undefined
+- **getTCForAlgoResult** (optional), this method returns the type of the "algoResult" port, string if undefined
- **initialize** (optional), this method is called during the algorithm initialization
- **startToTakeDecision**, this method is called to start the iteration process in a separate thread. It is the body of the algorithm.
- **finish** (optional), this method is called to finish the algorithm at the end of the iteration process
+- **getAlgoResult** (optional), this method returns the value of the "algoResult" port, "NULL" if undefined
In Python you need to implement another method:
''''''''''''''''''''
Here is a small example of a C++ asynchronous algorithm:
-.. code-block:: cpp
-
- #include "OptimizerAlg.hxx"
-
- using namespace YACS::ENGINE;
-
- extern "C"
- {
- OptimizerAlgBase * createOptimizerAlgASyncExample(Pool * pool);
- }
-
- class OptimizerAlgASyncExample : public OptimizerAlgASync
- {
- private:
- TypeCode * _tcIn;
- TypeCode * _tcOut;
- public:
- OptimizerAlgASyncExample(Pool * pool);
- virtual ~OptimizerAlgASyncExample();
- TypeCode * getTCForIn() const;
- TypeCode * getTCForOut() const;
- void startToTakeDecision();
- };
-
- OptimizerAlgASyncExample::OptimizerAlgASyncExample(Pool * pool)
- : OptimizerAlgASync(pool), _tcIn(0), _tcOut(0)
- {
- _tcIn = new TypeCode(Double);
- _tcOut = new TypeCode(Int);
- }
-
- OptimizerAlgASyncExample::~OptimizerAlgASyncExample()
- {
- _tcIn->decrRef();
- _tcOut->decrRef();
- }
-
- //! Return the typecode of the expected input type
- TypeCode *OptimizerAlgASyncExample::getTCForIn() const
- {
- return _tcIn;
- }
-
- //! Return the typecode of the expected output type
- TypeCode *OptimizerAlgASyncExample::getTCForOut() const
- {
- return _tcOut;
- }
-
- //! This method is called only once to launch the algorithm.
- /*!
- * It must first fill the pool with samples to evaluate and call
- * signalMasterAndWait() to block until a sample has been evaluated. When
- * returning from this method, it MUST check for an eventual termination
- * request (with the method isTerminationRequested()). If the termination
- * is requested, the method must perform any necessary cleanup and return
- * as soon as possible. Otherwise it can either add new samples to evaluate
- * in the pool, do nothing (wait for more samples), or empty the pool and
- * return to finish the evaluation.
- */
- void OptimizerAlgASyncExample::startToTakeDecision()
- {
- double val = 1.2;
- for (int i=0 ; i<5 ; i++) {
- // push a sample in the input of the slave node
- _pool->pushInSample(i, AtomAny::New(val));
- // wait until next sample is ready
- signalMasterAndWait();
- // check error notification
- if (isTerminationRequested()) {
- _pool->destroyAll();
- return;
- }
-
- // get a sample from the output of the slave node
- Any * v = _pool->getCurrentOutSample();
- val += v->getIntValue();
- }
-
- // in the end destroy the pool content
- _pool->destroyAll();
- }
-
- //! Factory method to create the algorithm.
- OptimizerAlgBase * createOptimizerAlgASyncExample(Pool * pool)
- {
- return new OptimizerAlgASyncExample(pool);
- }
+.. literalinclude:: ../src/yacsloader/Test/OptimizerAlgASyncExample.cxx
+ :language: cpp
Here, the entry point in the dynamic library is the name of the factory function : createOptimizerAlgASyncExample
Python plugin example
''''''''''''''''''''''''
-Here is an example of an asynchronous algorithm implemented in Python::
-
- import SALOMERuntime
-
- class myalgoasync(SALOMERuntime.OptimizerAlgASync):
- def __init__(self):
- SALOMERuntime.OptimizerAlgASync.__init__(self, None)
- r=SALOMERuntime.getSALOMERuntime()
- self.tin=r.getTypeCode("double")
- self.tout=r.getTypeCode("int")
-
- def setPool(self,pool):
- """Must be implemented to set the pool"""
- self.pool=pool
-
- def getTCForIn(self):
- """returns typecode of type expected as Input"""
- return self.tin
-
- def getTCForOut(self):
- """returns typecode of type expected as Output"""
- return self.tout
-
- def startToTakeDecision(self):
- """This method is called only once to launch the algorithm. It must
- first fill the pool with samples to evaluate and call
- self.signalMasterAndWait() to block until a sample has been
- evaluated. When returning from this method, it MUST check for an
- eventual termination request (with the method
- self.isTerminationRequested()). If the termination is requested, the
- method must perform any necessary cleanup and return as soon as
- possible. Otherwise it can either add new samples to evaluate in the
- pool, do nothing (wait for more samples), or empty the pool and
- return to finish the evaluation.
- """
- val=1.2
- for iter in xrange(5):
- #push a sample in the input of the slave node
- self.pool.pushInSample(iter,val)
- #wait until next sample is ready
- self.signalMasterAndWait()
- #check error notification
- if self.isTerminationRequested():
- self.pool.destroyAll()
- return
-
- #get a sample from the output of the slave node
- currentId=self.pool.getCurrentId()
- v=self.pool.getCurrentOutSample()
- val=val+v.getIntValue()
-
- #in the end destroy the pool content
- self.pool.destroyAll()
+Here is an example of an asynchronous algorithm implemented in Python:
+
+.. literalinclude:: ../src/yacsloader/Test/algoasyncexample.py
Here, the entry point in the Python module is directly the name of the class that implements the algorithm : myalgoasync.
+Managing the pool of samples
+---------------------------------
+
+Samples can be added to the pool at the initialization of the algorithm or
+every time a sample is evaluated (while "taking decision").
+The algorithm stops taking decisions when every sample is evaluated.
+
+A sample has:
+
+- an identifier - *Id*
+- a priority - it is used to choose the order of evaluation
+- a value - *In*
+- an evaluated or computed value - *Out*
+
+The current sample is the sample used by the latest terminated evaluation.
+
+These are the methods needed to manage the pool of samples:
+
+.. code-block:: cpp
+
+ class Pool
+ {
+ //...
+ public:
+ //For algorithm use
+ int getCurrentId() const ;
+ Any *getCurrentInSample() const ;
+ Any *getCurrentOutSample() const ;
+ Any *getOutSample(int id);
+ void pushInSample(int id, Any *inSample, unsigned char priority = 0);
+ void destroyAll();
+ //...
+ }
+
+In C++, the samples are of type ``YACS::ENGINE::Any``, in order to support any
+type supported by YACS. For conversion to standard types, use:
+
+- ``getIntValue``
+- ``getBoolValue``
+- ``getDoubleValue``
+- ``getStringValue``
+
+It is possible to create a pointer to a new object with:
+
+- ``YACS::ENGINE::AtomAny::New``
+
+For further information, see `include/salome/Any.hxx <file:../../../../../include/salome/Any.hxx>`_.
C++ algorithm calling Python code
--------------------------------------------------
The user can change the links representation with the options **simplify links** which tries to make the links as direct as possible with
a slight CPU cost and **separate links** which tries to avoid links superposition with again a CPU cost.
+.. _shrink_expand_nodes:
+
+Shrink/Expand nodes
+-------------------
+This functionality allows folding/unfolding any node(s) in order to decrease the schema size.
+It can be useful if the user deals with a large schema that contains a large number of nodes.
+
+There are 3 possible ways to make the schema more compact:
+
++ call **shrink/expand** context menu or **double mouse click** on any node to fold/unfold this node;
+
++ call **shrink/expand children** context menu or **Ctrl + double mouse click** on any composed node to fold/unfold all direct children nodes of selected node;
+
++ call **shrink/expand elementary** context menu or **Ctrl + Shift + double mouse click** on any composed node to fold/unfold all elementary nodes of selected node recursively.
+
SALOME application is used. Otherwise, the PYTHONPATH environment variable has to be set to
<YACS_ROOT_DIR>/lib/pythonX.Y/site-packages/salome.
+When you build your own Salome application and use your own modules and components (using YACSGEN for example), you may need to load
+the module catalog::
+
+ import SALOMERuntime
+ SALOMERuntime.RuntimeSALOME_setRuntime()
+ salome_runtime = SALOMERuntime.getSALOMERuntime()
+ import salome
+ salome.salome_init()
+ mc = salome.naming_service.Resolve('/Kernel/ModulCatalog')
+ ior = salome.orb.object_to_string(mc)
+ session_catalog = salome_runtime.loadCatalog("session", ior)
+ salome_runtime.addCatalog(session_catalog)
+
+
.. _loadxml:
Create a calculation scheme by loading an XML file
Dataflow link
++++++++++++++++++++++++++++
-The first step in defining a dataflow link is to obtain port objects using one of the methods described above.
-The edAddDFLink method for the context node is then used, transferring the two ports to be connected to it.
-The following gives an example of a dataflow link between the output port p1 of node n3 and the input port of node n4::
+The first step in defining a dataflow link is to obtain port objects using one of the methods described above.
+Then, the edAddLink method links an output port to an input port::
pout=n3.getOutputPort("p1")
pin=n4.getInputPort("p1")
- p.edAddDFLink(pout,pin)
+ p.edAddLink(pout,pin)
-Data link
-++++++++++++++++++++++++++++
-A data link is defined as being a dataflow link using the edAddLink method instead of edAddDFLink.
-The same example as above with a data link::
+Most of the time, when you need a dataflow link between two ports, you also need a control link between the nodes
+of the ports. In this case you can use the method edAddDFLink::
pout=n3.getOutputPort("p1")
pin=n4.getInputPort("p1")
- p.edAddLink(pout,pin)
+ p.edAddDFLink(pout,pin)
+
+edAddDFLink is equivalent to edAddCFLink followed by edAddLink.
Initialising an input data port
'''''''''''''''''''''''''''''''''''''''''''''''
r = pilot.getRuntime()
p=r.createProc("pr")
ti=p.getTypeCode("int")
+ td=p.getTypeCode("double")
#node1
n1=r.createScriptNode("","node1")
p.edAddChild(n1)
p.edAddCFLink(n1,n2)
p.edAddCFLink(n1,n4)
#dataflow links
- pout=n3.getOutputPort("p1")
- pin=n4.getInputPort("p1")
- #dataflow links
- p.edAddDFLink(n1.getOutputPort("p1"),n2.getInputPort("p1"))
- p.edAddDFLink(n1.getOutputPort("p1"),n4.getInputPort("p1"))
+ p.edAddLink(n1.getOutputPort("p1"),n2.getInputPort("p1"))
+ p.edAddLink(n1.getOutputPort("p1"),n4.getInputPort("p1"))
#initialisation ports
n1.getInputPort("p1").edInitPy(5)
n2.setScript("p1=2*p1")
n2.edAddInputPort("p1",ti)
n2.edAddOutputPort("p1",ti)
- b.edAddCFLink(n1,n2)
b.edAddDFLink(n1.getOutputPort("p1"),n2.getInputPort("p1"))
.. _py_forloop:
from module_generator import Generator,Module,PYComponent
from module_generator import CPPComponent,Service,F77Component
+ from module_generator import Library
If you want to import all definitions, you can do that::
body="outputport=myfunc(inputport);",
),
],
- libs="-L/usr/local/mysoft -lmybib",
- rlibs="-Wl,--rpath -Wl,/usr/local/mysoft"
+ libs=[Library(name="mybib", path="/usr/local/mysoft")],
+ rlibs="/usr/local/mysoft"
)
+**libs** contains a list of **Library** objects. On Linux, if the name of the file is "libmybib.so",
+the **name** of the library will be "mybib". The *path* shows where the library is installed.
+
The **rlibs** attribute is not compulsory but it can be used to indicate a search path for dynamic libraries in execution.
**libs** is used during the link phase. **rlibs** is only used during execution, it avoids the need to set the LD_LIBRARY_PATH
environment variable to find the dynamic library.
defs="""#include "myinclude.h" """
-The includes path will be specified in the **includes** attribute of the component in the following form::
+The include paths will be specified in the **includes** attribute of the component in the following form::
defs="""#include "myinclude.h"
body="outputport=myfunc(inputport);",
),
],
- libs="-L/usr/local/mysoft -lmybib",
- rlibs="-Wl,--rpath -Wl,/usr/local/mysoft",
- includes="-I/usr/local/mysoft/include",
+ libs=[Library(name="mybib", path="/usr/local/mysoft")],
+ rlibs="/usr/local/mysoft",
+ includes="/usr/local/mysoft/include",
)
+Multiple include paths should be separated by spaces or the end-of-line character (\\n).
+
Adding sources
""""""""""""""""""""""""""""""""""""""""""""""""""""
It is possible to add some source files with the **sources** attribute (a list of source files will be given).
),
],
sources=["myfunc.cpp"],
- includes="-I/usr/local/mysoft/include",
+ includes="/usr/local/mysoft/include",
)
body="chdir(c);"
),
],
- libs="-L/usr/local/fcompo -lfcompo",
- rlibs="-Wl,--rpath -Wl,/usr/local/fcompo"
+ libs=[Library(name="fcompo", path="/usr/local/fcompo")],
+ rlibs="/usr/local/fcompo"
)
The Fortran “compo3” component has dataflow and datastream ports like the C++ component. The Fortran dynamic library
Once this generator has been created, simply call its commands to perform the necessary operations.
- SALOME module generation: ``g.generate()``
-- initialise automake: ``g.bootstrap()``
-- execute the configure script: ``g.configure()``
+- build configuration: ``g.configure()``
- compilation: ``g.make()``
- installation in the directory <prefix>: ``g.install()``
- create a SALOME application in the directory **appli_dir**::
body="chdir(c);"
),
],
- libs="-L/local/chris/modulegen/YACSGEN/fcompo -lfcompo",
- rlibs="-Wl,--rpath -Wl,/local/chris/modulegen/YACSGEN/fcompo")
+ libs=[Library(name="fcompo", path="/local/chris/modulegen/YACSGEN/fcompo")],
+ rlibs="/local/chris/modulegen/YACSGEN/fcompo")
m=Module("mymodule",components=[c1],prefix="Install")
g=Generator(m,context)
g.generate()
- g.bootstrap()
g.configure()
g.make()
g.install()
g=Generator(Module("astmod",components=[c1,c2],prefix=install_prefix),context)
g.generate()
- g.bootstrap()
g.configure()
g.make()
g.install()
.. autoclass:: Module
.. autoclass:: Generator
- :members: generate, bootstrap, configure, make, install, make_appli
+ :members: generate, configure, make, install, make_appli
.. autofunction:: add_type
class Any;
class OptimizerLoop;
+ /*! \brief Pool used to manage the samples of the optimizer loop plugin.
+ *
+ * Every sample has an identifier (Id), a priority, an initial value (In)
+ * and an evaluation value (Out).
+ * The current sample is the sample used by the latest terminated evaluation.
+ */
class YACSLIBENGINE_EXPORT Pool
{
friend class OptimizerLoop;
_dragModifier=false;
setColumnCount(1);
+ setHeaderHidden( true );
addCatalog(_builtinCatalog, "Built In");
addCatalog(_sessionCatalog, "Current Session");
_dwTree = new QDockWidget(_parent);
_dwTree->setVisible(false);
_dwTree->setWindowTitle("Tree View: edition mode");
- _dwTree->setObjectName("Tree View");
+ _dwTree->setObjectName("yacsTreeViewDock");
_parent->addDockWidget(Qt::LeftDockWidgetArea, _dwTree);
_dwStacked = new QDockWidget(_parent);
_dwStacked->setVisible(false);
_dwStacked->setWindowTitle("Input Panel");
- _dwStacked->setObjectName("Input Panel");
+ _dwStacked->setObjectName("yacsInputPanelDock");
_dwStacked->setMinimumWidth(270); // --- force a minimum until display
_parent->addDockWidget(Qt::RightDockWidgetArea, _dwStacked);
_dwCatalogs = new QDockWidget(_parent);
_dwCatalogs->setVisible(false);
_dwCatalogs->setWindowTitle("Catalogs");
- _dwCatalogs->setObjectName("Catalogs");
+ _dwCatalogs->setObjectName("yacsCatalogsDock");
_parent->addDockWidget(Qt::RightDockWidgetArea, _dwCatalogs);
_catalogsWidget = new CatalogWidget(_dwCatalogs,
_builtinCatalog,
tr("shrink/expand"), tr("shrink or expand the selected node"),
0, _parent, false, this, SLOT(onShrinkExpand()));
+ _shrinkExpandChildren = _wrapper->createAction(getMenuId(), tr("shrink or expand direct children of the selected node"), QIcon("icons:shrinkExpand.png"),
+ tr("shrink/expand children"), tr("shrink or expand direct children of the selected node"),
+ 0, _parent, false, this, SLOT(onShrinkExpandChildren()));
+
+ _shrinkExpandElementaryRecursively = _wrapper->createAction(getMenuId(), tr("shrink or expand elementary nodes of the selected node recursively"), QIcon("icons:shrinkExpand.png"),
+ tr("shrink/expand elementary"), tr("shrink or expand elementary nodes of the selected node recursively"),
+ 0, _parent, false, this, SLOT(onShrinkExpandElementaryRecursively()));
+
_toggleStraightLinksAct = _wrapper->createAction(getMenuId(), tr("draw straight or orthogonal links"), QIcon("icons:straightLink.png"),
tr("straight/orthogonal"), tr("draw straight or orthogonal links"),
0, _parent, true, this, SLOT(onToggleStraightLinks(bool)));
void GenericGui::createTools()
{
- int aToolId = _wrapper->createTool ( tr( "YACS Toolbar" ) );
+ int aToolId = _wrapper->createTool ( tr( "YACS Toolbar" ), QString( "YACSToolbar" ) );
_wrapper->createTool( _newSchemaAct, aToolId );
_wrapper->createTool( _importSchemaAct, aToolId );
_wrapper->createTool( _wrapper->separator(), aToolId );
_guiEditor->shrinkExpand();
}
+void GenericGui::onShrinkExpandChildren() {
+ DEBTRACE("GenericGui::onShrinkExpandChildren");
+ _guiEditor->shrinkExpand(Qt::ControlModifier|Qt::ShiftModifier);
+}
+
+void GenericGui::onShrinkExpandElementaryRecursively() {
+ DEBTRACE("GenericGui::onShrinkExpandElementaryRecursively");
+ _guiEditor->shrinkExpand(Qt::ControlModifier);
+}
+
void GenericGui::onToggleStraightLinks(bool checked)
{
Scene::_straightLinks = checked;
QAction *_zoomToBlocAct;
QAction *_centerOnNodeAct;
QAction *_shrinkExpand;
+ QAction *_shrinkExpandChildren;
+ QAction *_shrinkExpandElementaryRecursively;
QAction *_toggleStraightLinksAct;
QAction *_toggleAutomaticComputeLinkAct;
void onZoomToBloc();
void onCenterOnNode();
void onShrinkExpand();
+ void onShrinkExpandChildren();
+ void onShrinkExpandElementaryRecursively();
void onToggleStraightLinks(bool checked);
void onToggleAutomaticComputeLinks(bool checked);
void onToggleSimplifyLinks(bool checked);
/*!
* Subject shrink or expand, command from popup menu: needs a valid selection
*/
-void GuiEditor::shrinkExpand() {
+void GuiEditor::shrinkExpand(Qt::KeyboardModifiers kbModifiers) {
DEBTRACE("GuiEditor::shrinkExpand");
Subject* sub = QtGuiContext::getQtCurrent()->getSelectedSubject();
return;
};
- if (sni->isExpanded()) {
- sni->setExpanded(false);
- } else {
- sni->setExpanded(true);
- };
- sni->reorganizeShrinkExpand();
+ ShrinkMode aShrinkMode = CurrentNode;
+ if (kbModifiers == Qt::ControlModifier) {
+ aShrinkMode = ElementaryNodes;
+ } else if (kbModifiers == (Qt::ShiftModifier|Qt::ControlModifier)) {
+ aShrinkMode = ChildrenNodes;
+ }
+
+ sni->reorganizeShrinkExpand(aShrinkMode);
sni->showOutScopeLinks();
sni->updateLinks();
}
void PutSubjectInBloc();
std::string PutGraphInBloc();
void PutGraphInNode(std::string typeNode);
- void shrinkExpand();
+ void shrinkExpand(Qt::KeyboardModifiers kbModifiers = Qt::NoModifier);
void rebuildLinks();
void arrangeNodes(bool isRecursive);
void arrangeProc();
menu.addAction(gmain->_zoomToBlocAct);
menu.addAction(gmain->_centerOnNodeAct);
menu.addAction(gmain->_shrinkExpand);
+ menu.addAction(gmain->_shrinkExpandChildren);
+ menu.addAction(gmain->_shrinkExpandElementaryRecursively);
menu.addAction(gmain->_computeLinkAct);
// menu.addAction(gmain->_toggleAutomaticComputeLinkAct);
// menu.addAction(gmain->_toggleSimplifyLinkAct);
menu.addAction(gmain->_zoomToBlocAct);
menu.addAction(gmain->_centerOnNodeAct);
menu.addAction(gmain->_shrinkExpand);
+ menu.addAction(gmain->_shrinkExpandChildren);
+ menu.addAction(gmain->_shrinkExpandElementaryRecursively);
menu.addAction(gmain->_computeLinkAct);
// menu.addAction(gmain->_toggleAutomaticComputeLinkAct);
// menu.addAction(gmain->_toggleSimplifyLinkAct);
_children.remove(child);
}
-void SceneComposedNodeItem::reorganizeShrinkExpand() {
- DEBTRACE("SceneComposedNodeItem::reorganizeShrinkExpand " << _expanded << " " << _label.toStdString());
- bool isExpanding = isExpanded();
-
+void SceneComposedNodeItem::updateControlLinks(bool toExpand)
+{
//update control links
- std::list<SubjectControlLink*> lscl=dynamic_cast<SubjectNode*>(_subject)->getSubjectControlLinks();
+ std::list<SubjectControlLink*> lscl=dynamic_cast<SubjectNode*>(getSubject())->getSubjectControlLinks();
for (std::list<SubjectControlLink*>::const_iterator it = lscl.begin(); it != lscl.end(); ++it) {
SceneLinkItem* lk = dynamic_cast<SceneLinkItem*>(QtGuiContext::getQtCurrent()->_mapOfSceneItem[*it]);
};
if (b1 && b2) {
- if (isExpanding) {
+ if (toExpand) {
lk->show();
} else {
lk->hide();
};
};
};
+}
+
+void SceneComposedNodeItem::reorganizeShrinkExpand(ShrinkMode theShrinkMode) {
+ DEBTRACE("SceneComposedNodeItem::reorganizeShrinkExpand " << _expanded << " " << _label.toStdString());
+
+ bool toExpand = true;
+ if (theShrinkMode == CurrentNode) {
+ // shrink/expand current node only
+ toExpand = !isExpanded();
+
+ updateControlLinks(toExpand);
+ shrinkExpandRecursive(toExpand, true, theShrinkMode);
+
+ } else {
+ if (!isExpanded())
+ return;
+ // shrink/expand child nodes
+ toExpand = !hasExpandedChildren(theShrinkMode == ElementaryNodes);
+ for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end(); ++it) {
+ SceneItem* item = dynamic_cast<SceneItem*>(*it);
+ SceneNodeItem *sni = dynamic_cast<SceneNodeItem*>(item);
+ item->shrinkExpandRecursive(toExpand, true, theShrinkMode);
+ }
+ _ancestorShrinked = !toExpand;
+ _width = _expandedWidth;
+ _height = _expandedHeight;
+ _shownState = expandShown;
+ adjustHeader();
+ rebuildLinks();
+ }
- shrinkExpandRecursive(isExpanding, true);
if (Scene::_autoComputeLinks)
{
SubjectProc* subproc = QtGuiContext::getQtCurrent()->getSubjectProc();
}
}
-void SceneComposedNodeItem::shrinkExpandRecursive(bool isExpanding, bool fromHere)
+//! Return true if at least one child node of this composed node is expanded.
+//! If \a recursively is true, the check descends into composed children (see note below).
+bool SceneComposedNodeItem::hasExpandedChildren(bool recursively)
+{
+ bool res = false;
+ for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end() && !res; ++it) {
+ SceneItem* item = dynamic_cast<SceneItem*>(*it);
+ // NOTE(review): sni is dereferenced without a null check — assumes every child
+ // item is a SceneNodeItem; confirm.
+ SceneNodeItem *sni = dynamic_cast<SceneNodeItem*>(item);
+ if (sni->isExpanded()) {
+ res = true;
+ if (recursively)
+ // NOTE(review): 'res' is overwritten by the recursive result, so an expanded
+ // composed child whose whole subtree is shrunk does not count — confirm intended.
+ if (SceneComposedNodeItem *scni = dynamic_cast<SceneComposedNodeItem*>(sni))
+ res = scni->hasExpandedChildren(recursively);
+ }
+ }
+ return res;
+}
+
+void SceneComposedNodeItem::shrinkExpandRecursive(bool toExpand, bool fromHere, ShrinkMode theShrinkMode)
{
- DEBTRACE("SceneComposedNodeItem::shrinkExpandRecursive " << isExpanding << " " << fromHere << " " << isExpanded() << " " << _label.toStdString());
+ // Parameter was renamed isExpanding -> toExpand: update the trace accordingly
+ // (the stale name would not compile when DEBTRACE is enabled; the elementary-node
+ // overload of this patch already applies the same fix).
+ DEBTRACE("SceneComposedNodeItem::shrinkExpandRecursive " << toExpand << " " << fromHere << " " << isExpanded() << " " << _label.toStdString());
- if (!isExpanding)
+ bool toChangeShrinkState = false;
+ switch (theShrinkMode) {
+ case CurrentNode:
+ if (fromHere)
+ toChangeShrinkState = true;
+ break;
+ case ChildrenNodes:
+ if (fromHere)
+ toChangeShrinkState = true;
+ break;
+ case ElementaryNodes:
+ toChangeShrinkState = false;
+ break;
+ }
+ if (toChangeShrinkState) {
+ if (toExpand != isExpanded())
+ setExpanded(toExpand);
+ } else if (!isExpanded() && theShrinkMode == ElementaryNodes) {
+ return;
+ }
+
+ updateControlLinks(toExpand);
+
+ if (!toExpand)
{ // ---collapsing: hide first children , then resize
for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end(); ++it)
{
SceneItem* item = dynamic_cast<SceneItem*>(*it);
- item->shrinkExpandRecursive(false, false);
- item->hide();
- DEBTRACE("------------------------------- Hide " << item->getLabel().toStdString());
- item->shrinkExpandLink(false);
+ item->shrinkExpandRecursive(toExpand, false, theShrinkMode);
+ if (theShrinkMode != ElementaryNodes) {
+ item->hide();
+ DEBTRACE("------------------------------- Hide " << item->getLabel().toStdString());
+ item->shrinkExpandLink(false);
+ }
}
- if (_shownState == expandShown)
- {
- _expandedWidth = _width;
- _expandedHeight = _height;
- }
+ if (toChangeShrinkState || theShrinkMode != ElementaryNodes) {
+ if (_shownState == expandShown)
+ {
+ _expandedWidth = _width;
+ _expandedHeight = _height;
+ }
- if (fromHere)
- {
- _shownState = shrinkShown;
- }
- else
- {
- _ancestorShrinked = true;
- _shownState = shrinkHidden;
- }
+ if (fromHere)
+ {
+ _shownState = shrinkShown;
+ }
+ else
+ {
+ _ancestorShrinked = true;
+ _shownState = shrinkHidden;
+ }
- _width = 2*Resource::Corner_Margin + 2*Resource::DataPort_Width + Resource::Space_Margin;
- if (_shownState == shrinkShown)
- _height = getHeaderBottom() + Resource::Corner_Margin;
- else
- _height = Resource::Header_Height + Resource::Corner_Margin;
+ _width = 2*Resource::Corner_Margin + 2*Resource::DataPort_Width + Resource::Space_Margin;
+ if (_shownState == shrinkShown)
+ _height = getHeaderBottom() + Resource::Corner_Margin;
+ else
+ _height = Resource::Header_Height + Resource::Corner_Margin;
- if (_shownState == shrinkHidden) // shrink of ancestor
- setPos(0 ,0);
- else
- setPos(_expandedPos);
- adjustHeader();
- if (_progressItem)
- _progressItem->adjustGeometry();
+ if (_shownState == shrinkHidden) // shrink of ancestor
+ setPos(0 ,0);
+ else
+ setPos(_expandedPos);
+ adjustHeader();
+ if (_progressItem)
+ _progressItem->adjustGeometry();
+ }
}
else
{ // --- expanding: resize, then show children
- _ancestorShrinked = false;
+ if (toChangeShrinkState)
+ _ancestorShrinked = false;
for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end(); ++it)
{
SceneItem* item = dynamic_cast<SceneItem*>(*it);
- item->shrinkExpandRecursive(isExpanded(), false);
- if (isExpanded())
- {
- item->show();
- DEBTRACE("------------------------------- Show " << item->getLabel().toStdString());
- }
- else
- {
- item->hide();
- DEBTRACE("------------------------------- Hide " << item->getLabel().toStdString());
- }
- item->shrinkExpandLink(fromHere);
+ item->shrinkExpandRecursive(isExpanded(), false, theShrinkMode);
+ if (theShrinkMode != ElementaryNodes) {
+ if (isExpanded())
+ {
+ item->show();
+ DEBTRACE("------------------------------- Show " << item->getLabel().toStdString());
+ }
+ else
+ {
+ item->hide();
+ DEBTRACE("------------------------------- Hide " << item->getLabel().toStdString());
+ }
+ item->shrinkExpandLink(fromHere);
+ }
}
if (isExpanded())
virtual std::list<AbstractSceneItem*> getChildren();
virtual void removeChildFromList(AbstractSceneItem* child);
virtual void reorganize();
- virtual void reorganizeShrinkExpand();
- virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere);
+ virtual void reorganizeShrinkExpand(ShrinkMode theShrinkMode);
+ virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere, ShrinkMode theShrinkMode);
virtual void shrinkExpandLink(bool se);
virtual void collisionResolv(SceneItem* child, QPointF oldPos);
virtual void rebuildLinks();
virtual void arrangeChildNodes();
virtual void adjustColors();
virtual void setShownState(shownState ss);
+ virtual bool hasExpandedChildren(bool recursively);
protected:
void dragEnterEvent(QGraphicsSceneDragDropEvent *event);
void dragLeaveEvent(QGraphicsSceneDragDropEvent *event);
void dropEvent(QGraphicsSceneDragDropEvent *event);
+ void updateControlLinks(bool toExpand);
virtual QColor getPenColor();
virtual QColor getBrushColor();
m.popupMenu(caller, globalPos);
}
-void SceneElementaryNodeItem::reorganizeShrinkExpand()
+void SceneElementaryNodeItem::reorganizeShrinkExpand(ShrinkMode theShrinkMode)
{
DEBTRACE("SceneElementaryNodeItem::reorganizeShrinkExpand " << isExpanded() << " " << _label.toStdString());
- shrinkExpandRecursive(isExpanded(), true);
+ if (theShrinkMode != CurrentNode)
+ return;
+ shrinkExpandRecursive(!isExpanded(), true, theShrinkMode);
if (Scene::_autoComputeLinks)
{
SubjectProc* subproc = QtGuiContext::getQtCurrent()->getSubjectProc();
}
}
-void SceneElementaryNodeItem::shrinkExpandRecursive(bool isExpanding, bool fromHere)
+void SceneElementaryNodeItem::shrinkExpandRecursive(bool toExpand, bool fromHere, ShrinkMode theShrinkMode)
{
- DEBTRACE("SceneElementaryNodeItem::shrinkExpandRecursive " << isExpanding << " " << fromHere << " " << isExpanded() << " " << _label.toStdString());
- if (isExpanding)
- {
+ DEBTRACE("SceneElementaryNodeItem::shrinkExpandRecursive " << toExpand << " " << fromHere << " " << isExpanded() << " " << _label.toStdString());
+
+ bool toChangeShrinkState = false;
+ switch (theShrinkMode) {
+ case CurrentNode:
+ if (fromHere)
+ toChangeShrinkState = true;
+ break;
+ case ChildrenNodes:
+ if (fromHere)
+ toChangeShrinkState = true;
+ break;
+ case ElementaryNodes:
+ toChangeShrinkState = true;
+ break;
+ }
+ if (toChangeShrinkState && toExpand != isExpanded())
+ setExpanded(toExpand);
+
+ if (toExpand) {
+ if (toChangeShrinkState) {
_ancestorShrinked = false;
+ _shownState = expandShown;
+ } else {
if (isExpanded())
_shownState = expandShown;
else
_shownState = shrinkShown;
}
- else
- {
- if (fromHere)
- _shownState = shrinkShown;
- else
- {
- _ancestorShrinked = true;
- _shownState = shrinkHidden;
- }
+ } else {
+ if (fromHere || theShrinkMode==ElementaryNodes) {
+ _shownState = shrinkShown;
+ } else {
+ _ancestorShrinked = true;
+ _shownState = shrinkHidden;
}
+ }
if (_shownState == shrinkHidden) // shrink of ancestor
- setPos(0 ,0);
+ setPos(0, 0);
else
setPos(_expandedPos);
virtual void popupMenu(QWidget *caller, const QPoint &globalPos);
virtual void autoPosNewPort(AbstractSceneItem *item, int nbPorts);
virtual void reorganize();
- virtual void reorganizeShrinkExpand();
- virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere);
+ virtual void reorganizeShrinkExpand(ShrinkMode theShrinkMode);
+ virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere, ShrinkMode theShrinkMode);
virtual void setShownState(shownState ss);
protected:
int _maxPorts;
}
-void SceneItem::shrinkExpandRecursive(bool isExpanding, bool fromHere)
+//! Base-class default: no-op. Composed and elementary node items override this.
+void SceneItem::shrinkExpandRecursive(bool isExpanding, bool fromHere, ShrinkMode theShrinkMode)
{
}
{
namespace HMI
{
+ //! Target of a shrink/expand request, used by reorganizeShrinkExpand()
+ //! and shrinkExpandRecursive()
+ typedef enum
+ {
+ CurrentNode, //!< shrink or expand the selected node only
+ ChildrenNodes, //!< shrink or expand the direct children of the selected node
+ ElementaryNodes //!< shrink or expand elementary nodes, recursively
+ } ShrinkMode;
+
class Scene;
class RootSceneItem: public GuiObserver
virtual void updateChildItems();
virtual void updateLinks();
virtual void shrinkExpandLink(bool se);
- virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere);
+ virtual void shrinkExpandRecursive(bool isExpanding, bool fromHere, ShrinkMode theShrinkMode);
bool isAncestorShrinked() { return _ancestorShrinked; };
bool _blocX;
bool _blocY;
{
}
-void SceneNodeItem::reorganizeShrinkExpand()
+//! Default: nothing to reorganize for a plain node; overridden by composed
+//! and elementary node items.
+void SceneNodeItem::reorganizeShrinkExpand(ShrinkMode theShrinkMode)
{
}
void SceneNodeItem::mouseDoubleClickEvent(QGraphicsSceneMouseEvent *event)
{
DEBTRACE("SceneNodeItem::mouseDoubleClickEvent");
- QtGuiContext::getQtCurrent()->getGMain()->_guiEditor->shrinkExpand();
+ // Forward the current keyboard modifiers so a double click can select the
+ // shrink mode (Ctrl: elementary nodes recursively, Ctrl+Shift: children).
+ QtGuiContext::getQtCurrent()->getGMain()->_guiEditor->shrinkExpand(QApplication::keyboardModifiers());
}
void SceneNodeItem::setTopLeft(QPointF topLeft)
virtual void updateName();
virtual void arrangeNodes(bool isRecursive);
virtual void arrangeChildNodes();
- virtual void reorganizeShrinkExpand();
+ virtual void reorganizeShrinkExpand(ShrinkMode theShrinkMode);
virtual void updateChildItems();
virtual void shrinkExpandLink(bool se);
virtual void showOutScopeLinks();
m.popupMenu(caller, globalPos);
}
-void SceneProcItem::reorganizeShrinkExpand() {
- if (_children.size() == 0)
- return;
- bool isExpanding = true;
- DEBTRACE("SceneProcItem::reorganizeShrinkExpand " << _expanded << " " << _label.toStdString());
- for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end(); ++it)
- {
- SceneItem* item = dynamic_cast<SceneItem*>(*it);
- SceneNodeItem *sni = dynamic_cast<SceneNodeItem*>(item);
- if (sni->isExpanded()) {
- isExpanding = false;
- break;
- }
- }
- for (list<AbstractSceneItem*>::const_iterator it=_children.begin(); it!=_children.end(); ++it)
- {
- SceneItem* item = dynamic_cast<SceneItem*>(*it);
- SceneNodeItem *sni = dynamic_cast<SceneNodeItem*>(item);
- if (!isExpanding && sni->isExpanded()) {
- sni->setExpanded(false);
- item->shrinkExpandRecursive(false, true);
- DEBTRACE("------------------------------- Hide " << item->getLabel().toStdString());
- }
- if (isExpanding && !sni->isExpanded()) {
- sni->setExpanded(true);
- item->shrinkExpandRecursive(true, false);
- DEBTRACE("------------------------------- Show " << item->getLabel().toStdString());
- }
- item->shrinkExpandLink(isExpanding);
- }
- _ancestorShrinked = !isExpanding;
- _width = _expandedWidth;
- _height = _expandedHeight;
- _shownState = expandShown;
- adjustHeader();
- rebuildLinks();
-}
-
QString label, Subject *subject);
virtual ~SceneProcItem();
virtual void popupMenu(QWidget *caller, const QPoint &globalPos);
- virtual void reorganizeShrinkExpand();
};
}
}
"Install path: SALOME Python shared modules")
ENDIF(SALOME_YACS_USE_SWIG)
-SET(SALOME_INSTALL_RES share/salome/resources CACHE PATH "Install path: SALOME resources")
+# NOTE(review): SALOME_INSTALL_RES is assumed to be defined elsewhere (e.g. by the
+# common SALOME CMake configuration) — the line below still references it; confirm.
SET(SALOME_PMML_INSTALL_RES_DATA "${SALOME_INSTALL_RES}/pmml" CACHE PATH "Install path: SALOME PMML specific data")
# Sources
PMMLlib.cxx
)
-ADD_SUBDIRECTORY(resources)
-
ADD_LIBRARY(pmmlLib SHARED ${pmml_SOURCES})
TARGET_LINK_LIBRARIES(pmmlLib ${LIBXML2_LIBRARIES} )
INSTALL(TARGETS pmmlLib EXPORT ${PROJECT_NAME}TargetGroup DESTINATION ${SALOME_INSTALL_LIBS})
ADD_TEST(TestPMML TestPMML)
+# Copy the test input samples next to the test binary (configure-time copy into
+# the current binary directory).
+FILE(COPY ${PROJECT_SOURCE_DIR}/src/pmml/Test/samples DESTINATION . )
INSTALL(TARGETS TestPMML DESTINATION ${SALOME_INSTALL_BINS})
void PMMLBasicsTest1::setUp()
{
+ resourcesDir = "samples/";
+ // Samples are copied into the working directory by the build (FILE(COPY ... samples)),
+ // so the tests no longer resolve resources via YACS_ROOT_DIR.
#ifdef WIN32
- const char* p = std::getenv("YACS_ROOT_DIR");
- std::string strP("");
- if (p)
- strP = std::string(p);
- else
- throw std::string("unable to get YACS_ROOT_DIR");
- resourcesDir = strP;
- resourcesDir += "/share/salome/resources/pmml/";
const char* user = std::getenv("USERPROFILE");
std::string strUser("");
- if (user)
+ if (user)
strUser = std::string(user);
- else
+ else
throw std::string("unable to get USERPROFILE");
tmpDir = strUser;
tmpDir += "\\tmp";
std::string cmd = "mkdir " + tmpDir;
system( cmd.c_str() );
#else
- resourcesDir = getenv("YACS_ROOT_DIR");
- resourcesDir += "/share/salome/resources/pmml/";
tmpDir = "/tmp/";
tmpDir += getenv("USER");
tmpDir += "/PmmlUnitTest/";
std::string cmd = "mkdir -p " + tmpDir;
system( cmd.c_str() );
-#endif
+#endif
}
void PMMLBasicsTest1::tearDown()
--- /dev/null
+# Copyright (C) 2012-2014 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+SET(PMML_RESOURCES_FILES
+ # names of the resource files used by the PMML unit tests
+ ann_model.pmml
+ ann_model_2.pmml
+ lr_model.pmml
+ lr_model_2.pmml
+ no_model.pmml
+ two_models_ann_lr.pmml
+ unittest_ref_ann_model.cpp
+ unittest_ref_ann_model.f
+ unittest_ref_ann_model.py
+ unittest_ref_lr_model.cpp
+ unittest_ref_lr_model.f
+ unittest_ref_lr_model.py
+ win32_ann_model.pmml
+ win32_lr_model.pmml
+ )
+
+INSTALL(FILES ${PMML_RESOURCES_FILES} DESTINATION ${SALOME_PMML_INSTALL_RES_DATA})
+
+# MESSAGE(STATUS "Creation of ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml")
+# CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/PMMLCatalog.xml.in ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml @ONLY)
+# MESSAGE(STATUS "Creation of ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml")
+# CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/SalomeApp.xml.in ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml @ONLY)
+
+# INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml DESTINATION ${SALOME_PMML_INSTALL_RES_DATA})
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
+ <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
+ <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
+ <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
+ <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
+ <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
+ <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
+ <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
+ <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
+ </DataDictionary>
+ <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
+ <MiningSchema>
+ <MiningField name="rw" usageType="active"/>
+ <MiningField name="r" usageType="active"/>
+ <MiningField name="tu" usageType="active"/>
+ <MiningField name="tl" usageType="active"/>
+ <MiningField name="hu" usageType="active"/>
+ <MiningField name="hl" usageType="active"/>
+ <MiningField name="l" usageType="active"/>
+ <MiningField name="kw" usageType="active"/>
+ <MiningField name="yhat" usageType="predicted"/>
+ </MiningSchema>
+ <NeuralInputs numberOfInputs="8">
+ <NeuralInput id="0">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="rw">
+ <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
+ <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="r">
+ <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
+ <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="2">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tu">
+ <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
+ <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="3">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tl">
+ <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
+ <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="4">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hu">
+ <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
+ <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="5">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hl">
+ <LinearNorm orig="0.000000e+00" norm="-4.562070e-02"/>
+ <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="6">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="l">
+ <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
+ <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="7">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="kw">
+ <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
+ <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
+ <Neuron id="8" bias="-1.263572e+00">
+ <Con from="0" weight="7.536629e-01"/>
+ <Con from="1" weight="1.653660e-03"/>
+ <Con from="2" weight="4.725001e-03"/>
+ <Con from="3" weight="9.969786e-03"/>
+ <Con from="4" weight="1.787976e-01"/>
+ <Con from="5" weight="-1.809809e-01"/>
+ <Con from="6" weight="-1.735688e-01"/>
+ <Con from="7" weight="8.559675e-02"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity" numberOfNeurons="1">
+ <Neuron id="9" bias="-1.745483e+00">
+ <Con from="8" weight="6.965512e+00"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs numberOfOutputs="1">
+ <NeuralOutput outputNeuron="9">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
+ <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-3_0" version="3.0">
+ <Header copyright="texte copyright" description="texte description">
+ <Application name="Uranie" version="2.3/1"/>
+ <Annotation>date Fri Oct 07, 2011</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
+ <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
+ <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
+ <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
+ <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
+ <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
+ <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
+ <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
+ <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
+ </DataDictionary>
+ <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
+ <MiningSchema>
+ <MiningField name="rw" usageType="active"/>
+ <MiningField name="r" usageType="active"/>
+ <MiningField name="tu" usageType="active"/>
+ <MiningField name="tl" usageType="active"/>
+ <MiningField name="hu" usageType="active"/>
+ <MiningField name="hl" usageType="active"/>
+ <MiningField name="l" usageType="active"/>
+ <MiningField name="kw" usageType="active"/>
+ <MiningField name="yhat" usageType="predicted"/>
+ </MiningSchema>
+ <NeuralInputs numberOfInputs="8">
+ <NeuralInput id="0">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="rw">
+ <LinearNorm orig="0" norm="-2.889932e-01"/>
+ <LinearNorm orig="9.999901e-02" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="r">
+ <LinearNorm orig="0" norm="-5.756638e-01"/>
+ <LinearNorm orig="2.504894e+04" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="2">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tu">
+ <LinearNorm orig="0" norm="-1.699313e-01"/>
+ <LinearNorm orig="8.933486e+04" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="3">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tl">
+ <LinearNorm orig="0" norm="-1.707007e-01"/>
+ <LinearNorm orig="8.955232e+01" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="4">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hu">
+ <LinearNorm orig="0" norm="-3.302777e-02"/>
+ <LinearNorm orig="1.050003e+03" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="5">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hl">
+ <LinearNorm orig="0" norm="-4.562070e-02"/>
+ <LinearNorm orig="7.600007e+02" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="6">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="l">
+ <LinearNorm orig="2" norm="1."/>
+ <LinearNorm orig="1.400018e+03" norm="-1."/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="7">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="kw">
+ <LinearNorm orig="0" norm="-5.780019e-02"/>
+ <LinearNorm orig="1.095001e+04" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
+ <Neuron id="8" bias="-1.263572e+00">
+ <Con from="0" weight="7.536629e-01"/>
+ <Con from="1" weight="1.653660e-03"/>
+ <Con from="2" weight="4.725001e-03"/>
+ <Con from="3" weight="9.969786e-03"/>
+ <Con from="4" weight="1.787976e-01"/>
+ <Con from="5" weight="-1.809809e-01"/>
+ <Con from="6" weight="-1.735688e-01"/>
+ <Con from="7" weight="8.559675e-02"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity" numberOfNeurons="1">
+ <Neuron id="9" bias="-1.745483e+00">
+ <Con from="8" weight="6.965512e+00"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs numberOfOutputs="1">
+ <NeuralOutput outputNeuron="9">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="0" norm="-5.873935e-01"/>
+ <LinearNorm orig="7.781171e+01" norm="0"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ <NeuralOutput outputNeuron="-1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="2." norm="-1"/>
+ <LinearNorm orig="5.781171e+01" norm="1"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="1.100000e+01" rightMargin="2.300000e+01"/>
+ </DataField>
+ <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="2.810000e+01" rightMargin="7.670000e+01"/>
+ </DataField>
+ <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="6.360000e+00" rightMargin="1.251000e+01"/>
+ </DataField>
+ </DataDictionary>
+ <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
+ <MiningSchema>
+ <MiningField name="x6" usageType="active"/>
+ <MiningField name="x8" usageType="active"/>
+ <MiningField name="x1" usageType="predicted"/>
+ </MiningSchema>
+ <RegressionTable intercept="3.837365e+00">
+ <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
+ <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
+ <PredictorTerm coefficient="-2.201903e-02">
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ <PredictorTerm coefficient="5.362560e-04">
+ <FieldRef field="x6"/>
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ </RegressionTable>
+ </RegressionModel>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML version="4.1" xmlns="http://www.dmg.org/PMML-4_1">
+ <Header copyright="myCopyright" description="Text Description">
+ <Application name="Uranie" version="2013.7/18"/>
+ <Annotation>Compilation date : Wed Jul 17, 2013</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="1.100000e+01" rightMargin="2.300000e+01"/>
+ </DataField>
+ <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="2.810000e+01" rightMargin="7.670000e+01"/>
+ </DataField>
+ <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="6.360000e+00" rightMargin="1.251000e+01"/>
+ </DataField>
+ </DataDictionary>
+ <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
+ <MiningSchema>
+ <MiningField name="x6" usageType="active"/>
+ <MiningField name="x8" usageType="active"/>
+ <MiningField name="x1" usageType="predicted"/>
+ </MiningSchema>
+ <RegressionTable>
+ <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
+ <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
+ <PredictorTerm coefficient="-2.201903e-02">
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ <PredictorTerm coefficient="5.362560e-04">
+ <FieldRef field="x6"/>
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ </RegressionTable>
+ </RegressionModel>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
+ <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
+ <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
+ <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
+ <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
+ <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
+ <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
+ <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
+ <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
+ </DataDictionary>
+ <NeuralNetwork modelName="modelName" functionName="regression" numberOfLayers="2">
+ <MiningSchema>
+ <MiningField name="rw" usageType="active"/>
+ <MiningField name="r" usageType="active"/>
+ <MiningField name="tu" usageType="active"/>
+ <MiningField name="tl" usageType="active"/>
+ <MiningField name="hu" usageType="active"/>
+ <MiningField name="hl" usageType="active"/>
+ <MiningField name="l" usageType="active"/>
+ <MiningField name="kw" usageType="active"/>
+ <MiningField name="yhat" usageType="predicted"/>
+ </MiningSchema>
+ <NeuralInputs numberOfInputs="8">
+ <NeuralInput id="0">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="rw">
+ <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
+ <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="r">
+ <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
+ <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="2">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tu">
+ <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
+ <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="3">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tl">
+ <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
+ <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="4">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hu">
+ <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
+ <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="5">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hl">
+ <LinearNorm orig="0.000000e+00" norm="-4.562070e-02"/>
+ <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="6">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="l">
+ <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
+ <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="7">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="kw">
+ <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
+ <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
+ <Neuron id="8" bias="-1.263572e+00">
+ <Con from="0" weight="7.536629e-01"/>
+ <Con from="1" weight="1.653660e-03"/>
+ <Con from="2" weight="4.725001e-03"/>
+ <Con from="3" weight="9.969786e-03"/>
+ <Con from="4" weight="1.787976e-01"/>
+ <Con from="5" weight="-1.809809e-01"/>
+ <Con from="6" weight="-1.735688e-01"/>
+ <Con from="7" weight="8.559675e-02"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity" numberOfNeurons="1">
+ <Neuron id="9" bias="-1.745483e+00">
+ <Con from="8" weight="6.965512e+00"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs numberOfOutputs="1">
+ <NeuralOutput outputNeuron="9">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
+ <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+ <RegressionModel functionName="regression" modelName="modelName" targetFieldName="x1">
+ <MiningSchema>
+ <MiningField name="x6" usageType="active"/>
+ <MiningField name="x8" usageType="active"/>
+ <MiningField name="x1" usageType="predicted"/>
+ </MiningSchema>
+ <RegressionTable intercept="3.837365e+00">
+ <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
+ <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
+ <PredictorTerm coefficient="-2.201903e-02">
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ <PredictorTerm coefficient="5.362560e-04">
+ <FieldRef field="x6"/>
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ </RegressionTable>
+ </RegressionModel>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
+ <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
+ <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
+ <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
+ <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
+ <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
+ <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
+ <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
+ <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
+ </DataDictionary>
+ <NeuralNetwork modelName="modelName" functionName="regression" numberOfLayers="2">
+ <MiningSchema>
+ <MiningField name="rw" usageType="active"/>
+ <MiningField name="r" usageType="active"/>
+ <MiningField name="tu" usageType="active"/>
+ <MiningField name="tl" usageType="active"/>
+ <MiningField name="hu" usageType="active"/>
+ <MiningField name="hl" usageType="active"/>
+ <MiningField name="l" usageType="active"/>
+ <MiningField name="kw" usageType="active"/>
+ <MiningField name="yhat" usageType="predicted"/>
+ </MiningSchema>
+ <NeuralInputs numberOfInputs="8">
+ <NeuralInput id="0">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="rw">
+ <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
+ <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="r">
+ <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
+ <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="2">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tu">
+ <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
+ <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="3">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tl">
+ <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
+ <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="4">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hu">
+ <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
+ <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="5">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hl">
+ <LinearNorm orig="0.000000e+00" norm="-4.562070e-02"/>
+ <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="6">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="l">
+ <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
+ <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="7">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="kw">
+ <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
+ <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
+ <Neuron id="8" bias="-1.263572e+00">
+ <Con from="0" weight="7.536629e-01"/>
+ <Con from="1" weight="1.653660e-03"/>
+ <Con from="2" weight="4.725001e-03"/>
+ <Con from="3" weight="9.969786e-03"/>
+ <Con from="4" weight="1.787976e-01"/>
+ <Con from="5" weight="-1.809809e-01"/>
+ <Con from="6" weight="-1.735688e-01"/>
+ <Con from="7" weight="8.559675e-02"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity" numberOfNeurons="1">
+ <Neuron id="9" bias="-1.745483e+00">
+ <Con from="8" weight="6.965512e+00"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs numberOfOutputs="1">
+ <NeuralOutput outputNeuron="9">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
+ <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+ <RegressionModel functionName="regression" modelName="modelName" targetFieldName="x1">
+ <MiningSchema>
+ <MiningField name="x6" usageType="active"/>
+ <MiningField name="x8" usageType="active"/>
+ <MiningField name="x1" usageType="predicted"/>
+ </MiningSchema>
+ <RegressionTable intercept="3.837365e+00">
+ <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
+ <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
+ <PredictorTerm coefficient="-2.201903e-02">
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ <PredictorTerm coefficient="5.362560e-04">
+ <FieldRef field="x6"/>
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ </RegressionTable>
+ </RegressionModel>
+</PMML>
--- /dev/null
+#define ActivationFunction(sum) ( 1.0 / ( 1.0 + exp( -1.0 * sum )) )
+void myTestFunc(double *param, double *res)
+{
+ //////////////////////////////
+ //
+ // File used by unit test
+ // PMMLBasicsTest1::testExportNeuralNetworkCpp
+ //
+ //////////////////////////////
+
+ int nInput = 8;
+ int nOutput = 1;
+ int nHidden = 1;
+ const int nNeurones = 10;
+ double myTestFunc_act[nNeurones];
+
+ // --- Preprocessing of the inputs and outputs
+ double myTestFunc_minInput[] = {
+ 0.099999, 25048.9, 89334.9, 89.5523, 1050,
+ 760.001, 1400.02, 10950,
+ };
+ double myTestFunc_minOutput[] = {
+ 77.8117, };
+ double myTestFunc_maxInput[] = {
+ 0.028899, 14419.8, 15180.8, 15.2866, 34.6793,
+ 34.6718, 161.826, 632.913,
+ };
+ double myTestFunc_maxOutput[] = {
+ 45.7061, };
+
+ // --- Values of the weights
+ double myTestFunc_valW[] = {
+ -1.74548, 6.96551, -1.26357, 0.753663, 0.00165366,
+ 0.004725, 0.00996979, 0.178798, -0.180981, -0.173569,
+ 0.0855967,
+ };
+ // --- Constants
+ int indNeurone = 0;
+ int CrtW;
+ double sum;
+
+ // --- Input Layers
+ for(int i = 0; i < nInput; i++) {
+ myTestFunc_act[indNeurone++] = ( param[i] - myTestFunc_minInput[i] ) / myTestFunc_maxInput[i];
+ }
+
+ // --- Hidden Layers
+ for (int member = 0; member < nHidden; member++) {
+ int CrtW = member * ( nInput + 2) + 2;
+ sum = myTestFunc_valW[CrtW++];
+ for (int source = 0; source < nInput; source++) {
+ sum += myTestFunc_act[source] * myTestFunc_valW[CrtW++];
+ }
+ myTestFunc_act[indNeurone++] = ActivationFunction(sum);
+ }
+
+ // --- Output
+ for (int member = 0; member < nOutput; member++) {
+ sum = myTestFunc_valW[0];
+ for (int source = 0; source < nHidden; source++) {
+ CrtW = source * ( nInput + 2) + 1;
+ sum += myTestFunc_act[nInput+source] * myTestFunc_valW[CrtW];
+ }
+ myTestFunc_act[indNeurone++] = sum;
+ res[member] = myTestFunc_minOutput[member] + myTestFunc_maxOutput[member] * sum;
+ }
+}
--- /dev/null
+ SUBROUTINE myTestFunc(rw,r,tu,tl,hu,hl,l,kw,yhat)
+C --- *********************************************
+C ---
+C --- File used by unit test
+C --- PMMLBasicsTest1::testExportNeuralNetworkFortran
+C ---
+C --- *********************************************
+ IMPLICIT DOUBLE PRECISION (V)
+ DOUBLE PRECISION rw
+ DOUBLE PRECISION r
+ DOUBLE PRECISION tu
+ DOUBLE PRECISION tl
+ DOUBLE PRECISION hu
+ DOUBLE PRECISION hl
+ DOUBLE PRECISION l
+ DOUBLE PRECISION kw
+ DOUBLE PRECISION yhat
+
+C --- Preprocessing of the inputs
+ VXNrw = ( rw - 0.099999D0 ) / 0.028899D0
+ VXNr = ( r - 25048.9D0 ) / 14419.8D0
+ VXNtu = ( tu - 89334.9D0 ) / 15180.8D0
+ VXNtl = ( tl - 89.5523D0 ) / 15.2866D0
+ VXNhu = ( hu - 1050D0 ) / 34.6793D0
+ VXNhl = ( hl - 760.001D0 ) / 34.6718D0
+ VXNl = ( l - 1400.02D0 ) / 161.826D0
+ VXNkw = ( kw - 10950D0 ) / 632.913D0
+
+C --- Values of the weights
+ VW1 = -1.74548
+ VW2 = 6.96551
+ VW3 = -1.26357
+ VW4 = 0.753663
+ VW5 = 0.00165366
+ VW6 = 0.004725
+ VW7 = 0.00996979
+ VW8 = 0.178798
+ VW9 = -0.180981
+ VW10 = -0.173569
+ VW11 = 0.0855967
+
+C --- hidden neural number 1
+ VAct1 = VW3
+ 1 + VW4 * VXNrw
+ 1 + VW5 * VXNr
+ 1 + VW6 * VXNtu
+ 1 + VW7 * VXNtl
+ 1 + VW8 * VXNhu
+ 1 + VW9 * VXNhl
+ 1 + VW10 * VXNl
+ 1 + VW11 * VXNkw
+
+ VPot1 = 1.D0 / (1.D0 + DEXP(-1.D0 * VAct1))
+
+C --- Output
+ VOut = VW1
+ 1 + VW2 * VPot1
+
+C --- Pretraitment of the output
+ yhat = 77.8117D0 + 45.7061D0 * VOut;
+
+C ---
+ RETURN
+ END
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from math import tanh, exp
+
+def ActivationFunction(sum):
+ return ( 1.0 / ( 1.0 + exp( -1.0 * sum ) ) );
+
+def myTestFunc(param):
+
+ ##############################
+ #
+ # File used by unit test
+ # PMMLBasicsTest1::testExportNeuralNetworkPython
+ #
+ ##############################
+
+ nInput = 8;
+ nOutput = 1;
+ nHidden = 1;
+ nNeurones = 10;
+ myTestFunc_act = [];
+ res = [];
+
+ # --- Preprocessing of the inputs and outputs
+ myTestFunc_minInput = [
+ 0.099999, 25048.9, 89334.9, 89.5523, 1050,
+ 760.001, 1400.02, 10950,
+ ];
+ myTestFunc_minOutput = [
+ 77.8117
+ ];
+ myTestFunc_maxInput = [
+ 0.028899, 14419.8, 15180.8, 15.2866, 34.6793,
+ 34.6718, 161.826, 632.913,
+ ];
+ myTestFunc_maxOutput = [
+ 45.7061
+ ];
+ # --- Values of the weights
+ myTestFunc_valW = [
+ -1.74548, 6.96551, -1.26357, 0.753663, 0.00165366,
+ 0.004725, 0.00996979, 0.178798, -0.180981, -0.173569,
+ 0.0855967,
+ ];
+ # --- Constants
+ indNeurone = 0;
+
+ # --- Input Layers
+ for i in range(nInput) :
+ myTestFunc_act.append( ( param[i] - myTestFunc_minInput[i] ) / myTestFunc_maxInput[i] ) ;
+ indNeurone += 1 ;
+ pass
+
+ # --- Hidden Layers
+ for member in range(nHidden):
+ CrtW = member * ( nInput + 2) + 2;
+ sum = myTestFunc_valW[CrtW];
+ CrtW += 1 ;
+ for source in range(nInput) :
+ sum += myTestFunc_act[source] * myTestFunc_valW[CrtW];
+ CrtW += 1 ;
+ pass
+ myTestFunc_act.append( ActivationFunction(sum) ) ;
+ indNeurone += 1 ;
+ pass
+
+ # --- Output
+ for member in range(nOutput):
+ sum = myTestFunc_valW[0];
+ for source in range(nHidden):
+ CrtW = source * ( nInput + 2) + 1;
+ sum += myTestFunc_act[nInput+source] * myTestFunc_valW[CrtW];
+ pass
+ myTestFunc_act.append( sum );
+ indNeurone += 1 ;
+ res.append( myTestFunc_minOutput[member] + myTestFunc_maxOutput[member] * sum );
+ pass
+
+ return res;
+
+
--- /dev/null
+void myTestFunc(double *param, double *res)
+{
+ //////////////////////////////
+ //
+ // File used by unit test
+ // PMMLBasicsTest1::testExportLinearRegressionCpp
+ //
+ //////////////////////////////
+
+ // Intercept
+ double y = 3.83737;
+
+ // Attribute : x6
+ y += param[0]*0.475913;
+
+ // Attribute : x8
+ y += param[1]*0.142884;
+
+ // Attribute : x6x8
+ y += param[2]*-0.022019;
+
+ // Attribute : x6x6x8
+ y += param[3]*0.000536256;
+
+ // Return the value
+ res[0] = y;
+}
--- /dev/null
+ SUBROUTINE myTestFunc(P0, P1, P2, P3, RES)
+C --- *********************************************
+C ---
+C --- File used by unit test
+C --- PMMLBasicsTest1::testExportLinearRegressionFortran
+C ---
+C --- *********************************************
+
+ IMPLICIT DOUBLE PRECISION (P)
+ DOUBLE PRECISION RES
+ DOUBLE PRECISION Y
+
+C --- Intercept
+ Y = 3.83737;
+
+C --- Attribute : x6
+ Y += P[0]*0.475913;
+
+C --- Attribute : x8
+ Y += P[1]*0.142884;
+
+C --- Attribute : x6x8
+ Y += P[2]*-0.022019;
+
+C --- Attribute : x6x6x8
+ Y += P[3]*0.000536256;
+
+C --- Return the value
+ RES = Y
+ RETURN
+ END
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+def myTestFunc(param):
+
+ ##############################
+ #
+ # File used by unit test
+ # PMMLBasicsTest1::testExportLinearRegressionPython
+ #
+ ##############################
+
+ # Intercept
+ y = 3.83737;
+
+ # Attribute : x6
+ y += param[0]*0.475913;
+
+ # Attribute : x8
+ y += param[1]*0.142884;
+
+ # Attribute : x6x8
+ y += param[2]*-0.022019;
+
+ # Attribute : x6x6x8
+ y += param[3]*0.000536256;
+
+ # Return the value
+ return [y];
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
+ <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
+ <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
+ <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
+ <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
+ <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
+ <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
+ <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
+ <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
+ </DataDictionary>
+ <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
+ <MiningSchema>
+ <MiningField name="rw" usageType="active"/>
+ <MiningField name="r" usageType="active"/>
+ <MiningField name="tu" usageType="active"/>
+ <MiningField name="tl" usageType="active"/>
+ <MiningField name="hu" usageType="active"/>
+ <MiningField name="hl" usageType="active"/>
+ <MiningField name="l" usageType="active"/>
+ <MiningField name="kw" usageType="active"/>
+ <MiningField name="yhat" usageType="predicted"/>
+ </MiningSchema>
+ <NeuralInputs numberOfInputs="8">
+ <NeuralInput id="0">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="rw">
+ <LinearNorm orig="0.000000e+000" norm="-2.889932e-001"/>
+ <LinearNorm orig="9.999901e-002" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="1">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="r">
+ <LinearNorm orig="0.000000e+000" norm="-5.756638e-001"/>
+ <LinearNorm orig="2.504894e+004" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="2">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tu">
+ <LinearNorm orig="0.000000e+000" norm="-1.699313e-001"/>
+ <LinearNorm orig="8.933486e+004" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="3">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="tl">
+ <LinearNorm orig="0.000000e+000" norm="-1.707007e-001"/>
+ <LinearNorm orig="8.955232e+001" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="4">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hu">
+ <LinearNorm orig="0.000000e+000" norm="-3.302777e-002"/>
+ <LinearNorm orig="1.050003e+003" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="5">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="hl">
+ <LinearNorm orig="0.000000e+000" norm="-4.562070e-002"/>
+ <LinearNorm orig="7.600007e+002" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="6">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="l">
+ <LinearNorm orig="0.000000e+000" norm="-1.155882e-001"/>
+ <LinearNorm orig="1.400018e+003" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="7">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="kw">
+ <LinearNorm orig="0.000000e+000" norm="-5.780019e-002"/>
+ <LinearNorm orig="1.095001e+004" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
+ <Neuron id="8" bias="-1.263572e+000">
+ <Con from="0" weight="7.536629e-001"/>
+ <Con from="1" weight="1.653660e-003"/>
+ <Con from="2" weight="4.725001e-003"/>
+ <Con from="3" weight="9.969786e-003"/>
+ <Con from="4" weight="1.787976e-001"/>
+ <Con from="5" weight="-1.809809e-001"/>
+ <Con from="6" weight="-1.735688e-001"/>
+ <Con from="7" weight="8.559675e-002"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity" numberOfNeurons="1">
+ <Neuron id="9" bias="-1.745483e+000">
+ <Con from="8" weight="6.965512e+000"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs numberOfOutputs="1">
+ <NeuralOutput outputNeuron="9">
+ <DerivedField optype="continuous" dataType="float">
+ <NormContinuous field="yhat">
+ <LinearNorm orig="0.000000e+000" norm="-5.873935e-001"/>
+ <LinearNorm orig="7.781171e+001" norm="0.000000e+000"/>
+ </NormContinuous>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+</PMML>
--- /dev/null
+<?xml version="1.0"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
+ <Header copyright="myCopyright" description="Tests unitaires">
+ <Application name="PMMLlib" version="myVersion"/>
+ <Annotation>Tests unitaires PMMLlib</Annotation>
+ </Header>
+ <DataDictionary>
+ <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="1.100000e+001" rightMargin="2.300000e+001"/>
+ </DataField>
+ <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="2.810000e+001" rightMargin="7.670000e+001"/>
+ </DataField>
+ <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
+ <Interval closure="ClosedClosed" leftMargin="6.360000e+000" rightMargin="1.251000e+001"/>
+ </DataField>
+ </DataDictionary>
+ <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
+ <MiningSchema>
+ <MiningField name="x6" usageType="active"/>
+ <MiningField name="x8" usageType="active"/>
+ <MiningField name="x1" usageType="predicted"/>
+ </MiningSchema>
+ <RegressionTable intercept="3.837365e+000">
+ <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-001"/>
+ <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-001"/>
+ <PredictorTerm coefficient="-2.201903e-002">
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ <PredictorTerm coefficient="5.362560e-004">
+ <FieldRef field="x6"/>
+ <FieldRef field="x6"/>
+ <FieldRef field="x8"/>
+ </PredictorTerm>
+ </RegressionTable>
+ </RegressionModel>
+</PMML>
class PMMLBasicsTest(unittest.TestCase):
def setUp(self):
- pmmlRootDir = os.getenv("YACS_ROOT_DIR");
- self.resourcesDir = os.path.join(pmmlRootDir,"share","salome","resources","pmml");
- self.resourcesDir += os.sep ;
- self.tmpDir = "/tmp/";
- self.tmpDir += os.environ['LOGNAME']; # ("USER");
- self.tmpDir += "/PmmlUnitTest/";
+ self.resourcesDir = ".." + os.sep + "Test" + os.sep + "samples" + os.sep ;
+ self.tmpDir = os.sep + "tmp" + os.sep + os.environ['LOGNAME'] + os.sep ;
+ self.tmpDir += "PmmlUnitTest";
+ self.tmpDir += os.sep ;
if ( not os.path.exists(self.tmpDir) ):
os.mkdir(self.tmpDir);
pass
+++ /dev/null
-# Copyright (C) 2012-2014 CEA/DEN, EDF R&D
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
-#
-
-SET(PMML_RESOURCES_FILES
- # ici les noms des fichiers ressources
- ann_model.pmml
- ann_model_2.pmml
- lr_model.pmml
- lr_model_2.pmml
- no_model.pmml
- two_models_ann_lr.pmml
- unittest_ref_ann_model.cpp
- unittest_ref_ann_model.f
- unittest_ref_ann_model.py
- unittest_ref_lr_model.cpp
- unittest_ref_lr_model.f
- unittest_ref_lr_model.py
- win32_ann_model.pmml
- win32_lr_model.pmml
- )
-
-INSTALL(FILES ${PMML_RESOURCES_FILES} DESTINATION ${SALOME_PMML_INSTALL_RES_DATA})
-
-# MESSAGE(STATUS "Creation of ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml")
-# CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/PMMLCatalog.xml.in ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml @ONLY)
-# MESSAGE(STATUS "Creation of ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml")
-# CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/SalomeApp.xml.in ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml @ONLY)
-
-# INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/PMMLCatalog.xml ${CMAKE_CURRENT_BINARY_DIR}/SalomeApp.xml DESTINATION ${SALOME_PMML_INSTALL_RES_DATA})
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
- <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
- <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
- <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
- <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
- <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
- <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
- <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
- <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
- </DataDictionary>
- <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
- <MiningSchema>
- <MiningField name="rw" usageType="active"/>
- <MiningField name="r" usageType="active"/>
- <MiningField name="tu" usageType="active"/>
- <MiningField name="tl" usageType="active"/>
- <MiningField name="hu" usageType="active"/>
- <MiningField name="hl" usageType="active"/>
- <MiningField name="l" usageType="active"/>
- <MiningField name="kw" usageType="active"/>
- <MiningField name="yhat" usageType="predicted"/>
- </MiningSchema>
- <NeuralInputs numberOfInputs="8">
- <NeuralInput id="0">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="rw">
- <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
- <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="r">
- <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
- <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="2">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tu">
- <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
- <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="3">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tl">
- <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
- <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="4">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hu">
- <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
- <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="5">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hl">
- <LinearNorm orig="0.000000e+00" norm="-4.562070e-02"/>
- <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="6">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="l">
- <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
- <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="7">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="kw">
- <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
- <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- </NeuralInputs>
- <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
- <Neuron id="8" bias="-1.263572e+00">
- <Con from="0" weight="7.536629e-01"/>
- <Con from="1" weight="1.653660e-03"/>
- <Con from="2" weight="4.725001e-03"/>
- <Con from="3" weight="9.969786e-03"/>
- <Con from="4" weight="1.787976e-01"/>
- <Con from="5" weight="-1.809809e-01"/>
- <Con from="6" weight="-1.735688e-01"/>
- <Con from="7" weight="8.559675e-02"/>
- </Neuron>
- </NeuralLayer>
- <NeuralLayer activationFunction="identity" numberOfNeurons="1">
- <Neuron id="9" bias="-1.745483e+00">
- <Con from="8" weight="6.965512e+00"/>
- </Neuron>
- </NeuralLayer>
- <NeuralOutputs numberOfOutputs="1">
- <NeuralOutput outputNeuron="9">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
- <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- </NeuralOutputs>
- </NeuralNetwork>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-3_0" version="3.0">
- <Header copyright="texte copyright" description="texte description">
- <Application name="Uranie" version="2.3/1"/>
- <Annotation>date Fri Oct 07, 2011</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
- <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
- <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
- <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
- <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
- <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
- <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
- <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
- <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
- </DataDictionary>
- <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
- <MiningSchema>
- <MiningField name="rw" usageType="active"/>
- <MiningField name="r" usageType="active"/>
- <MiningField name="tu" usageType="active"/>
- <MiningField name="tl" usageType="active"/>
- <MiningField name="hu" usageType="active"/>
- <MiningField name="hl" usageType="active"/>
- <MiningField name="l" usageType="active"/>
- <MiningField name="kw" usageType="active"/>
- <MiningField name="yhat" usageType="predicted"/>
- </MiningSchema>
- <NeuralInputs numberOfInputs="8">
- <NeuralInput id="0">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="rw">
- <LinearNorm orig="0" norm="-2.889932e-01"/>
- <LinearNorm orig="9.999901e-02" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="r">
- <LinearNorm orig="0" norm="-5.756638e-01"/>
- <LinearNorm orig="2.504894e+04" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="2">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tu">
- <LinearNorm orig="0" norm="-1.699313e-01"/>
- <LinearNorm orig="8.933486e+04" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="3">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tl">
- <LinearNorm orig="0" norm="-1.707007e-01"/>
- <LinearNorm orig="8.955232e+01" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="4">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hu">
- <LinearNorm orig="0" norm="-3.302777e-02"/>
- <LinearNorm orig="1.050003e+03" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="5">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hl">
- <LinearNorm orig="0" norm="-4.562070e-02"/>
- <LinearNorm orig="7.600007e+02" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="6">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="l">
- <LinearNorm orig="2" norm="1."/>
- <LinearNorm orig="1.400018e+03" norm="-1."/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="7">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="kw">
- <LinearNorm orig="0" norm="-5.780019e-02"/>
- <LinearNorm orig="1.095001e+04" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- </NeuralInputs>
- <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
- <Neuron id="8" bias="-1.263572e+00">
- <Con from="0" weight="7.536629e-01"/>
- <Con from="1" weight="1.653660e-03"/>
- <Con from="2" weight="4.725001e-03"/>
- <Con from="3" weight="9.969786e-03"/>
- <Con from="4" weight="1.787976e-01"/>
- <Con from="5" weight="-1.809809e-01"/>
- <Con from="6" weight="-1.735688e-01"/>
- <Con from="7" weight="8.559675e-02"/>
- </Neuron>
- </NeuralLayer>
- <NeuralLayer activationFunction="identity" numberOfNeurons="1">
- <Neuron id="9" bias="-1.745483e+00">
- <Con from="8" weight="6.965512e+00"/>
- </Neuron>
- </NeuralLayer>
- <NeuralOutputs numberOfOutputs="1">
- <NeuralOutput outputNeuron="9">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="0" norm="-5.873935e-01"/>
- <LinearNorm orig="7.781171e+01" norm="0"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- <NeuralOutput outputNeuron="-1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="2." norm="-1"/>
- <LinearNorm orig="5.781171e+01" norm="1"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- </NeuralOutputs>
- </NeuralNetwork>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="1.100000e+01" rightMargin="2.300000e+01"/>
- </DataField>
- <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="2.810000e+01" rightMargin="7.670000e+01"/>
- </DataField>
- <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="6.360000e+00" rightMargin="1.251000e+01"/>
- </DataField>
- </DataDictionary>
- <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
- <MiningSchema>
- <MiningField name="x6" usageType="active"/>
- <MiningField name="x8" usageType="active"/>
- <MiningField name="x1" usageType="predicted"/>
- </MiningSchema>
- <RegressionTable intercept="3.837365e+00">
- <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
- <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
- <PredictorTerm coefficient="-2.201903e-02">
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- <PredictorTerm coefficient="5.362560e-04">
- <FieldRef field="x6"/>
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- </RegressionTable>
- </RegressionModel>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML version="4.1" xmlns="http://www.dmg.org/PMML-4_1">
- <Header copyright="myCopyright" description="Text Description">
- <Application name="Uranie" version="2013.7/18"/>
- <Annotation>Compilation date : Wed Jul 17, 2013</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="1.100000e+01" rightMargin="2.300000e+01"/>
- </DataField>
- <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="2.810000e+01" rightMargin="7.670000e+01"/>
- </DataField>
- <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="6.360000e+00" rightMargin="1.251000e+01"/>
- </DataField>
- </DataDictionary>
- <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
- <MiningSchema>
- <MiningField name="x6" usageType="active"/>
- <MiningField name="x8" usageType="active"/>
- <MiningField name="x1" usageType="predicted"/>
- </MiningSchema>
- <RegressionTable>
- <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
- <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
- <PredictorTerm coefficient="-2.201903e-02">
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- <PredictorTerm coefficient="5.362560e-04">
- <FieldRef field="x6"/>
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- </RegressionTable>
- </RegressionModel>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
- <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
- <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
- <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
- <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
- <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
- <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
- <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
- <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
- </DataDictionary>
- <NeuralNetwork modelName="modelName" functionName="regression" numberOfLayers="2">
- <MiningSchema>
- <MiningField name="rw" usageType="active"/>
- <MiningField name="r" usageType="active"/>
- <MiningField name="tu" usageType="active"/>
- <MiningField name="tl" usageType="active"/>
- <MiningField name="hu" usageType="active"/>
- <MiningField name="hl" usageType="active"/>
- <MiningField name="l" usageType="active"/>
- <MiningField name="kw" usageType="active"/>
- <MiningField name="yhat" usageType="predicted"/>
- </MiningSchema>
- <NeuralInputs numberOfInputs="8">
- <NeuralInput id="0">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="rw">
- <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
- <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="r">
- <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
- <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="2">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tu">
- <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
- <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="3">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tl">
- <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
- <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="4">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hu">
- <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
- <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="5">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hl">
- <LineakLRAndkANNrNorm orig="0.000000e+00" norm="-4.562070e-02"/>
- <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="6">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="l">
- <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
- <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="7">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="kw">
- <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
- <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- </NeuralInputs>
- <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
- <Neuron id="8" bias="-1.263572e+00">
- <Con from="0" weight="7.536629e-01"/>
- <Con from="1" weight="1.653660e-03"/>
- <Con from="2" weight="4.725001e-03"/>
- <Con from="3" weight="9.969786e-03"/>
- <Con from="4" weight="1.787976e-01"/>
- <Con from="5" weight="-1.809809e-01"/>
- <Con from="6" weight="-1.735688e-01"/>
- <Con from="7" weight="8.559675e-02"/>
- </Neuron>
- </NeuralLayer>
- <NeuralLayer activationFunction="identity" numberOfNeurons="1">
- <Neuron id="9" bias="-1.745483e+00">
- <Con from="8" weight="6.965512e+00"/>
- </Neuron>
- </NeuralLayer>
- <NeuralOutputs numberOfOutputs="1">
- <NeuralOutput outputNeuron="9">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
- <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- </NeuralOutputs>
- </NeuralNetwork>
- <RegressionModel functionName="regression" modelName="modelName" targetFieldName="x1">
- <MiningSchema>
- <MiningField name="x6" usageType="active"/>
- <MiningField name="x8" usageType="active"/>
- <MiningField name="x1" usageType="predicted"/>
- </MiningSchema>
- <RegressionTable intercept="3.837365e+00">
- <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
- <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
- <PredictorTerm coefficient="-2.201903e-02">
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- <PredictorTerm coefficient="5.362560e-04">
- <FieldRef field="x6"/>
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- </RegressionTable>
- </RegressionModel>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
- <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
- <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
- <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
- <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
- <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
- <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
- <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
- <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
- </DataDictionary>
- <NeuralNetwork modelName="modelName" functionName="regression" numberOfLayers="2">
- <MiningSchema>
- <MiningField name="rw" usageType="active"/>
- <MiningField name="r" usageType="active"/>
- <MiningField name="tu" usageType="active"/>
- <MiningField name="tl" usageType="active"/>
- <MiningField name="hu" usageType="active"/>
- <MiningField name="hl" usageType="active"/>
- <MiningField name="l" usageType="active"/>
- <MiningField name="kw" usageType="active"/>
- <MiningField name="yhat" usageType="predicted"/>
- </MiningSchema>
- <NeuralInputs numberOfInputs="8">
- <NeuralInput id="0">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="rw">
- <LinearNorm orig="0.000000e+00" norm="-2.889932e-01"/>
- <LinearNorm orig="9.999901e-02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="r">
- <LinearNorm orig="0.000000e+00" norm="-5.756638e-01"/>
- <LinearNorm orig="2.504894e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="2">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tu">
- <LinearNorm orig="0.000000e+00" norm="-1.699313e-01"/>
- <LinearNorm orig="8.933486e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="3">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tl">
- <LinearNorm orig="0.000000e+00" norm="-1.707007e-01"/>
- <LinearNorm orig="8.955232e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="4">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hu">
- <LinearNorm orig="0.000000e+00" norm="-3.302777e-02"/>
- <LinearNorm orig="1.050003e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="5">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hl">
- <LineakLRAndkANNrNorm orig="0.000000e+00" norm="-4.562070e-02"/>
- <LinearNorm orig="7.600007e+02" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="6">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="l">
- <LinearNorm orig="0.000000e+00" norm="-1.155882e-01"/>
- <LinearNorm orig="1.400018e+03" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="7">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="kw">
- <LinearNorm orig="0.000000e+00" norm="-5.780019e-02"/>
- <LinearNorm orig="1.095001e+04" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- </NeuralInputs>
- <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
- <Neuron id="8" bias="-1.263572e+00">
- <Con from="0" weight="7.536629e-01"/>
- <Con from="1" weight="1.653660e-03"/>
- <Con from="2" weight="4.725001e-03"/>
- <Con from="3" weight="9.969786e-03"/>
- <Con from="4" weight="1.787976e-01"/>
- <Con from="5" weight="-1.809809e-01"/>
- <Con from="6" weight="-1.735688e-01"/>
- <Con from="7" weight="8.559675e-02"/>
- </Neuron>
- </NeuralLayer>
- <NeuralLayer activationFunction="identity" numberOfNeurons="1">
- <Neuron id="9" bias="-1.745483e+00">
- <Con from="8" weight="6.965512e+00"/>
- </Neuron>
- </NeuralLayer>
- <NeuralOutputs numberOfOutputs="1">
- <NeuralOutput outputNeuron="9">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="0.000000e+00" norm="-5.873935e-01"/>
- <LinearNorm orig="7.781171e+01" norm="0.000000e+00"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- </NeuralOutputs>
- </NeuralNetwork>
- <RegressionModel functionName="regression" modelName="modelName" targetFieldName="x1">
- <MiningSchema>
- <MiningField name="x6" usageType="active"/>
- <MiningField name="x8" usageType="active"/>
- <MiningField name="x1" usageType="predicted"/>
- </MiningSchema>
- <RegressionTable intercept="3.837365e+00">
- <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-01"/>
- <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-01"/>
- <PredictorTerm coefficient="-2.201903e-02">
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- <PredictorTerm coefficient="5.362560e-04">
- <FieldRef field="x6"/>
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- </RegressionTable>
- </RegressionModel>
-</PMML>
+++ /dev/null
-#define ActivationFunction(sum) ( 1.0 / ( 1.0 + exp( -1.0 * sum )) )
-void myTestFunc(double *param, double *res)
-{
- //////////////////////////////
- //
- // File used by unit test
- // PMMLBasicsTest1::testExportNeuralNetworkCpp
- //
- //////////////////////////////
-
- int nInput = 8;
- int nOutput = 1;
- int nHidden = 1;
- const int nNeurones = 10;
- double myTestFunc_act[nNeurones];
-
- // --- Preprocessing of the inputs and outputs
- double myTestFunc_minInput[] = {
- 0.099999, 25048.9, 89334.9, 89.5523, 1050,
- 760.001, 1400.02, 10950,
- };
- double myTestFunc_minOutput[] = {
- 77.8117, };
- double myTestFunc_maxInput[] = {
- 0.028899, 14419.8, 15180.8, 15.2866, 34.6793,
- 34.6718, 161.826, 632.913,
- };
- double myTestFunc_maxOutput[] = {
- 45.7061, };
-
- // --- Values of the weights
- double myTestFunc_valW[] = {
- -1.74548, 6.96551, -1.26357, 0.753663, 0.00165366,
- 0.004725, 0.00996979, 0.178798, -0.180981, -0.173569,
- 0.0855967,
- };
- // --- Constants
- int indNeurone = 0;
- int CrtW;
- double sum;
-
- // --- Input Layers
- for(int i = 0; i < nInput; i++) {
- myTestFunc_act[indNeurone++] = ( param[i] - myTestFunc_minInput[i] ) / myTestFunc_maxInput[i];
- }
-
- // --- Hidden Layers
- for (int member = 0; member < nHidden; member++) {
- int CrtW = member * ( nInput + 2) + 2;
- sum = myTestFunc_valW[CrtW++];
- for (int source = 0; source < nInput; source++) {
- sum += myTestFunc_act[source] * myTestFunc_valW[CrtW++];
- }
- myTestFunc_act[indNeurone++] = ActivationFunction(sum);
- }
-
- // --- Output
- for (int member = 0; member < nOutput; member++) {
- sum = myTestFunc_valW[0];
- for (int source = 0; source < nHidden; source++) {
- CrtW = source * ( nInput + 2) + 1;
- sum += myTestFunc_act[nInput+source] * myTestFunc_valW[CrtW];
- }
- myTestFunc_act[indNeurone++] = sum;
- res[member] = myTestFunc_minOutput[member] + myTestFunc_maxOutput[member] * sum;
- }
-}
+++ /dev/null
- SUBROUTINE myTestFunc(rw,r,tu,tl,hu,hl,l,kw,yhat)
-C --- *********************************************
-C ---
-C --- File used by unit test
-C --- PMMLBasicsTest1::testExportNeuralNetworkFortran
-C ---
-C --- *********************************************
- IMPLICIT DOUBLE PRECISION (V)
- DOUBLE PRECISION rw
- DOUBLE PRECISION r
- DOUBLE PRECISION tu
- DOUBLE PRECISION tl
- DOUBLE PRECISION hu
- DOUBLE PRECISION hl
- DOUBLE PRECISION l
- DOUBLE PRECISION kw
- DOUBLE PRECISION yhat
-
-C --- Preprocessing of the inputs
- VXNrw = ( rw - 0.099999D0 ) / 0.028899D0
- VXNr = ( r - 25048.9D0 ) / 14419.8D0
- VXNtu = ( tu - 89334.9D0 ) / 15180.8D0
- VXNtl = ( tl - 89.5523D0 ) / 15.2866D0
- VXNhu = ( hu - 1050D0 ) / 34.6793D0
- VXNhl = ( hl - 760.001D0 ) / 34.6718D0
- VXNl = ( l - 1400.02D0 ) / 161.826D0
- VXNkw = ( kw - 10950D0 ) / 632.913D0
-
-C --- Values of the weights
- VW1 = -1.74548
- VW2 = 6.96551
- VW3 = -1.26357
- VW4 = 0.753663
- VW5 = 0.00165366
- VW6 = 0.004725
- VW7 = 0.00996979
- VW8 = 0.178798
- VW9 = -0.180981
- VW10 = -0.173569
- VW11 = 0.0855967
-
-C --- hidden neural number 1
- VAct1 = VW3
- 1 + VW4 * VXNrw
- 1 + VW5 * VXNr
- 1 + VW6 * VXNtu
- 1 + VW7 * VXNtl
- 1 + VW8 * VXNhu
- 1 + VW9 * VXNhl
- 1 + VW10 * VXNl
- 1 + VW11 * VXNkw
-
- VPot1 = 1.D0 / (1.D0 + DEXP(-1.D0 * VAct1))
-
-C --- Output
- VOut = VW1
- 1 + VW2 * VPot1
-
-C --- Pretraitment of the output
- yhat = 77.8117D0 + 45.7061D0 * VOut;
-
-C ---
- RETURN
- END
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from math import tanh, exp
-
-def ActivationFunction(sum):
- return ( 1.0 / ( 1.0 + exp( -1.0 * sum ) ) );
-
-def myTestFunc(param):
-
- ##############################
- #
- # File used by unit test
- # PMMLBasicsTest1::testExportNeuralNetworkPython
- #
- ##############################
-
- nInput = 8;
- nOutput = 1;
- nHidden = 1;
- nNeurones = 10;
- myTestFunc_act = [];
- res = [];
-
- # --- Preprocessing of the inputs and outputs
- myTestFunc_minInput = [
- 0.099999, 25048.9, 89334.9, 89.5523, 1050,
- 760.001, 1400.02, 10950,
- ];
- myTestFunc_minOutput = [
- 77.8117
- ];
- myTestFunc_maxInput = [
- 0.028899, 14419.8, 15180.8, 15.2866, 34.6793,
- 34.6718, 161.826, 632.913,
- ];
- myTestFunc_maxOutput = [
- 45.7061
- ];
- # --- Values of the weights
- myTestFunc_valW = [
- -1.74548, 6.96551, -1.26357, 0.753663, 0.00165366,
- 0.004725, 0.00996979, 0.178798, -0.180981, -0.173569,
- 0.0855967,
- ];
- # --- Constants
- indNeurone = 0;
-
- # --- Input Layers
- for i in range(nInput) :
- myTestFunc_act.append( ( param[i] - myTestFunc_minInput[i] ) / myTestFunc_maxInput[i] ) ;
- indNeurone += 1 ;
- pass
-
- # --- Hidden Layers
- for member in range(nHidden):
- CrtW = member * ( nInput + 2) + 2;
- sum = myTestFunc_valW[CrtW];
- CrtW += 1 ;
- for source in range(nInput) :
- sum += myTestFunc_act[source] * myTestFunc_valW[CrtW];
- CrtW += 1 ;
- pass
- myTestFunc_act.append( ActivationFunction(sum) ) ;
- indNeurone += 1 ;
- pass
-
- # --- Output
- for member in range(nOutput):
- sum = myTestFunc_valW[0];
- for source in range(nHidden):
- CrtW = source * ( nInput + 2) + 1;
- sum += myTestFunc_act[nInput+source] * myTestFunc_valW[CrtW];
- pass
- myTestFunc_act.append( sum );
- indNeurone += 1 ;
- res.append( myTestFunc_minOutput[member] + myTestFunc_maxOutput[member] * sum );
- pass
-
- return res;
-
-
+++ /dev/null
-void myTestFunc(double *param, double *res)
-{
- //////////////////////////////
- //
- // File used by unit test
- // PMMLBasicsTest1::testExportLinearRegressionCpp
- //
- //////////////////////////////
-
- // Intercept
- double y = 3.83737;
-
- // Attribute : x6
- y += param[0]*0.475913;
-
- // Attribute : x8
- y += param[1]*0.142884;
-
- // Attribute : x6x8
- y += param[2]*-0.022019;
-
- // Attribute : x6x6x8
- y += param[3]*0.000536256;
-
- // Return the value
- res[0] = y;
-}
+++ /dev/null
- SUBROUTINE myTestFunc(P0, P1, P2, P3, RES)
-C --- *********************************************
-C ---
-C --- File used by unit test
-C --- PMMLBasicsTest1::testExportLinearRegressionFortran
-C ---
-C --- *********************************************
-
- IMPLICIT DOUBLE PRECISION (P)
- DOUBLE PRECISION RES
- DOUBLE PRECISION Y
-
-C --- Intercept
- Y = 3.83737;
-
-C --- Attribute : x6
- Y += P[0]*0.475913;
-
-C --- Attribute : x8
- Y += P[1]*0.142884;
-
-C --- Attribute : x6x8
- Y += P[2]*-0.022019;
-
-C --- Attribute : x6x6x8
- Y += P[3]*0.000536256;
-
-C --- Return the value
- RES = Y
- RETURN
- END
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-def myTestFunc(param):
-
- ##############################
- #
- # File used by unit test
- # PMMLBasicsTest1::testExportLinearRegressionPython
- #
- ##############################
-
- # Intercept
- y = 3.83737;
-
- # Attribute : x6
- y += param[0]*0.475913;
-
- # Attribute : x8
- y += param[1]*0.142884;
-
- # Attribute : x6x8
- y += param[2]*-0.022019;
-
- # Attribute : x6x6x8
- y += param[3]*0.000536256;
-
- # Return the value
- return [y];
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="rw" displayName="rw" optype="continuous" dataType="float"/>
- <DataField name="r" displayName="r" optype="continuous" dataType="float"/>
- <DataField name="tu" displayName="tu" optype="continuous" dataType="float"/>
- <DataField name="tl" displayName="tl" optype="continuous" dataType="float"/>
- <DataField name="hu" displayName="hu" optype="continuous" dataType="float"/>
- <DataField name="hl" displayName="hl" optype="continuous" dataType="float"/>
- <DataField name="l" displayName="l" optype="continuous" dataType="float"/>
- <DataField name="kw" displayName="kw" optype="continuous" dataType="float"/>
- <DataField name="yhat" displayName="yhat" optype="continuous" dataType="float"/>
- </DataDictionary>
- <NeuralNetwork modelName="sANNName" functionName="regression" numberOfLayers="2">
- <MiningSchema>
- <MiningField name="rw" usageType="active"/>
- <MiningField name="r" usageType="active"/>
- <MiningField name="tu" usageType="active"/>
- <MiningField name="tl" usageType="active"/>
- <MiningField name="hu" usageType="active"/>
- <MiningField name="hl" usageType="active"/>
- <MiningField name="l" usageType="active"/>
- <MiningField name="kw" usageType="active"/>
- <MiningField name="yhat" usageType="predicted"/>
- </MiningSchema>
- <NeuralInputs numberOfInputs="8">
- <NeuralInput id="0">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="rw">
- <LinearNorm orig="0.000000e+000" norm="-2.889932e-001"/>
- <LinearNorm orig="9.999901e-002" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="1">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="r">
- <LinearNorm orig="0.000000e+000" norm="-5.756638e-001"/>
- <LinearNorm orig="2.504894e+004" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="2">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tu">
- <LinearNorm orig="0.000000e+000" norm="-1.699313e-001"/>
- <LinearNorm orig="8.933486e+004" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="3">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="tl">
- <LinearNorm orig="0.000000e+000" norm="-1.707007e-001"/>
- <LinearNorm orig="8.955232e+001" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="4">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hu">
- <LinearNorm orig="0.000000e+000" norm="-3.302777e-002"/>
- <LinearNorm orig="1.050003e+003" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="5">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="hl">
- <LinearNorm orig="0.000000e+000" norm="-4.562070e-002"/>
- <LinearNorm orig="7.600007e+002" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="6">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="l">
- <LinearNorm orig="0.000000e+000" norm="-1.155882e-001"/>
- <LinearNorm orig="1.400018e+003" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- <NeuralInput id="7">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="kw">
- <LinearNorm orig="0.000000e+000" norm="-5.780019e-002"/>
- <LinearNorm orig="1.095001e+004" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralInput>
- </NeuralInputs>
- <NeuralLayer activationFunction="tanh" numberOfNeurons="1">
- <Neuron id="8" bias="-1.263572e+000">
- <Con from="0" weight="7.536629e-001"/>
- <Con from="1" weight="1.653660e-003"/>
- <Con from="2" weight="4.725001e-003"/>
- <Con from="3" weight="9.969786e-003"/>
- <Con from="4" weight="1.787976e-001"/>
- <Con from="5" weight="-1.809809e-001"/>
- <Con from="6" weight="-1.735688e-001"/>
- <Con from="7" weight="8.559675e-002"/>
- </Neuron>
- </NeuralLayer>
- <NeuralLayer activationFunction="identity" numberOfNeurons="1">
- <Neuron id="9" bias="-1.745483e+000">
- <Con from="8" weight="6.965512e+000"/>
- </Neuron>
- </NeuralLayer>
- <NeuralOutputs numberOfOutputs="1">
- <NeuralOutput outputNeuron="9">
- <DerivedField optype="continuous" dataType="float">
- <NormContinuous field="yhat">
- <LinearNorm orig="0.000000e+000" norm="-5.873935e-001"/>
- <LinearNorm orig="7.781171e+001" norm="0.000000e+000"/>
- </NormContinuous>
- </DerivedField>
- </NeuralOutput>
- </NeuralOutputs>
- </NeuralNetwork>
-</PMML>
+++ /dev/null
-<?xml version="1.0"?>
-<PMML xmlns="http://www.dmg.org/PMML-4_1" version="4.1">
- <Header copyright="myCopyright" description="Tests unitaires">
- <Application name="PMMLlib" version="myVersion"/>
- <Annotation>Tests unitaires PMMLlib</Annotation>
- </Header>
- <DataDictionary>
- <DataField name="x6" displayName=" x_{6}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="1.100000e+001" rightMargin="2.300000e+001"/>
- </DataField>
- <DataField name="x8" displayName=" x_{8}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="2.810000e+001" rightMargin="7.670000e+001"/>
- </DataField>
- <DataField name="x1" displayName=" x_{1}" optype="continuous" dataType="double">
- <Interval closure="ClosedClosed" leftMargin="6.360000e+000" rightMargin="1.251000e+001"/>
- </DataField>
- </DataDictionary>
- <RegressionModel functionName="regression" modelName="Modeler[LinearRegression]Tds[steamplant]Predictor[x6:x8:x6x8:x6x6x8]Target[x1]" targetFieldName="x1">
- <MiningSchema>
- <MiningField name="x6" usageType="active"/>
- <MiningField name="x8" usageType="active"/>
- <MiningField name="x1" usageType="predicted"/>
- </MiningSchema>
- <RegressionTable intercept="3.837365e+000">
- <NumericPredictor name="x6" exponent="1" coefficient="4.759134e-001"/>
- <NumericPredictor name="x8" exponent="1" coefficient="1.428838e-001"/>
- <PredictorTerm coefficient="-2.201903e-002">
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- <PredictorTerm coefficient="5.362560e-004">
- <FieldRef field="x6"/>
- <FieldRef field="x6"/>
- <FieldRef field="x8"/>
- </PredictorTerm>
- </RegressionTable>
- </RegressionModel>
-</PMML>
<parameter name="DataPort_Width" value="100" />
<parameter name="DataPort_Height" value="25" />
</section>
+ <section name="windows_geometry">
+ <parameter name="YACS" value="#00 #00 #00 #FF #00 #00 #00 #00 #FD #00 #00 #00 #03 #00 #00 #00 #00 #00 #00 #01 #00 #00 #00 #02 #6B #FC #02 #00 #00 #00 #01 #FC #00 #00 #00 #59 #00 #00 #02 #6B #00 #00 #00 #68 #01 #00 #00 #14 #FA #00 #00 #00 #01 #02 #00 #00 #00 #02 #FB #00 #00 #00 #20 #00 #79 #00 #61 #00 #63 #00 #73 #00 #54 #00 #72 #00 #65 #00 #65 #00 #56 #00 #69 #00 #65 #00 #77 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #16 #00 #FF #FF #FF #FB #00 #00 #00 #22 #00 #6F #00 #62 #00 #6A #00 #65 #00 #63 #00 #74 #00 #42 #00 #72 #00 #6F #00 #77 #00 #73 #00 #65 #00 #72 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #53 #00 #FF #FF #FF #00 #00 #00 #01 #00 #00 #01 #0E #00 #00 #02 #6B #FC #02 #00 #00 #00 #01 #FC #00 #00 #00 #59 #00 #00 #02 #6B #00 #00 #00 #68 #01 #00 #00 #14 #FA #00 #00 #00 #01 #02 #00 #00 #00 #02 #FB #00 #00 #00 #24 #00 #79 #00 #61 #00 #63 #00 #73 #00 #49 #00 #6E #00 #70 #00 #75 #00 #74 #00 #50 #00 #61 #00 #6E #00 #65 #00 #6C #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #16 #00 #FF #FF #FF #FB #00 #00 #00 #20 #00 #79 #00 #61 #00 #63 #00 #73 #00 #43 #00 #61 #00 #74 #00 #61 #00 #6C #00 #6F #00 #67 #00 #73 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #53 #00 #FF #FF #FF #00 #00 #00 #03 #00 #00 #05 #40 #00 #00 #00 #53 #FC #01 #00 #00 #00 #01 #FB #00 #00 #00 #22 #00 #70 #00 #79 #00 #74 #00 #68 #00 #6F #00 #6E #00 #43 #00 #6F #00 #6E #00 #73 #00 #6F #00 #6C #00 #65 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #00 #00 #00 #05 #40 #00 #00 #00 #46 #00 #FF #FF #FF #00 #00 #03 #22 #00 #00 #02 #6B #00 #00 #00 #04 #00 #00 #00 #04 #00 #00 #00 #08 #00 #00 #00 #08 #FC #00 #00 #00 #02 #00 #00 #00 #02 #00 #00 #00 #02 #00 #00 #00 #1C #00 #53 #00 #61 #00 #6C #00 #6F #00 #6D #00 #65 #00 #53 #00 #74 #00 #61 #00 #6E #00 #64 #00 #61 #00 #72 #00 #64 #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #00 #00 #00 #00 #00 #00 #00 #00 #1A #00 #53 #00 
#61 #00 #6C #00 #6F #00 #6D #00 #65 #00 #4D #00 #6F #00 #64 #00 #75 #00 #6C #00 #65 #00 #73 #01 #00 #00 #00 #CE #FF #FF #FF #FF #00 #00 #00 #00 #00 #00 #00 #00 #00 #00 #00 #02 #00 #00 #00 #01 #00 #00 #00 #16 #00 #59 #00 #41 #00 #43 #00 #53 #00 #54 #00 #6F #00 #6F #00 #6C #00 #62 #00 #61 #00 #72 #01 #00 #00 #00 #00 #FF #FF #FF #FF #00 #00 #00 #00 #00 #00 #00 #00"/>
+ </section>
+ <section name="windows_visibility">
+ <parameter name="YACS" value="#00 #00 #00 #00 #08 #00 #00 #00 #0E #00 #4D #00 #6F #00 #64 #00 #75 #00 #6C #00 #65 #00 #73 #01 #00 #00 #00 #2A #00 #51 #00 #78 #00 #53 #00 #63 #00 #65 #00 #6E #00 #65 #00 #56 #00 #69 #00 #65 #00 #77 #00 #4F #00 #70 #00 #65 #00 #72 #00 #61 #00 #74 #00 #69 #00 #6F #00 #6E #00 #73 #01 #00 #00 #00 #1A #00 #53 #00 #61 #00 #6C #00 #6F #00 #6D #00 #65 #00 #4D #00 #6F #00 #64 #00 #75 #00 #6C #00 #65 #00 #73 #01 #00 #00 #00 #1C #00 #53 #00 #61 #00 #6C #00 #6F #00 #6D #00 #65 #00 #53 #00 #74 #00 #61 #00 #6E #00 #64 #00 #61 #00 #72 #00 #64 #01 #00 #00 #00 #10 #00 #53 #00 #74 #00 #61 #00 #6E #00 #64 #00 #61 #00 #72 #00 #64 #01 #00 #00 #00 #1E #00 #56 #00 #69 #00 #65 #00 #77 #00 #20 #00 #4F #00 #70 #00 #65 #00 #72 #00 #61 #00 #74 #00 #69 #00 #6F #00 #6E #00 #73 #01 #00 #00 #00 #18 #00 #59 #00 #41 #00 #43 #00 #53 #00 #20 #00 #54 #00 #6F #00 #6F #00 #6C #00 #62 #00 #61 #00 #72 #01 #00 #00 #00 #16 #00 #59 #00 #41 #00 #43 #00 #53 #00 #54 #00 #6F #00 #6F #00 #6C #00 #62 #00 #61 #00 #72 #01 #01 #00 #00 #00 #05 #00 #00 #00 #22 #00 #6F #00 #62 #00 #6A #00 #65 #00 #63 #00 #74 #00 #42 #00 #72 #00 #6F #00 #77 #00 #73 #00 #65 #00 #72 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #22 #00 #70 #00 #79 #00 #74 #00 #68 #00 #6F #00 #6E #00 #43 #00 #6F #00 #6E #00 #73 #00 #6F #00 #6C #00 #65 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #20 #00 #79 #00 #61 #00 #63 #00 #73 #00 #43 #00 #61 #00 #74 #00 #61 #00 #6C #00 #6F #00 #67 #00 #73 #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #24 #00 #79 #00 #61 #00 #63 #00 #73 #00 #49 #00 #6E #00 #70 #00 #75 #00 #74 #00 #50 #00 #61 #00 #6E #00 #65 #00 #6C #00 #44 #00 #6F #00 #63 #00 #6B #01 #00 #00 #00 #20 #00 #79 #00 #61 #00 #63 #00 #73 #00 #54 #00 #72 #00 #65 #00 #65 #00 #56 #00 #69 #00 #65 #00 #77 #00 #44 #00 #6F #00 #63 #00 #6B #01"/>
+ </section>
</document>
<source>shrink or expand the selected node</source>
<translation>contracter ou étendre le noeud choisi</translation>
</message>
+ <message>
+ <source>shrink or expand direct children of the selected node</source>
+ <translation type="unfinished">contracter ou étendre le noeud choisi</translation>
+ </message>
+ <message>
+ <source>shrink or expand elementary nodes of the selected node recursively</source>
+ <translation type="unfinished">contracter ou étendre le noeud choisi</translation>
+ </message>
<message>
<source>shrink/expand</source>
<translation>contracter/étendre</translation>
</message>
+ <message>
+ <source>shrink/expand children</source>
+ <translation type="unfinished">contracter/étendre</translation>
+ </message>
+ <message>
+ <source>shrink/expand elementary</source>
+ <translation type="unfinished">contracter/étendre</translation>
+ </message>
<message>
<source>draw straight or orthogonal links</source>
<translation>Créer les liens droits ou orthogonaux</translation>
<source>shrink or expand the selected node</source>
<translation>選択中のノードを展開または縮小</translation>
</message>
+ <message>
+ <source>shrink or expand direct children of the selected node</source>
+ <translation type="unfinished">選択中のノードを展開または縮小</translation>
+ </message>
+ <message>
+ <source>shrink or expand elementary nodes of the selected node recursively</source>
+ <translation type="unfinished">選択中のノードを展開または縮小</translation>
+ </message>
<message>
<source>shrink/expand</source>
<translation>展開/縮小</translation>
</message>
+ <message>
+ <source>shrink/expand children</source>
+ <translation type="unfinished">展開/縮小</translation>
+ </message>
+ <message>
+ <source>shrink/expand elementary</source>
+ <translation type="unfinished">展開/縮小</translation>
+ </message>
<message>
<source>draw straight or orthogonal links</source>
<translation>直線または直交リンクを描画</translation>
return createMenu(action, menu, actionId, groupId, index);
}
-int SalomeWrap_Module::wCreateTool(const QString& name)
+int SalomeWrap_Module::wCreateTool(const QString& title, const QString& name)
{
- return createTool(name);
+ return createTool(title, name);
}
int SalomeWrap_Module::wCreateTool(const int actionId,
const int groupId = -1,
const int index = -1);
- int wCreateTool(const QString& name);
+ int wCreateTool(const QString& title,
+ const QString& name = QString());
int wCreateTool(const int actionId,
const int toolbarId,
return module->wCreateMenu(action, menu, actionId, groupId, index);
}
-int SuitWrapper::createTool(const QString& name)
+int SuitWrapper::createTool(const QString& title, const QString& name)
{
SalomeWrap_Module* module = dynamic_cast<SalomeWrap_Module*>(_wrapped);
- return module->wCreateTool(name);
+ return module->wCreateTool(title, name);
}
int SuitWrapper::createTool(const int actionId,
const int groupId = -1,
const int index = -1);
- int createTool(const QString& name);
+ int createTool(const QString& title,
+ const QString& name = QString());
int createTool(const int actionId,
const int toolbarId,
IF(SALOME_BUILD_TESTS)
ADD_SUBDIRECTORY(Test)
- ADD_SUBDIRECTORY(pmml)
+ ADD_SUBDIRECTORY(pmml)
ENDIF(SALOME_BUILD_TESTS)
# --- options ---
SALOME_CONFIGURE_FILE(xmlrun_orig.sh xmlrun.sh)
ADD_TEST(NAME YacsLoaderTest COMMAND ${SHELL} YacsLoaderTest.sh)
ENDIF()
+
#include "OptimizerAlg.hxx"
-using namespace YACS::ENGINE;
+#include <iostream>
+//using namespace YACS::ENGINE;
extern "C"
{
- OptimizerAlgBase * createOptimizerAlgASyncExample(Pool * pool);
+ YACS::ENGINE::OptimizerAlgBase * createOptimizerAlgASyncExample(YACS::ENGINE::Pool * pool);
}
-class OptimizerAlgASyncExample : public OptimizerAlgASync
- {
+class OptimizerAlgASyncExample : public YACS::ENGINE::OptimizerAlgASync
+{
private:
- TypeCode * _tcIn;
- TypeCode * _tcOut;
+ YACS::ENGINE::TypeCode *_tcInt;
+ YACS::ENGINE::TypeCode *_tcDouble;
public:
- OptimizerAlgASyncExample(Pool * pool);
+ OptimizerAlgASyncExample(YACS::ENGINE::Pool *pool);
virtual ~OptimizerAlgASyncExample();
- TypeCode * getTCForIn() const;
- TypeCode * getTCForOut() const;
- void startToTakeDecision();
- };
+
+  //! returns typecode of type expected as Input. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForIn() const;
+  //! returns typecode of type expected as Output. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForOut() const;
+  //! returns typecode of type expected for algo initialization. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForAlgoInit() const;
+  //! returns typecode of type expected as algo result. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForAlgoResult() const;
+ virtual void initialize(const YACS::ENGINE::Any *input) throw (YACS::Exception);
+ virtual void startToTakeDecision();
+  virtual void finish();//!< Called when the optimization has succeeded.
+ virtual YACS::ENGINE::Any * getAlgoResult();
+};
-OptimizerAlgASyncExample::OptimizerAlgASyncExample(Pool * pool) : OptimizerAlgASync(pool),
- _tcIn(0), _tcOut(0)
+OptimizerAlgASyncExample::OptimizerAlgASyncExample(YACS::ENGINE::Pool *pool)
+ : YACS::ENGINE::OptimizerAlgASync(pool), _tcInt(0), _tcDouble(0)
{
- _tcIn = new TypeCode(Double);
- _tcOut = new TypeCode(Int);
+ _tcDouble = new YACS::ENGINE::TypeCode(YACS::ENGINE::Double);
+ _tcInt = new YACS::ENGINE::TypeCode(YACS::ENGINE::Int);
}
OptimizerAlgASyncExample::~OptimizerAlgASyncExample()
{
- _tcIn->decrRef();
- _tcOut->decrRef();
+ _tcDouble->decrRef();
+ _tcInt->decrRef();
+}
+
+//! Return the typecode of the expected input of the internal node
+YACS::ENGINE::TypeCode * OptimizerAlgASyncExample::getTCForIn() const
+{
+ return _tcDouble;
+}
+
+//! Return the typecode of the expected output of the internal node
+YACS::ENGINE::TypeCode * OptimizerAlgASyncExample::getTCForOut() const
+{
+ return _tcInt;
}
-//! Return the typecode of the expected input type
-TypeCode *OptimizerAlgASyncExample::getTCForIn() const
+//! Return the typecode of the expected input of the algorithm (algoInit port)
+YACS::ENGINE::TypeCode * OptimizerAlgASyncExample::getTCForAlgoInit() const
{
- return _tcIn;
+ return _tcInt;
}
-//! Return the typecode of the expected output type
-TypeCode *OptimizerAlgASyncExample::getTCForOut() const
+//! Return the typecode of the expected output of the algorithm (algoResult port)
+YACS::ENGINE::TypeCode * OptimizerAlgASyncExample::getTCForAlgoResult() const
{
- return _tcOut;
+ return _tcInt;
+}
+
+//! Optional method to initialize the algorithm.
+/*!
+ * The type of the "input" parameter is the one returned by
+ * getTCForAlgoInit() (here an int, read from the algoInit port).
+ */
+void OptimizerAlgASyncExample::initialize(const YACS::ENGINE::Any *input)
+ throw (YACS::Exception)
+{
+ std::cout << "Algo initialize, input = " << input->getIntValue() << std::endl;
}
//! This method is called only once to launch the algorithm.
/*!
- * It must first fill the pool with samples to evaluate and call signalMasterAndWait()
- * to block until a sample has been evaluated. When returning from this method, it MUST
- * check for an eventual termination request (with the method isTerminationRequested()).
- * If the termination is requested, the method must perform any necessary cleanup and
- * return as soon as possible. Otherwise it can either add new samples to evaluate in
- * the pool, do nothing (wait for more samples), or empty the pool and return to finish
- * the evaluation.
+ * It must first fill the pool with samples to evaluate and call
+ * signalMasterAndWait() to block until a sample has been evaluated. When
+ * returning from this method, it MUST check for an eventual termination
+ * request (with the method isTerminationRequested()). If the termination
+ * is requested, the method must perform any necessary cleanup and return
+ * as soon as possible. Otherwise it can either add new samples to evaluate
+ * in the pool, do nothing (wait for more samples), or empty the pool and
+ * return to finish the evaluation.
*/
void OptimizerAlgASyncExample::startToTakeDecision()
{
- double val = 1.2;
- for (int i=0 ; i<5 ; i++) {
- // push a sample in the input of the slave node
- _pool->pushInSample(i, AtomAny::New(val));
- // wait until next sample is ready
- signalMasterAndWait();
- // check error notification
- if (isTerminationRequested()) {
- _pool->destroyAll();
- return;
+ std::cout << "startToTakeDecision" << std::endl;
+ int iter = 0;
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(0.5);
+ _pool->pushInSample(iter, val);
+
+ signalMasterAndWait();
+ while(!isTerminationRequested())
+ {
+ int currentId = _pool->getCurrentId();
+ double valIn = _pool->getCurrentInSample()->getDoubleValue();
+ int valOut = _pool->getCurrentOutSample()->getIntValue();
+
+ std::cout << "Compute currentId=" << currentId;
+ std::cout << ", valIn=" << valIn;
+ std::cout << ", valOut=" << valOut << std::endl;
+
+ iter++;
+ if(iter < 3)
+ {
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(valIn + 1);
+ _pool->pushInSample(iter, val);
}
-
- // get a sample from the output of the slave node
- Any * v = _pool->getCurrentOutSample();
- val += v->getIntValue();
+ signalMasterAndWait();
}
+}
- // in the end destroy the pool content
+/*!
+ * Optional method called when the algorithm has finished, successfully or
+ * not, to perform any necessary clean up.
+ */
+void OptimizerAlgASyncExample::finish()
+{
+ std::cout << "Algo finish" << std::endl;
_pool->destroyAll();
}
+/*!
+ * Return the value of the algoResult port.
+ */
+YACS::ENGINE::Any * OptimizerAlgASyncExample::getAlgoResult()
+{
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(42);
+ return val;
+}
+
//! Factory method to create the algorithm.
-OptimizerAlgBase * createOptimizerAlgASyncExample(Pool * pool)
+YACS::ENGINE::OptimizerAlgBase * createOptimizerAlgASyncExample(YACS::ENGINE::Pool *pool)
{
return new OptimizerAlgASyncExample(pool);
-}
+}
\ No newline at end of file
#include "OptimizerAlg.hxx"
-using namespace YACS::ENGINE;
+#include <iostream>
+//using namespace YACS::ENGINE;
extern "C"
{
- OptimizerAlgBase * createOptimizerAlgSyncExample(Pool * pool);
+ YACS::ENGINE::OptimizerAlgBase * createOptimizerAlgSyncExample(YACS::ENGINE::Pool * pool);
}
-class OptimizerAlgSyncExample : public OptimizerAlgSync
- {
+class OptimizerAlgSyncExample : public YACS::ENGINE::OptimizerAlgSync
+{
private:
- int _idTest;
- TypeCode *_tcIn;
- TypeCode *_tcOut;
+ int _iter;
+ YACS::ENGINE::TypeCode *_tcInt;
+ YACS::ENGINE::TypeCode *_tcDouble;
public:
- OptimizerAlgSyncExample(Pool *pool);
+ OptimizerAlgSyncExample(YACS::ENGINE::Pool *pool);
virtual ~OptimizerAlgSyncExample();
- TypeCode *getTCForIn() const;
- TypeCode *getTCForOut() const;
- void start();
- void takeDecision();
- void initialize(const Any *input) throw(YACS::Exception);
- void finish();
- };
-
-OptimizerAlgSyncExample::OptimizerAlgSyncExample(Pool *pool) : OptimizerAlgSync(pool),
- _tcIn(0), _tcOut(0),
- _idTest(0)
+
+  //! returns typecode of type expected as Input. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForIn() const;
+  //! returns typecode of type expected as Output. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForOut() const;
+  //! returns typecode of type expected for algo initialization. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForAlgoInit() const;
+  //! returns typecode of type expected as algo result. Ownership of returned pointer is held by this.
+  virtual YACS::ENGINE::TypeCode *getTCForAlgoResult() const;
+ virtual void initialize(const YACS::ENGINE::Any *input) throw (YACS::Exception);
+  virtual void start(); //!< Update _pool attribute before performing anything.
+  virtual void takeDecision();//!< _pool->getCurrentId gives the \b id at the origin of this call.
+  //!< Perform the job of analysing to know what new jobs to do (_pool->pushInSample)
+  //!< or in case of convergence _pool->destroyAll.
+  virtual void finish();//!< Called when the optimization has succeeded.
+ virtual YACS::ENGINE::Any * getAlgoResult();
+};
+
+OptimizerAlgSyncExample::OptimizerAlgSyncExample(YACS::ENGINE::Pool *pool)
+ : YACS::ENGINE::OptimizerAlgSync(pool), _tcInt(0), _tcDouble(0), _iter(0)
{
- _tcIn=new TypeCode(Double);
- _tcOut=new TypeCode(Int);
+ _tcDouble = new YACS::ENGINE::TypeCode(YACS::ENGINE::Double);
+ _tcInt = new YACS::ENGINE::TypeCode(YACS::ENGINE::Int);
}
OptimizerAlgSyncExample::~OptimizerAlgSyncExample()
{
- std::cout << "Destroying OptimizerAlgSyncExample" << std::endl;
- _tcIn->decrRef();
- _tcOut->decrRef();
- std::cout << "Destroyed OptimizerAlgSyncExample" << std::endl;
+ _tcDouble->decrRef();
+ _tcInt->decrRef();
}
-//! Return the typecode of the expected input type
-TypeCode * OptimizerAlgSyncExample::getTCForIn() const
+//! Return the typecode of the expected input of the internal node
+YACS::ENGINE::TypeCode * OptimizerAlgSyncExample::getTCForIn() const
{
- return _tcIn;
+ return _tcDouble;
}
-//! Return the typecode of the expected output type
-TypeCode * OptimizerAlgSyncExample::getTCForOut() const
+//! Return the typecode of the expected output of the internal node
+YACS::ENGINE::TypeCode * OptimizerAlgSyncExample::getTCForOut() const
{
- return _tcOut;
+ return _tcInt;
+}
+
+//! Return the typecode of the expected input of the algorithm (algoInit port)
+YACS::ENGINE::TypeCode * OptimizerAlgSyncExample::getTCForAlgoInit() const
+{
+ return _tcInt;
+}
+
+//! Return the typecode of the expected output of the algorithm (algoResult port)
+YACS::ENGINE::TypeCode * OptimizerAlgSyncExample::getTCForAlgoResult() const
+{
+ return _tcInt;
+}
+
+//! Optional method to initialize the algorithm.
+/*!
+ * The type of the "input" parameter is the one returned by
+ * getTCForAlgoInit() (here an int, read from the algoInit port).
+ */
+void OptimizerAlgSyncExample::initialize(const YACS::ENGINE::Any *input)
+ throw (YACS::Exception)
+{
+ std::cout << "Algo initialize, input = " << input->getIntValue() << std::endl;
}
//! Start to fill the pool with samples to evaluate
void OptimizerAlgSyncExample::start()
{
- _idTest=0;
- Any *val=AtomAny::New(1.2);
- _pool->pushInSample(4,val);
- val=AtomAny::New(3.4);
- _pool->pushInSample(9,val);
+ std::cout << "Algo start " << std::endl;
+ _iter=0;
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(0.5);
+ _pool->pushInSample(_iter,val);
}
//! This method is called each time a sample has been evaluated.
/*!
- * It can either add new samples to evaluate in the pool, do nothing (wait for more
- * samples), or empty the pool to finish the evaluation.
+ * It can either add new samples to evaluate in the pool, do nothing (wait
+ * for more samples), or empty the pool to finish the evaluation.
*/
void OptimizerAlgSyncExample::takeDecision()
{
- if(_idTest==1)
- {
- Any *val=AtomAny::New(5.6);
- _pool->pushInSample(16,val);
- val=AtomAny::New(7.8);
- _pool->pushInSample(25,val);
- val=AtomAny::New(9. );
- _pool->pushInSample(36,val);
- val=AtomAny::New(12.3);
- _pool->pushInSample(49,val);
- }
- else if(_idTest==4)
- {
- Any *val=AtomAny::New(45.6);
- _pool->pushInSample(64,val);
- val=AtomAny::New(78.9);
- _pool->pushInSample(81,val);
- }
- else
- {
- Any *tmp= _pool->getCurrentInSample();
- if(fabs(tmp->getDoubleValue()-45.6)<1.e-12)
- _pool->destroyAll();
- }
- _idTest++;
+ int currentId = _pool->getCurrentId();
+ double valIn = _pool->getCurrentInSample()->getDoubleValue();
+ int valOut = _pool->getCurrentOutSample()->getIntValue();
+
+ std::cout << "Algo takeDecision currentId=" << currentId;
+ std::cout << ", valIn=" << valIn;
+ std::cout << ", valOut=" << valOut << std::endl;
+
+ _iter++;
+ if(_iter < 3)
+ {
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(valIn + 1);
+ _pool->pushInSample(_iter, val);
+ }
}
-//! Optional method to initialize the algorithm.
/*!
- * For now, the parameter input is always NULL. It might be used in the future to
- * initialize an algorithm with custom data.
+ * Optional method called when the algorithm has finished, successfully or
+ * not, to perform any necessary clean up.
*/
-void OptimizerAlgSyncExample::initialize(const Any *input) throw (YACS::Exception)
+void OptimizerAlgSyncExample::finish()
{
+ std::cout << "Algo finish" << std::endl;
+ _pool->destroyAll();
}
/*!
- * Optional method called when the algorithm has finished, successfully or not, to
- * perform any necessary clean up.
+ * Return the value of the algoResult port.
*/
-void OptimizerAlgSyncExample::finish()
+YACS::ENGINE::Any * OptimizerAlgSyncExample::getAlgoResult()
{
+ YACS::ENGINE::Any *val=YACS::ENGINE::AtomAny::New(42);
+ return val;
}
//! Factory method to create the algorithm.
-OptimizerAlgBase * createOptimizerAlgSyncExample(Pool *pool)
+YACS::ENGINE::OptimizerAlgBase * createOptimizerAlgSyncExample(YACS::ENGINE::Pool *pool)
{
return new OptimizerAlgSyncExample(pool);
-}
+}
\ No newline at end of file
r=SALOMERuntime.getSALOMERuntime()
self.tin=r.getTypeCode("double")
self.tout=r.getTypeCode("int")
+ self.tAlgoInit=r.getTypeCode("int")
+ self.tAlgoResult=r.getTypeCode("int")
def setPool(self,pool):
"""Must be implemented to set the pool"""
self.pool=pool
def getTCForIn(self):
- """returns typecode of type expected as Input"""
+ """return typecode of type expected as Input of the internal node """
return self.tin
def getTCForOut(self):
- """returns typecode of type expected as Output"""
+ """return typecode of type expected as Output of the internal node"""
return self.tout
- def startToTakeDecision(self):
- """This method is called only once to launch the algorithm. It must first fill the
- pool with samples to evaluate and call self.signalMasterAndWait() to block until a
- sample has been evaluated. When returning from this method, it MUST check for an
- eventual termination request (with the method self.isTerminationRequested()). If
- the termination is requested, the method must perform any necessary cleanup and
- return as soon as possible. Otherwise it can either add new samples to evaluate in
- the pool, do nothing (wait for more samples), or empty the pool and return to
- finish the evaluation.
+ def getTCForAlgoInit(self):
+ """return typecode of type expected as input for initialize """
+ return self.tAlgoInit
+
+ def getTCForAlgoResult(self):
+ """return typecode of type expected as output of the algorithm """
+ return self.tAlgoResult
+
+ def initialize(self,input):
+ """Optional method called on initialization.
+ The type of "input" is returned by "getTCForAlgoInit"
"""
- val=1.2
- for iter in xrange(5):
- #push a sample in the input of the slave node
- self.pool.pushInSample(iter,val)
- #wait until next sample is ready
- self.signalMasterAndWait()
- #check error notification
- if self.isTerminationRequested():
- self.pool.destroyAll()
- return
+ print "Algo initialize, input = ", input.getIntValue()
- #get a sample from the output of the slave node
+ def startToTakeDecision(self):
+ """This method is called only once to launch the algorithm. It must
+ first fill the pool with samples to evaluate and call
+ self.signalMasterAndWait() to block until a sample has been
+ evaluated. When returning from this method, it MUST check for an
+ eventual termination request (with the method
+ self.isTerminationRequested()). If the termination is requested, the
+ method must perform any necessary cleanup and return as soon as
+ possible. Otherwise it can either add new samples to evaluate in the
+ pool, do nothing (wait for more samples), or empty the pool and
+ return to finish the evaluation.
+ """
+ print "startToTakeDecision"
+ # fill the pool with samples
+ iter=0
+ self.pool.pushInSample(0, 0.5)
+
+ #
+ self.signalMasterAndWait()
+ while not self.isTerminationRequested():
currentId=self.pool.getCurrentId()
- v=self.pool.getCurrentOutSample()
- val=val+v.getIntValue()
+ valIn = self.pool.getCurrentInSample().getDoubleValue()
+ valOut = self.pool.getCurrentOutSample().getIntValue()
+ print "Compute currentId=%s, valIn=%s, valOut=%s" % (currentId, valIn, valOut)
+ iter=iter+1
+
+ if iter < 3:
+ nextSample = valIn + 1
+ self.pool.pushInSample(iter, nextSample)
+
+ self.signalMasterAndWait()
- #in the end destroy the pool content
+ def finish(self):
+ """Optional method called when the algorithm has finished, successfully
+ or not, to perform any necessary clean up."""
+ print "Algo finish"
self.pool.destroyAll()
+
+ def getAlgoResult(self):
+ """return the result of the algorithm.
+ The object returned is of type indicated by getTCForAlgoResult.
+ """
+ return 42
+
+
r=SALOMERuntime.getSALOMERuntime()
self.tin=r.getTypeCode("double")
self.tout=r.getTypeCode("int")
+ self.tAlgoInit=r.getTypeCode("int")
+ self.tAlgoResult=r.getTypeCode("int")
def setPool(self,pool):
"""Must be implemented to set the pool"""
self.pool=pool
def getTCForIn(self):
- """returns typecode of type expected as Input"""
+ """return typecode of type expected as Input of the internal node """
return self.tin
def getTCForOut(self):
- """returns typecode of type expected as Output"""
+ """return typecode of type expected as Output of the internal node"""
return self.tout
+ def getTCForAlgoInit(self):
+ """return typecode of type expected as input for initialize """
+ return self.tAlgoInit
+
+ def getTCForAlgoResult(self):
+ """return typecode of type expected as output of the algorithm """
+ return self.tAlgoResult
+
def initialize(self,input):
- """Optional method called on initialization. Do nothing here"""
+ """Optional method called on initialization.
+ The type of "input" is returned by "getTCForAlgoInit"
+ """
+ print "Algo initialize, input = ", input.getIntValue()
def start(self):
"""Start to fill the pool with samples to evaluate."""
+ print "Algo start "
self.iter=0
- self.pool.pushInSample(4,1.2)
- self.pool.pushInSample(9,3.4)
+ # pushInSample(id, value)
+ self.pool.pushInSample(self.iter, 0.5)
def takeDecision(self):
- """ This method is called each time a sample has been evaluated. It can either add
- new samples to evaluate in the pool, do nothing (wait for more samples), or empty
- the pool to finish the evaluation.
+ """ This method is called each time a sample has been evaluated. It can
+ either add new samples to evaluate in the pool, do nothing (wait for
+ more samples), or empty the pool to finish the evaluation.
"""
currentId=self.pool.getCurrentId()
+ valIn = self.pool.getCurrentInSample().getDoubleValue()
+ valOut = self.pool.getCurrentOutSample().getIntValue()
+ print "Algo takeDecision currentId=%s, valIn=%s, valOut=%s" % (currentId, valIn, valOut)
- if self.iter==1:
- self.pool.pushInSample(16,5.6)
- self.pool.pushInSample(25,7.8)
- self.pool.pushInSample(36,9.)
- self.pool.pushInSample(49,12.3)
- elif self.iter==4:
- self.pool.pushInSample(64,45.6)
- self.pool.pushInSample(81,78.9)
- else:
- val=self.pool.getCurrentInSample()
- if abs(val.getDoubleValue()-45.6) < 1.e-12:
- self.pool.destroyAll()
self.iter=self.iter+1
+ if self.iter < 3:
+ # continue
+ nextSample = valIn + 1
+ self.pool.pushInSample(self.iter, nextSample)
def finish(self):
- """Optional method called when the algorithm has finished, successfully or not, to
- perform any necessary clean up. Do nothing here"""
+ """Optional method called when the algorithm has finished, successfully
+ or not, to perform any necessary clean up."""
+ print "Algo finish"
+ self.pool.destroyAll()
+
+ def getAlgoResult(self):
+ """return the result of the algorithm.
+ The object returned is of type indicated by getTCForAlgoResult.
+ """
+ return 42
+
+
T b=this->_cnode;
this->_cnodes.pop_back();
currentProc->names.pop_back();
- this->_cnode=this->_cnodes.back();
+ this->_cnode=this->_cnodes.empty() ? 0 : this->_cnodes.back();
return b;
}
}
T b=this->_cnode;
this->_cnodes.pop_back();
currentProc->names.pop_back();
- this->_cnode=this->_cnodes.back();
+ this->_cnode=this->_cnodes.empty() ? 0 : this->_cnodes.back();
return b;
}
DEBTRACE("pseudocomposednode_post" << this->_cnode->getNode()->getName())
T b = this->_cnode;
this->_cnodes.pop_back();
- if(this->_cnodes.size() == 0)
- this->_cnode = 0;
- else
- this->_cnode = this->_cnodes.back();
+ this->_cnode=this->_cnodes.empty() ? 0 : this->_cnodes.back();
return b;
}
T b=this->_cnode;
this->_cnodes.pop_back();
currentProc->names.pop_back();
- if(this->_cnodes.size() == 0)
- this->_cnode=0;
- else
- this->_cnode=this->_cnodes.back();
+ this->_cnode=this->_cnodes.empty() ? 0 : this->_cnodes.back();
return b;
}
int _nbranch;
T b=this->_cnode;
this->_cnodes.pop_back();
currentProc->names.pop_back();
- if(this->_cnodes.size() == 0)
- this->_cnode=0;
- else
- this->_cnode=this->_cnodes.back();
+ this->_cnode=this->_cnodes.empty() ? 0 : this->_cnodes.back();
return b;
}
YACSBases
${CPPUNIT_LIBRARIES} ${PLATFORM_LIBS})
-ADD_TEST(TestYACSPMML TestYACSPMML)
-
INSTALL(TARGETS TestYACSPMML DESTINATION ${SALOME_INSTALL_BINS})
+
+IF(NOT WIN32)
+ SET(SHELL /bin/sh)
+ SALOME_CONFIGURE_FILE(config_appli.xml.in config_appli.xml)
+ SALOME_CONFIGURE_FILE(PmmlExeTest.sh.in PmmlExeTest.sh)
+ SALOME_CONFIGURE_FILE(PmmlInSessionTest.sh.in PmmlInSessionTest.sh)
+ ADD_TEST(NAME PmmlExeTest COMMAND ${SHELL} PmmlExeTest.sh)
+ENDIF()
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2006-2014 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+if test -f @CMAKE_CURRENT_BINARY_DIR@/config_appli.xml; then
+ if test -n "${GEOM_ROOT_DIR}" && test -d ${GEOM_ROOT_DIR}; then
+ sed -i s%\"GEOM_ROOT_DIR\"%\"${GEOM_ROOT_DIR}\"% @CMAKE_CURRENT_BINARY_DIR@/config_appli.xml
+ fi
+ if test -n "${PYHELLO_ROOT_DIR}" && test -d ${PYHELLO_ROOT_DIR}; then
+ sed -i s%\"PYHELLO_ROOT_DIR\"%\"${PYHELLO_ROOT_DIR}\"% @CMAKE_CURRENT_BINARY_DIR@/config_appli.xml
+ fi
+fi
+
+if test -f @KERNEL_ROOT_DIR@/bin/salome/appli_gen.py ; then
+
+ # --- create a SALOME Application environment
+
+ @KERNEL_ROOT_DIR@/bin/salome/appli_gen.py
+ sed -i s/\"yes\"/\"no\"/ SalomeApp.xml
+ sed -i s/\,study\,cppContainer\,registry\,moduleCatalog// SalomeApp.xml
+ sed -i s/pyContainer/pyContainer\,study\,cppContainer\,registry\,moduleCatalog/ SalomeApp.xml
+
+cat > CatalogResources.xml << EOF
+<!DOCTYPE ResourcesCatalog>
+<resources>
+ <machine hostname="localhost" />
+</resources>
+EOF
+
+ # ---------------------------------------------------------------------------
+ # --- first set of test in C++
+
+ # --- launch in background a SALOME session (servers)
+
+ ln -fs @CMAKE_SOURCE_DIR@/src/yacsloader/samples .
+ ./runAppli > log1 2>&1
+
+ # --- wait a little to let the background process define
+ # the CORBA naming service port and host
+
+ sleep 5
+
+ # --- execute the test script in SALOME session environment
+
+ chmod +x @CMAKE_CURRENT_BINARY_DIR@/PmmlInSessionTest.sh
+ ./runSession @CMAKE_CURRENT_BINARY_DIR@/PmmlInSessionTest.sh
+ ret=$?
+
+ # ---------------------------------------------------------------------------
+
+ kill -9 `cat "/tmp/YACSTEST_PidEcho"`
+ ./runSession killSalome.py
+
+ echo "exec status PmmlInSessionTest.sh " $ret
+
+ # --- delete all the SALOME Application environment
+
+ ./bin/salome/appli_clean.sh -f
+
+else
+
+ ln -fs @CMAKE_SOURCE_DIR@/src/yacsloader/samples .
+ chmod +x @CMAKE_CURRENT_BINARY_DIR@/PmmlInSessionTest.sh
+ @CMAKE_CURRENT_BINARY_DIR@/PmmlInSessionTest.sh
+ ret=$?
+ echo "exec status PmmlInSessionTest.sh " $ret
+
+fi
+
+if [ $ret -ne 0 ]
+then cat /tmp/${USER}/UnitTestsResult
+else echo "Results are in /tmp/${USER}/UnitTestsResult"
+fi
+
+exit $ret
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2006-2014 CEA/DEN, EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+# --- script to execute in SALOME environment (use runSession)
+
+# --- wait until SALOME containers are ready
+
+python @CMAKE_CURRENT_SOURCE_DIR@/waitContainers.py
+
+# --- launch CORBA server echoSrv for tests
+
+./echoSrv &
+pidecho=$!
+echo $pidecho > "/tmp/YACSTEST_PidEcho"
+
+# Environment variables needed for the optimizer loop tests
+export LD_LIBRARY_PATH=@CMAKE_CURRENT_BINARY_DIR@:$LD_LIBRARY_PATH
+export PYTHONPATH=@CMAKE_BINARY_DIR@/src/engine_swig:$PYTHONPATH
+export PYTHONPATH=@CMAKE_BINARY_DIR@/src/runtime_swig:$PYTHONPATH
+export PYTHONPATH=@CMAKE_CURRENT_SOURCE_DIR@:$PYTHONPATH
+
+# --- launch unit tests
+
+export ROOT_SAMPLES=@CMAKE_SOURCE_DIR@/src/yacsloader/samples
+export TESTCOMPONENT_ROOT_DIR=@CMAKE_BINARY_DIR@/src/runtime/Test
+
+./TestYACSPMML
+ret=$?
+echo "exec status TestYACSPMML " $ret
+
+# --- return unit tests status
+
+exit $ret
void YACSPMMLBasicsTest1::setUp()
{
-#ifdef WIN32
- const char* p = std::getenv("YACS_ROOT_DIR");
- std::string strP("");
- if (p)
- strP = std::string(p);
- else
- throw std::string("unable to get YACS_ROOT_DIR");
- resourcesDir = strP;
- resourcesDir += "/share/salome/yacssamples/";
-#else
- resourcesDir = getenv("YACS_ROOT_DIR");
- resourcesDir += "/share/salome/yacssamples/";
-#endif
+ resourcesDir = "samples/";
}
void YACSPMMLBasicsTest1::tearDown()
--- /dev/null
+<!--
+ Copyright (C) 2006-2014 CEA/DEN, EDF R&D
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+
+-->
+<application>
+<prerequisites path="profile@SALOMEYACS_VERSION@.sh"/>
+<modules>
+ <!-- variable name <MODULE>_ROOT_DIR is built with <MODULE> == name attribute value -->
+ <!-- <MODULE>_ROOT_DIR values is set with path attribute value -->
+ <!-- attribute gui (defaults = yes) indicates if the module has a gui interface -->
+ <module name="KERNEL" gui="no" path="@KERNEL_ROOT_DIR@"/>
+ <module name="GUI" gui="no" path="@GUI_ROOT_DIR@"/>
+ <module name="GEOM" path="GEOM_ROOT_DIR"/> <!--GEOM_ROOT_DIR will be substituted at starting of test-->
+ <module name="PYHELLO" path="PYHELLO_ROOT_DIR"/> <!--PYHELLO_ROOT_DIR will be substituted at starting of test-->
+ <module name="YACS" path="@CMAKE_INSTALL_PREFIX@"/>
+</modules>
+</application>
+
<objref name="file" id="file"/>
<type name="int" kind="int"/>
<sequence name="intvec" content="int"/>
+ <struct name="stringpair">
+ <member name="name" type="string"/>
+ <member name="value" type="string"/>
+ </struct>
+ <sequence name="propvec" content="stringpair"/>
<objref name="pyobj" id="python:obj:1.0"/>
+ <sequence name="seqboolvec" content="boolvec"/>
+ <sequence name="seqdblevec" content="dblevec"/>
+ <sequence name="seqintvec" content="intvec"/>
<sequence name="stringvec" content="string"/>
+ <sequence name="seqstringvec" content="stringvec"/>
<container name="DefaultContainer">
<property name="container_name" value="FactoryServer"/>
<property name="name" value="localhost"/>
</container>
- <optimizer name="OptimizerLoop0" nbranch="4" lib="libTestOptLoop" entry="createOptimizerAlgASyncExample">
- <inline name="PyFunction1">
- <function name="myfunc">
- <code><![CDATA[def myfunc(inValue):
- outValue = int(3*inValue+5)
- print "Received", inValue, ", returning", outValue
- return outValue
-]]></code>
- </function>
- <inport name="inValue" type="double"/>
- <outport name="outValue" type="int"/>
+ <optimizer name="OptimizerLoop1" nbranch="1" lib="libTestOptLoop" entry="createOptimizerAlgASyncExample">
+ <inline name="PyScript7">
+ <script><code><![CDATA[o9 = int(i8)
+print "traitement:", i8
+]]></code></script>
+ <load container="DefaultContainer"/>
+ <inport name="i8" type="double"/>
+ <outport name="o9" type="int"/>
</inline>
</optimizer>
+ <datanode name="DataIn3">
+ <parameter name="o4" type="int">
+ <value><int>5</int></value>
+ </parameter>
+ </datanode>
+ <outnode name="OutNode5">
+ <parameter name="i6" type="int"/>
+ </outnode>
+ <control> <fromnode>OptimizerLoop1</fromnode> <tonode>OutNode5</tonode> </control>
+ <control> <fromnode>DataIn3</fromnode> <tonode>OptimizerLoop1</tonode> </control>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>algoResults</fromport>
+ <tonode>OutNode5</tonode> <toport>i6</toport>
+ </datalink>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>evalSamples</fromport>
+ <tonode>OptimizerLoop1.PyScript7</tonode> <toport>i8</toport>
+ </datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0</fromnode> <fromport>evalSamples</fromport>
- <tonode>OptimizerLoop0.PyFunction1</tonode> <toport>inValue</toport>
+ <fromnode>DataIn3</fromnode> <fromport>o4</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>algoInit</toport>
</datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0.PyFunction1</fromnode> <fromport>outValue</fromport>
- <tonode>OptimizerLoop0</tonode> <toport>evalResults</toport>
+ <fromnode>OptimizerLoop1.PyScript7</fromnode> <fromport>o9</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>evalResults</toport>
</datalink>
<parameter>
- <tonode>OptimizerLoop0</tonode><toport>nbBranches</toport>
- <value><int>4</int></value>
+ <tonode>OptimizerLoop1</tonode><toport>nbBranches</toport>
+ <value><int>1</int></value>
</parameter>
- <presentation name="OptimizerLoop0" x="6" y="34" width="168" height="178.5" expanded="1" expx="6" expy="34" expWidth="168" expHeight="178.5" shownState="0"/>
- <presentation name="OptimizerLoop0.PyFunction1" x="6" y="111.5" width="158" height="63" expanded="1" expx="6" expy="111.5" expWidth="158" expHeight="63" shownState="0"/>
- <presentation name="__ROOT__" x="0" y="0" width="178" height="216.5" expanded="1" expx="0" expy="0" expWidth="178" expHeight="216.5" shownState="0"/>
+ <presentation name="DataIn3" x="11" y="86" width="158" height="63" expanded="1" expx="11" expy="86" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1" x="238.5" y="83.5" width="204.5" height="216" expanded="1" expx="238.5" expy="83.5" expWidth="204.5" expHeight="216" shownState="0"/>
+ <presentation name="OutNode5" x="488.5" y="84" width="158" height="63" expanded="1" expx="488.5" expy="84" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1.PyScript7" x="42.5" y="149" width="158" height="63" expanded="1" expx="42.5" expy="149" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="__ROOT__" x="0" y="0" width="650.5" height="303.5" expanded="1" expx="0" expy="0" expWidth="650.5" expHeight="303.5" shownState="0"/>
</proc>
<objref name="file" id="file"/>
<type name="int" kind="int"/>
<sequence name="intvec" content="int"/>
+ <struct name="stringpair">
+ <member name="name" type="string"/>
+ <member name="value" type="string"/>
+ </struct>
+ <sequence name="propvec" content="stringpair"/>
<objref name="pyobj" id="python:obj:1.0"/>
+ <sequence name="seqboolvec" content="boolvec"/>
+ <sequence name="seqdblevec" content="dblevec"/>
+ <sequence name="seqintvec" content="intvec"/>
<sequence name="stringvec" content="string"/>
+ <sequence name="seqstringvec" content="stringvec"/>
<container name="DefaultContainer">
<property name="container_name" value="FactoryServer"/>
<property name="name" value="localhost"/>
</container>
- <optimizer name="OptimizerLoop0" nbranch="4" lib="algoasyncexample.py" entry="myalgoasync">
- <inline name="PyFunction0">
- <function name="myfunc">
- <code><![CDATA[def myfunc(inputValue):
- outputValue = int(inputValue*3+5)
- print "Received", inputValue, ", returning", outputValue
- return outputValue
-]]></code>
- </function>
- <inport name="inputValue" type="double"/>
- <outport name="outputValue" type="int"/>
+ <optimizer name="OptimizerLoop1" nbranch="1" lib="algoasyncexample.py" entry="myalgoasync">
+ <inline name="PyScript7">
+ <script><code><![CDATA[o9 = int(i8)
+print "traitement:", i8
+]]></code></script>
+ <load container="DefaultContainer"/>
+ <inport name="i8" type="double"/>
+ <outport name="o9" type="int"/>
</inline>
</optimizer>
+ <datanode name="DataIn3">
+ <parameter name="o4" type="int">
+ <value><int>5</int></value>
+ </parameter>
+ </datanode>
+ <outnode name="OutNode5">
+ <parameter name="i6" type="int"/>
+ </outnode>
+ <control> <fromnode>OptimizerLoop1</fromnode> <tonode>OutNode5</tonode> </control>
+ <control> <fromnode>DataIn3</fromnode> <tonode>OptimizerLoop1</tonode> </control>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>algoResults</fromport>
+ <tonode>OutNode5</tonode> <toport>i6</toport>
+ </datalink>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>evalSamples</fromport>
+ <tonode>OptimizerLoop1.PyScript7</tonode> <toport>i8</toport>
+ </datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0</fromnode> <fromport>evalSamples</fromport>
- <tonode>OptimizerLoop0.PyFunction0</tonode> <toport>inputValue</toport>
+ <fromnode>DataIn3</fromnode> <fromport>o4</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>algoInit</toport>
</datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0.PyFunction0</fromnode> <fromport>outputValue</fromport>
- <tonode>OptimizerLoop0</tonode> <toport>evalResults</toport>
+ <fromnode>OptimizerLoop1.PyScript7</fromnode> <fromport>o9</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>evalResults</toport>
</datalink>
<parameter>
- <tonode>OptimizerLoop0</tonode><toport>nbBranches</toport>
- <value><int>4</int></value>
+ <tonode>OptimizerLoop1</tonode><toport>nbBranches</toport>
+ <value><int>1</int></value>
</parameter>
- <presentation name="OptimizerLoop0" x="6" y="34" width="167" height="191" expanded="1" expx="6" expy="34" expWidth="167" expHeight="191" shownState="0"/>
- <presentation name="OptimizerLoop0.PyFunction0" x="5" y="124" width="158" height="63" expanded="1" expx="5" expy="124" expWidth="158" expHeight="63" shownState="0"/>
- <presentation name="__ROOT__" x="0" y="0" width="177" height="229" expanded="1" expx="0" expy="0" expWidth="177" expHeight="229" shownState="0"/>
+ <presentation name="DataIn3" x="11" y="86" width="158" height="63" expanded="1" expx="11" expy="86" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1" x="238.5" y="83.5" width="204.5" height="216" expanded="1" expx="238.5" expy="83.5" expWidth="204.5" expHeight="216" shownState="0"/>
+ <presentation name="OutNode5" x="488.5" y="84" width="158" height="63" expanded="1" expx="488.5" expy="84" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1.PyScript7" x="42.5" y="149" width="158" height="63" expanded="1" expx="42.5" expy="149" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="__ROOT__" x="0" y="0" width="650.5" height="303.5" expanded="1" expx="0" expy="0" expWidth="650.5" expHeight="303.5" shownState="0"/>
</proc>
<objref name="file" id="file"/>
<type name="int" kind="int"/>
<sequence name="intvec" content="int"/>
+ <struct name="stringpair">
+ <member name="name" type="string"/>
+ <member name="value" type="string"/>
+ </struct>
+ <sequence name="propvec" content="stringpair"/>
<objref name="pyobj" id="python:obj:1.0"/>
+ <sequence name="seqboolvec" content="boolvec"/>
+ <sequence name="seqdblevec" content="dblevec"/>
+ <sequence name="seqintvec" content="intvec"/>
<sequence name="stringvec" content="string"/>
+ <sequence name="seqstringvec" content="stringvec"/>
<container name="DefaultContainer">
<property name="container_name" value="FactoryServer"/>
<property name="name" value="localhost"/>
</container>
- <optimizer name="OptimizerLoop0" nbranch="4" lib="libTestOptLoop" entry="createOptimizerAlgSyncExample">
- <inline name="PyFunction1">
- <function name="myfunc">
- <code><![CDATA[def myfunc(inValue):
- outValue = int(3*inValue+5)
- print "Received", inValue, ", returning", outValue
- return outValue
-]]></code>
- </function>
- <inport name="inValue" type="double"/>
- <outport name="outValue" type="int"/>
+ <optimizer name="OptimizerLoop1" nbranch="1" lib="libTestOptLoop" entry="createOptimizerAlgSyncExample">
+ <inline name="PyScript7">
+ <script><code><![CDATA[o9 = int(i8)
+print "traitement:", i8
+]]></code></script>
+ <load container="DefaultContainer"/>
+ <inport name="i8" type="double"/>
+ <outport name="o9" type="int"/>
</inline>
</optimizer>
+ <datanode name="DataIn3">
+ <parameter name="o4" type="int">
+ <value><int>5</int></value>
+ </parameter>
+ </datanode>
+ <outnode name="OutNode5">
+ <parameter name="i6" type="int"/>
+ </outnode>
+ <control> <fromnode>OptimizerLoop1</fromnode> <tonode>OutNode5</tonode> </control>
+ <control> <fromnode>DataIn3</fromnode> <tonode>OptimizerLoop1</tonode> </control>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>algoResults</fromport>
+ <tonode>OutNode5</tonode> <toport>i6</toport>
+ </datalink>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>evalSamples</fromport>
+ <tonode>OptimizerLoop1.PyScript7</tonode> <toport>i8</toport>
+ </datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0</fromnode> <fromport>evalSamples</fromport>
- <tonode>OptimizerLoop0.PyFunction1</tonode> <toport>inValue</toport>
+ <fromnode>DataIn3</fromnode> <fromport>o4</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>algoInit</toport>
</datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0.PyFunction1</fromnode> <fromport>outValue</fromport>
- <tonode>OptimizerLoop0</tonode> <toport>evalResults</toport>
+ <fromnode>OptimizerLoop1.PyScript7</fromnode> <fromport>o9</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>evalResults</toport>
</datalink>
<parameter>
- <tonode>OptimizerLoop0</tonode><toport>nbBranches</toport>
- <value><int>4</int></value>
+ <tonode>OptimizerLoop1</tonode><toport>nbBranches</toport>
+ <value><int>1</int></value>
</parameter>
- <presentation name="OptimizerLoop0" x="6" y="34" width="169" height="187.5" expanded="1" expx="6" expy="34" expWidth="169" expHeight="187.5" shownState="0"/>
- <presentation name="OptimizerLoop0.PyFunction1" x="7" y="120.5" width="158" height="63" expanded="1" expx="7" expy="120.5" expWidth="158" expHeight="63" shownState="0"/>
- <presentation name="__ROOT__" x="0" y="0" width="179" height="225.5" expanded="1" expx="0" expy="0" expWidth="179" expHeight="225.5" shownState="0"/>
+ <presentation name="DataIn3" x="11" y="86" width="158" height="63" expanded="1" expx="11" expy="86" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1" x="238.5" y="83.5" width="204.5" height="216" expanded="1" expx="238.5" expy="83.5" expWidth="204.5" expHeight="216" shownState="0"/>
+ <presentation name="OutNode5" x="488.5" y="84" width="158" height="63" expanded="1" expx="488.5" expy="84" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1.PyScript7" x="42.5" y="149" width="158" height="63" expanded="1" expx="42.5" expy="149" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="__ROOT__" x="0" y="0" width="650.5" height="303.5" expanded="1" expx="0" expy="0" expWidth="650.5" expHeight="303.5" shownState="0"/>
</proc>
<objref name="file" id="file"/>
<type name="int" kind="int"/>
<sequence name="intvec" content="int"/>
+ <struct name="stringpair">
+ <member name="name" type="string"/>
+ <member name="value" type="string"/>
+ </struct>
+ <sequence name="propvec" content="stringpair"/>
<objref name="pyobj" id="python:obj:1.0"/>
+ <sequence name="seqboolvec" content="boolvec"/>
+ <sequence name="seqdblevec" content="dblevec"/>
+ <sequence name="seqintvec" content="intvec"/>
<sequence name="stringvec" content="string"/>
+ <sequence name="seqstringvec" content="stringvec"/>
<container name="DefaultContainer">
<property name="container_name" value="FactoryServer"/>
<property name="name" value="localhost"/>
</container>
- <optimizer name="OptimizerLoop0" nbranch="4" lib="algosyncexample.py" entry="myalgosync">
- <inline name="PyFunction0">
- <function name="myfunc">
- <code><![CDATA[def myfunc(inputValue):
- outputValue = int(inputValue*3+5)
- print "Received", inputValue, ", returning", outputValue
- return outputValue
-]]></code>
- </function>
- <inport name="inputValue" type="double"/>
- <outport name="outputValue" type="int"/>
+ <optimizer name="OptimizerLoop1" nbranch="1" lib="algosyncexample.py" entry="myalgosync">
+ <inline name="PyScript7">
+ <script><code><![CDATA[o9 = int(i8)
+print "traitement:", i8
+]]></code></script>
+ <load container="DefaultContainer"/>
+ <inport name="i8" type="double"/>
+ <outport name="o9" type="int"/>
</inline>
</optimizer>
+ <datanode name="DataIn3">
+ <parameter name="o4" type="int">
+ <value><int>5</int></value>
+ </parameter>
+ </datanode>
+ <outnode name="OutNode5">
+ <parameter name="i6" type="int"/>
+ </outnode>
+ <control> <fromnode>OptimizerLoop1</fromnode> <tonode>OutNode5</tonode> </control>
+ <control> <fromnode>DataIn3</fromnode> <tonode>OptimizerLoop1</tonode> </control>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>algoResults</fromport>
+ <tonode>OutNode5</tonode> <toport>i6</toport>
+ </datalink>
+ <datalink control="false">
+ <fromnode>OptimizerLoop1</fromnode> <fromport>evalSamples</fromport>
+ <tonode>OptimizerLoop1.PyScript7</tonode> <toport>i8</toport>
+ </datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0</fromnode> <fromport>evalSamples</fromport>
- <tonode>OptimizerLoop0.PyFunction0</tonode> <toport>inputValue</toport>
+ <fromnode>DataIn3</fromnode> <fromport>o4</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>algoInit</toport>
</datalink>
<datalink control="false">
- <fromnode>OptimizerLoop0.PyFunction0</fromnode> <fromport>outputValue</fromport>
- <tonode>OptimizerLoop0</tonode> <toport>evalResults</toport>
+ <fromnode>OptimizerLoop1.PyScript7</fromnode> <fromport>o9</fromport>
+ <tonode>OptimizerLoop1</tonode> <toport>evalResults</toport>
</datalink>
<parameter>
- <tonode>OptimizerLoop0</tonode><toport>nbBranches</toport>
- <value><int>4</int></value>
+ <tonode>OptimizerLoop1</tonode><toport>nbBranches</toport>
+ <value><int>1</int></value>
</parameter>
- <presentation name="OptimizerLoop0.PyFunction0" x="6" y="111.5" width="158" height="63" expanded="1" expx="6" expy="111.5" expWidth="158" expHeight="63" shownState="0"/>
- <presentation name="OptimizerLoop0" x="6" y="34" width="168" height="178.5" expanded="1" expx="6" expy="34" expWidth="168" expHeight="178.5" shownState="0"/>
- <presentation name="__ROOT__" x="0" y="0" width="178" height="216.5" expanded="1" expx="0" expy="0" expWidth="178" expHeight="216.5" shownState="0"/>
+ <presentation name="DataIn3" x="11" y="86" width="158" height="63" expanded="1" expx="11" expy="86" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1" x="238.5" y="83.5" width="204.5" height="216" expanded="1" expx="238.5" expy="83.5" expWidth="204.5" expHeight="216" shownState="0"/>
+ <presentation name="OutNode5" x="488.5" y="84" width="158" height="63" expanded="1" expx="488.5" expy="84" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="OptimizerLoop1.PyScript7" x="42.5" y="149" width="158" height="63" expanded="1" expx="42.5" expy="149" expWidth="158" expHeight="63" shownState="0"/>
+ <presentation name="__ROOT__" x="0" y="0" width="650.5" height="303.5" expanded="1" expx="0" expy="0" expWidth="650.5" expHeight="303.5" shownState="0"/>
</proc>
CONFIGURE_FILE(YacsLoaderTest.sh.in YacsLoaderTest.sh)
CONFIGURE_FILE(YacsLoaderInSessionTest.sh.in YacsLoaderInSessionTest.sh)
ADD_TEST(NAME YacsLoaderTest_swig COMMAND ${SHELL} ${CMAKE_CURRENT_BINARY_DIR}/YacsLoaderTest.sh)
+
+ SALOME_GENERATE_TESTS_ENVIRONMENT(tests_env)
ADD_TEST(NAME StdAloneYacsLoaderTest1 COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/StdAloneYacsLoaderTest1.py)
+ SET_TESTS_PROPERTIES(StdAloneYacsLoaderTest1 PROPERTIES ENVIRONMENT "${tests_env}")
ENDIF()
--- /dev/null
+
+# Dump generated by HEXABLOCK at 2014/08/12 17:27:07
+
+import hexablock
+
+doc = hexablock.addDocument ('default')
+rep001 = doc.countVertex ()
+rep002 = doc.countVertex ()
+rep003 = doc.countEdge ()
+rep004 = doc.countQuad ()
+rep005 = doc.countHexa ()
+rep006 = doc.countVertex ()
+law0 = doc.getLaw (0)
+rep007 = doc.countVertex ()
+rep008 = doc.countVertex ()
+rep009 = doc.countEdge ()
+rep010 = doc.countQuad ()
+rep011 = doc.countHexa ()
+rep012 = doc.countVertex ()